hexsha
stringlengths
40
40
size
int64
1
1.03M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
239
max_stars_repo_name
stringlengths
5
130
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
239
max_issues_repo_name
stringlengths
5
130
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
239
max_forks_repo_name
stringlengths
5
130
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
1
1.03M
avg_line_length
float64
1
958k
max_line_length
int64
1
1.03M
alphanum_fraction
float64
0
1
4a011eb0ea7b466201169588425bd5340a0cfb89
2,121
py
Python
sinkhorn_663/log_domain_skh.py
congwei-yang/663-Final-Project
3502a00167075e8a7b7cca1da01e7352e2f1c674
[ "MIT" ]
null
null
null
sinkhorn_663/log_domain_skh.py
congwei-yang/663-Final-Project
3502a00167075e8a7b7cca1da01e7352e2f1c674
[ "MIT" ]
null
null
null
sinkhorn_663/log_domain_skh.py
congwei-yang/663-Final-Project
3502a00167075e8a7b7cca1da01e7352e2f1c674
[ "MIT" ]
2
2021-09-22T02:08:57.000Z
2021-11-23T23:14:39.000Z
from numpy import zeros, reshape, log, exp
from numpy import sum as npsum

try:
    from numba import jit
except ImportError:
    # Graceful fallback: run as plain Python when numba is not installed,
    # instead of crashing at import time. Behavior is identical, just slower.
    def jit(*args, **kwargs):
        def wrapper(func):
            return func
        return wrapper


@jit(nopython=True, parallel=True)
def row_softmin(A, lam):
    """Row-wise soft-minimum of a matrix: -eps * log(sum_j exp(-A[i, j]/eps)),
    with eps = 1/lam.

    Uses the standard log-sum-exp shift (subtracting each row's minimum before
    exponentiating) so the exponentials cannot underflow to 0 (which made the
    original formulation return -inf for large entries of A) or overflow.

    :param A: 2-D matrix to soft-minimize over rows
    :param lam: Regularization parameter (eps = 1/lam)
    :return: 1-D array with one soft-minimum per row of A
    """
    epsilon = 1.0 / lam
    n_rows = A.shape[0]
    result = zeros(n_rows)
    for i in range(n_rows):
        m = A[i, :].min()
        # softmin = m - eps * log(sum exp(-(A - m)/eps)); exact rearrangement
        result[i] = m - epsilon * log(npsum(exp(-(A[i, :] - m) / epsilon)))
    return result


@jit(nopython=True, parallel=True)
def col_softmin(A, lam):
    """Column-wise soft-minimum of a matrix: -eps * log(sum_i exp(-A[i, j]/eps)),
    with eps = 1/lam.

    Same log-sum-exp stabilization as :func:`row_softmin`, applied per column.

    :param A: 2-D matrix to soft-minimize over columns
    :param lam: Regularization parameter (eps = 1/lam)
    :return: 1-D array with one soft-minimum per column of A
    """
    epsilon = 1.0 / lam
    n_cols = A.shape[1]
    result = zeros(n_cols)
    for j in range(n_cols):
        m = A[:, j].min()
        result[j] = m - epsilon * log(npsum(exp(-(A[:, j] - m) / epsilon)))
    return result


@jit(nopython=True, parallel=True)
def log_domain_sinkhorn(r, c, M, lam, tol=1e-6, maxiter=10000):
    """Sinkhorn distance between empirical measures r and c, computed in the
    log domain (dual potentials f, g) for numerical stability.

    Iterates the log-domain Sinkhorn updates until the transport cost changes
    by less than `tol`, or `maxiter` iterations are reached.

    NOTE(review): the code uses a single dimension d = len(r) for both
    potentials, i.e. it assumes len(r) == len(c) and M is a d x d matrix —
    confirm callers only pass square problems.

    :param r: Source empirical measure (length d)
    :param c: Target empirical measure (length d)
    :param M: d x d cost matrix
    :param lam: Regularization parameter (eps = 1/lam)
    :param tol: Accuracy tolerance on successive distance values
    :param maxiter: Maximum iteration number
    :return: Tuple (dist, iteration): Sinkhorn distance and iterations used
    """
    d = len(r)
    epsilon = 1.0 / lam

    # Initial half-sweep from zero potentials.
    f_prev = zeros(d)
    g_prev = zeros(d)
    f = row_softmin(M - reshape(f_prev, (d, 1)) - g_prev, lam) + f_prev + epsilon * log(r)
    g = col_softmin(M - reshape(f, (d, 1)) - g_prev, lam) + g_prev + epsilon * log(c)

    # dist starts at an arbitrary value far from dist_prev so the loop runs
    # at least once; convergence is judged on successive cost values.
    dist_prev = 0
    dist = 10
    iteration = 0
    while abs(dist - dist_prev) > tol:
        f_prev = f
        g_prev = g
        f = row_softmin(M - reshape(f_prev, (d, 1)) - g_prev, lam) + f_prev + epsilon * log(r)
        g = col_softmin(M - reshape(f, (d, 1)) - g_prev, lam) + g_prev + epsilon * log(c)

        # Recover the transport plan P = exp(u) K exp(v) entirely in log space.
        K_lg = -lam * M
        u_lg = f / epsilon
        v_lg = g / epsilon
        P_lg = K_lg + reshape(u_lg, (d, 1)) + v_lg
        P = exp(P_lg)

        dist_prev = dist
        dist = npsum(P * M)
        iteration += 1
        if iteration >= maxiter:
            break
    return dist, iteration
32.630769
99
0.609618
4a011ed76d75f5c4c07d528202461bf57d5df31e
120,052
py
Python
4. Sensitivity Analysis, Appendix A/PF_FP_EC_RB.py
rioarya/Land-Conversion_Woody-Biomass-Utilization-Scenarios
0042fd4333212e65735f3643ecb59971d1bd9466
[ "MIT" ]
null
null
null
4. Sensitivity Analysis, Appendix A/PF_FP_EC_RB.py
rioarya/Land-Conversion_Woody-Biomass-Utilization-Scenarios
0042fd4333212e65735f3643ecb59971d1bd9466
[ "MIT" ]
null
null
null
4. Sensitivity Analysis, Appendix A/PF_FP_EC_RB.py
rioarya/Land-Conversion_Woody-Biomass-Utilization-Scenarios
0042fd4333212e65735f3643ecb59971d1bd9466
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Dec 13 15:21:55 2019 @author: raryapratama """ #%% #Step (1): Import Python libraries, set land conversion scenarios general parameters import numpy as np import matplotlib.pyplot as plt from scipy.integrate import quad import seaborn as sns import pandas as pd #PF_FP_EC Scenario ##Set parameters #Parameters for primary forest initAGB = 233 #source: van Beijma et al. (2018) initAGB_min = 233-72 initAGB_max = 233 + 72 #parameters for timber plantation. Source: Khasanah et al. (2015) tf = 201 a = 0.082 b = 2.53 #%% #Step (2_1): C loss from the harvesting/clear cut df1_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_7y') df1_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_18y') df1_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_40y') df1_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_60y') dfE_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_E_Hbr_40y') t = range(0,tf,1) c_firewood_energy_S1_Ac7 = df1_Ac7['Firewood_other_energy_use'].values c_firewood_energy_S1_Ac18 = df1_Ac18['Firewood_other_energy_use'].values c_firewood_energy_S1_Tgr40 = df1_Tgr40['Firewood_other_energy_use'].values c_firewood_energy_S1_Tgr60 = df1_Tgr60['Firewood_other_energy_use'].values c_firewood_energy_E_Hbr40 = dfE_Hbr40['Firewood_other_energy_use'].values #%% #Step (2_2): C loss from the harvesting/clear cut as wood pellets dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_E_Hbr_40y') c_pellets_Hbr_40y = dfE['Wood_pellets'].values #%% #Step (3): Aboveground biomass (AGB) decomposition #Ac_7y df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_7y') tf = 201 t = np.arange(tf) decomp_tot_S1_Ac_7y = df['C_remainAGB'].values #S1_Ac_18y df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_18y') tf = 201 t = 
np.arange(tf) decomp_tot_S1_Ac_18y = df['C_remainAGB'].values #S1_Tgr_40y df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_40y') tf = 201 t = np.arange(tf) decomp_tot_S1_Tgr_40y = df['C_remainAGB'].values #S1_Tgr_60y df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_60y') tf = 201 t = np.arange(tf) decomp_tot_S1_Tgr_60y = df['C_remainAGB'].values #E df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_E_Hbr_40y') tf = 201 t = np.arange(tf) decomp_tot_E_Hbr_40y = df['C_remainAGB'].values #plotting t = np.arange(0,tf) plt.plot(t,decomp_tot_S1_Ac_7y,label='Ac_7y') plt.plot(t,decomp_tot_S1_Ac_18y,label='Ac_18y') plt.plot(t,decomp_tot_S1_Tgr_40y,label='Tgr_40y') plt.plot(t,decomp_tot_S1_Tgr_60y,label='Tgr_60y') plt.plot(t,decomp_tot_E_Hbr_40y,label='E_Hbr_40y') plt.xlim(0,200) plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False) plt.show() #%% #Step (4): Dynamic stock model of in-use wood materials from dynamic_stock_model import DynamicStockModel df1_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_7y') df1_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_18y') df1_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_40y') df1_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_60y') dfE_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_E_Hbr_40y') #product lifetime #paper P = 4 #furniture F = 20 #building materials B = 35 TestDSM1_Ac7 = DynamicStockModel(t = df1_Ac7['Year'].values, i = df1_Ac7['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([P]), 'StdDev': np.array([0.3*P])}) TestDSM1_Ac18 = DynamicStockModel(t = df1_Ac18['Year'].values, i = df1_Ac18['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([F]), 'StdDev': np.array([0.3*F])}) TestDSM1_Tgr40 = DynamicStockModel(t = df1_Tgr40['Year'].values, i = 
df1_Tgr40['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])}) TestDSM1_Tgr60 = DynamicStockModel(t = df1_Tgr60['Year'].values, i = df1_Tgr60['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])}) TestDSME_Hbr40 = DynamicStockModel(t = dfE_Hbr40['Year'].values, i = dfE_Hbr40['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])}) CheckStr1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.dimension_check() CheckStr1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.dimension_check() CheckStr1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.dimension_check() CheckStr1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.dimension_check() CheckStrE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.dimension_check() Stock_by_cohort1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_s_c_inflow_driven() Stock_by_cohort1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_s_c_inflow_driven() Stock_by_cohort1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_s_c_inflow_driven() Stock_by_cohort1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_s_c_inflow_driven() Stock_by_cohortE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_s_c_inflow_driven() S1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_stock_total() S1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_stock_total() S1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_stock_total() S1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_stock_total() SE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_stock_total() O_C1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_o_c_from_s_c() O_C1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_o_c_from_s_c() O_C1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_o_c_from_s_c() O_C1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_o_c_from_s_c() O_CE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_o_c_from_s_c() O1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_outflow_total() O1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_outflow_total() O1_Tgr40, 
ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_outflow_total() O1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_outflow_total() OE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_outflow_total() DS1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_stock_change() DS1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_stock_change() DS1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_stock_change() DS1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_stock_change() DSE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_stock_change() Bal1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.check_stock_balance() Bal1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.check_stock_balance() Bal1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.check_stock_balance() Bal1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.check_stock_balance() BalE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.check_stock_balance() #print output flow print(TestDSM1_Ac7.o) print(TestDSM1_Ac18.o) print(TestDSM1_Tgr40.o) print(TestDSM1_Tgr60.o) print(TestDSME_Hbr40.o) #%% #Step (5): Biomass growth ## one-year gap between rotation cycle # A. crassicarpa (Source: Anitha et al., 2015; Adiriono, 2009). Code: Ac tf_Ac_7y = 8 tf_Ac_18y = 19 A1 = range(1,tf_Ac_7y,1) A2 = range(1,tf_Ac_18y,1) #calculate the biomass and carbon content of A. crassicarpa over time (7y) def Y_Ac_7y(A1): return 44/12*1000*np.exp(4.503-(2.559/A1)) output_Y_Ac_7y = np.array([Y_Ac_7y(A1i) for A1i in A1]) print(output_Y_Ac_7y) #insert 0 value to the first element of the output result output_Y_Ac_7y = np.insert(output_Y_Ac_7y,0,0) print(output_Y_Ac_7y) #calculate the biomass and carbon content of A. crassicarpa over time (18y) def Y_Ac_18y(A2): return 44/12*1000*np.exp(4.503-(2.559/A2)) output_Y_Ac_18y = np.array([Y_Ac_18y(A2i) for A2i in A2]) print(output_Y_Ac_18y) #insert 0 value to the first element of the output result output_Y_Ac_18y = np.insert(output_Y_Ac_18y,0,0) print(output_Y_Ac_18y) ##26 times 8-year cycle (+1 year gap after the FP harvest)of new AGB of A. 
crassicarpa (7y), zero year gap between the cycle counter_7y = range(0,26,1) y_Ac_7y = [] for i in counter_7y: y_Ac_7y.append(output_Y_Ac_7y) flat_list_Ac_7y = [] for sublist in y_Ac_7y: for item in sublist: flat_list_Ac_7y.append(item) #the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf flat_list_Ac_7y = flat_list_Ac_7y[:len(flat_list_Ac_7y)-7] print(len(flat_list_Ac_7y)) ##11 times 19-year cycle (+1 year gap after the FP harvest) of new AGB of A. crassicarpa (18y), zero year gap between the cycle counter_18y = range(0,11,1) y_Ac_18y = [] for i in counter_18y: y_Ac_18y.append(output_Y_Ac_18y) flat_list_Ac_18y = [] for sublist in y_Ac_18y: for item in sublist: flat_list_Ac_18y.append(item) #the length of the list is now 209, so we remove the last 8 elements of the list to make the len=tf flat_list_Ac_18y = flat_list_Ac_18y[:len(flat_list_Ac_18y)-8] #####Check the flat list length for Hbr ## T. grandis (Source: Anitha et al., 2015; Adiriono, 2009). Code: Tgr tf_Tgr_40y = 41 tf_Tgr_60y = 61 T1 = range(0,tf_Tgr_40y,1) T2 = range(0,tf_Tgr_60y,1) #calculate the biomass and carbon content of T. grandis over time (40y) def Y_Tgr_40y(T1): return 44/12*1000*2.114*(T1**0.941) output_Y_Tgr_40y = np.array([Y_Tgr_40y(T1i) for T1i in T1]) print(output_Y_Tgr_40y) #calculate the biomass and carbon content of T. grandis over time (60y) def Y_Tgr_60y(T2): return 44/12*1000*2.114*(T2**0.941) output_Y_Tgr_60y = np.array([Y_Tgr_60y(T2i) for T2i in T2]) print(output_Y_Tgr_60y) ##5 times 41-year cycle of new AGB of T. 
grandis (40y), zero year gap between the cycle counter_40y = range(0,5,1) y_Tgr_40y = [] for i in counter_40y: y_Tgr_40y.append(output_Y_Tgr_40y) flat_list_Tgr_40y = [] for sublist in y_Tgr_40y: for item in sublist: flat_list_Tgr_40y.append(item) #the length of the list is now 205, so we remove the last 4 elements of the list to make the len=tf flat_list_Tgr_40y = flat_list_Tgr_40y[:len(flat_list_Tgr_40y)-4] ##4 times 60-year cycle of new AGB of T. grandis (60y), zero year gap between the cycle counter_60y = range(0,4,1) y_Tgr_60y = [] for i in counter_60y: y_Tgr_60y.append(output_Y_Tgr_60y) flat_list_Tgr_60y = [] for sublist in y_Tgr_60y: for item in sublist: flat_list_Tgr_60y.append(item) #the length of the list is now 244, so we remove the last 43 elements of the list to make the len=tf flat_list_Tgr_60y = flat_list_Tgr_60y[:len(flat_list_Tgr_60y)-43] ## H. brasiliensis (Source: Guillaume et al., 2018). Code: Hbr tf_Hbr_40y = 41 H1 = range(0,tf_Hbr_40y,1) #calculate the biomass and carbon content of H. brasiliensis over time (40y) def Y_Hbr_40y(H1): return 44/12*1000*1.55*H1 output_Y_Hbr_40y = np.array([Y_Hbr_40y(H1i) for H1i in H1]) print(output_Y_Hbr_40y) ##5 times 40-year cycle of new AGB of H. 
brasiliensis (40y), zero year gap between the cycle counter_40y = range(0,5,1) y_Hbr_40y = [] for i in counter_40y: y_Hbr_40y.append(output_Y_Hbr_40y) flat_list_Hbr_40y = [] for sublist in y_Hbr_40y: for item in sublist: flat_list_Hbr_40y.append(item) #the length of the list is now 205, so we remove the last 4 elements of the list to make the len=tf flat_list_Hbr_40y = flat_list_Hbr_40y[:len(flat_list_Hbr_40y)-4] #plotting t = range (0,tf,1) plt.xlim([0, 200]) plt.plot(t, flat_list_Ac_7y, color='lightcoral') plt.plot(t, flat_list_Ac_18y, color='deeppink') plt.plot(t, flat_list_Hbr_40y, color='darkviolet') plt.plot(t, flat_list_Tgr_40y) plt.plot(t, flat_list_Tgr_60y, color='seagreen') #plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha='0.4') plt.xlabel('Time (year)') plt.ylabel('AGB (tC/ha)') plt.show() ##Yearly sequestration ## A. crassicarpa (7y) #find the yearly sequestration by calculating the differences between elements in list 'flat_list_Ac_7y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) flat_list_Ac_7y = [p - q for q, p in zip(flat_list_Ac_7y, flat_list_Ac_7y[1:])] #since there is no sequestration between the replanting year (e.g., year 7 to 8), we have to replace negative numbers in 'flat_list_Ac_7y' with 0 values flat_list_Ac_7y = [0 if i < 0 else i for i in flat_list_Ac_7y] #insert 0 value to the list as the first element, because there is no sequestration in year 0 var = 0 flat_list_Ac_7y.insert(0,var) #make 'flat_list_Ac_7y' elements negative numbers to denote sequestration flat_list_Ac_7y = [ -x for x in flat_list_Ac_7y] print(flat_list_Ac_7y) ##A. 
crassicarpa (18y) #find the yearly sequestration by calculating the differences between elements in list 'flat_list_Ac_18y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) flat_list_Ac_18y = [t - u for u, t in zip(flat_list_Ac_18y, flat_list_Ac_18y[1:])] #since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_Ac_18y' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913) flat_list_Ac_18y = [0 if i < 0 else i for i in flat_list_Ac_18y] #insert 0 value to the list as the first element, because there is no sequestration in year 0 var = 0 flat_list_Ac_18y.insert(0,var) #make 'flat_list_plasma' elements negative numbers to denote sequestration flat_list_Ac_18y = [ -x for x in flat_list_Ac_18y] print(flat_list_Ac_18y) ##T. grandis (40y) #find the yearly sequestration by calculating the differences between elements in list 'flat_list_Tgr_40y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) flat_list_Tgr_40y = [b - c for c, b in zip(flat_list_Tgr_40y, flat_list_Tgr_40y[1:])] #since there is no sequestration between the replanting year (e.g., year 40 to 41), we have to replace negative numbers in 'flat_list_Tgr_40y' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913) flat_list_Tgr_40y = [0 if i < 0 else i for i in flat_list_Tgr_40y] #insert 0 value to the list as the first element, because there is no sequestration in year 0 var = 0 flat_list_Tgr_40y.insert(0,var) #make 'flat_list_plasma' elements negative numbers to denote sequestration flat_list_Tgr_40y = [-x for x in flat_list_Tgr_40y] print(flat_list_Tgr_40y) ##T. 
grandis (60y) #find the yearly sequestration by calculating the differences between elements in list 'flat_list_Tgr_60y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) flat_list_Tgr_60y = [k - l for l, k in zip(flat_list_Tgr_60y, flat_list_Tgr_60y[1:])] #since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_Tgr_60y' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913) flat_list_Tgr_60y = [0 if i < 0 else i for i in flat_list_Tgr_60y] #insert 0 value to the list as the first element, because there is no sequestration in year 0 var = 0 flat_list_Tgr_60y.insert(0,var) #make 'flat_list_plasma' elements negative numbers to denote sequestration flat_list_Tgr_60y = [ -x for x in flat_list_Tgr_60y] print(flat_list_Tgr_60y) ##H. brasiliensis (40y) #find the yearly sequestration by calculating the differences between elements in list 'flat_list_Hbr_40y(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) flat_list_Hbr_40y = [c - d for d, c in zip(flat_list_Hbr_40y, flat_list_Hbr_40y[1:])] #since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_Hbr_40y' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913) flat_list_Hbr_40y = [0 if i < 0 else i for i in flat_list_Hbr_40y] #insert 0 value to the list as the first element, because there is no sequestration in year 0 var = 0 flat_list_Hbr_40y.insert(0,var) #make 'flat_list_plasma' elements negative numbers to denote sequestration flat_list_Hbr_40y = [ -x for x in flat_list_Hbr_40y] print(flat_list_Hbr_40y) #%% #Step (6): post-harvest processing of wood #post-harvest wood processing df1_Ac_7y = 
pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_7y') df1_Ac_18y = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_18y') df1_Tgr_40y = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_40y') dfl_Tgr_60y = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_60y') dfE_Hbr_40y = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_E_Hbr_40y') t = range(0,tf,1) PH_Emissions_HWP1_Ac_7y = df1_Ac_7y['PH_Emissions_HWP'].values PH_Emissions_HWP1_Ac_18y = df1_Ac_18y['PH_Emissions_HWP'].values PH_Emissions_HWP1_Tgr_40y = df1_Tgr_40y['PH_Emissions_HWP'].values PH_Emissions_HWP1_Tgr_60y = dfl_Tgr_60y['PH_Emissions_HWP'].values PH_Emissions_HWPE_Hbr_40y = dfE_Hbr_40y ['PH_Emissions_HWP'].values #%% #Step (7_1): landfill gas decomposition (CH4) #CH4 decomposition hl = 20 #half-live k = (np.log(2))/hl #S1_Ac_7y df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_7y') tf = 201 t = np.arange(tf) def decomp_CH4_S1_Ac_7y(t,remainAGB_CH4_S1_Ac_7y): return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1_Ac_7y #set zero matrix output_decomp_CH4_S1_Ac_7y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values))) for i,remain_part_CH4_S1_Ac_7y in enumerate(df['Landfill_decomp_CH4'].values): #print(i,remain_part) output_decomp_CH4_S1_Ac_7y[i:,i] = decomp_CH4_S1_Ac_7y(t[:len(t)-i],remain_part_CH4_S1_Ac_7y) print(output_decomp_CH4_S1_Ac_7y[:,:4]) #find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1' #(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) # https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements #difference between element, subs_matrix_CH4_S1_Ac_7y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1))) i = 0 while i < tf: subs_matrix_CH4_S1_Ac_7y[:,i] = np.diff(output_decomp_CH4_S1_Ac_7y[:,i]) i = i + 1 
print(subs_matrix_CH4_S1_Ac_7y[:,:4]) print(len(subs_matrix_CH4_S1_Ac_7y)) #since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward), #we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913) subs_matrix_CH4_S1_Ac_7y = subs_matrix_CH4_S1_Ac_7y.clip(max=0) print(subs_matrix_CH4_S1_Ac_7y[:,:4]) #make the results as absolute values subs_matrix_CH4_S1_Ac_7y = abs(subs_matrix_CH4_S1_Ac_7y) print(subs_matrix_CH4_S1_Ac_7y[:,:4]) #insert row of zeros into the first row of the subs_matrix zero_matrix_CH4_S1_Ac_7y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values))) print(zero_matrix_CH4_S1_Ac_7y) subs_matrix_CH4_S1_Ac_7y = np.vstack((zero_matrix_CH4_S1_Ac_7y, subs_matrix_CH4_S1_Ac_7y)) print(subs_matrix_CH4_S1_Ac_7y[:,:4]) #sum every column of the subs_matrix into one vector matrix matrix_tot_CH4_S1_Ac_7y = (tf,1) decomp_tot_CH4_S1_Ac_7y = np.zeros(matrix_tot_CH4_S1_Ac_7y) i = 0 while i < tf: decomp_tot_CH4_S1_Ac_7y[:,0] = decomp_tot_CH4_S1_Ac_7y[:,0] + subs_matrix_CH4_S1_Ac_7y[:,i] i = i + 1 print(decomp_tot_CH4_S1_Ac_7y[:,0]) #S1_Ac_18y df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_18y') tf = 201 t = np.arange(tf) def decomp_CH4_S1_Ac_18y(t,remainAGB_CH4_S1_Ac_18y): return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1_Ac_18y #set zero matrix output_decomp_CH4_S1_Ac_18y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values))) for i,remain_part_CH4_S1_Ac_18y in enumerate(df['Landfill_decomp_CH4'].values): #print(i,remain_part) output_decomp_CH4_S1_Ac_18y[i:,i] = decomp_CH4_S1_Ac_18y(t[:len(t)-i],remain_part_CH4_S1_Ac_18y) print(output_decomp_CH4_S1_Ac_18y[:,:4]) #find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1' #(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) # 
https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements #difference between element, subs_matrix_CH4_S1_Ac_18y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1))) i = 0 while i < tf: subs_matrix_CH4_S1_Ac_18y[:,i] = np.diff(output_decomp_CH4_S1_Ac_18y[:,i]) i = i + 1 print(subs_matrix_CH4_S1_Ac_18y[:,:4]) print(len(subs_matrix_CH4_S1_Ac_18y)) #since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward), #we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913) subs_matrix_CH4_S1_Ac_18y = subs_matrix_CH4_S1_Ac_18y.clip(max=0) print(subs_matrix_CH4_S1_Ac_18y[:,:4]) #make the results as absolute values subs_matrix_CH4_S1_Ac_18y = abs(subs_matrix_CH4_S1_Ac_18y) print(subs_matrix_CH4_S1_Ac_18y[:,:4]) #insert row of zeros into the first row of the subs_matrix zero_matrix_CH4_S1_Ac_18y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values))) print(zero_matrix_CH4_S1_Ac_18y) subs_matrix_CH4_S1_Ac_18y = np.vstack((zero_matrix_CH4_S1_Ac_18y, subs_matrix_CH4_S1_Ac_18y)) print(subs_matrix_CH4_S1_Ac_18y[:,:4]) #sum every column of the subs_matrix into one vector matrix matrix_tot_CH4_S1_Ac_18y = (tf,1) decomp_tot_CH4_S1_Ac_18y = np.zeros(matrix_tot_CH4_S1_Ac_18y) i = 0 while i < tf: decomp_tot_CH4_S1_Ac_18y[:,0] = decomp_tot_CH4_S1_Ac_18y[:,0] + subs_matrix_CH4_S1_Ac_18y[:,i] i = i + 1 print(decomp_tot_CH4_S1_Ac_18y[:,0]) #S1_Tgr_40y df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_40y') tf = 201 t = np.arange(tf) def decomp_CH4_S1_Tgr_40y(t,remainAGB_CH4_S1_Tgr_40y): return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1_Tgr_40y #set zero matrix output_decomp_CH4_S1_Tgr_40y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values))) for i,remain_part_CH4_S1_Tgr_40y in enumerate(df['Landfill_decomp_CH4'].values): #print(i,remain_part) 
output_decomp_CH4_S1_Tgr_40y[i:,i] = decomp_CH4_S1_Tgr_40y(t[:len(t)-i],remain_part_CH4_S1_Tgr_40y) print(output_decomp_CH4_S1_Tgr_40y[:,:4]) #find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1' #(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) # https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements #difference between element, subs_matrix_CH4_S1_Tgr_40y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1))) i = 0 while i < tf: subs_matrix_CH4_S1_Tgr_40y[:,i] = np.diff(output_decomp_CH4_S1_Tgr_40y[:,i]) i = i + 1 print(subs_matrix_CH4_S1_Tgr_40y[:,:4]) print(len(subs_matrix_CH4_S1_Tgr_40y)) #since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward), #we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913) subs_matrix_CH4_S1_Tgr_40y = subs_matrix_CH4_S1_Tgr_40y.clip(max=0) print(subs_matrix_CH4_S1_Tgr_40y[:,:4]) #make the results as absolute values subs_matrix_CH4_S1_Tgr_40y = abs(subs_matrix_CH4_S1_Tgr_40y) print(subs_matrix_CH4_S1_Tgr_40y[:,:4]) #insert row of zeros into the first row of the subs_matrix zero_matrix_CH4_S1_Tgr_40y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values))) print(zero_matrix_CH4_S1_Tgr_40y) subs_matrix_CH4_S1_Tgr_40y = np.vstack((zero_matrix_CH4_S1_Tgr_40y, subs_matrix_CH4_S1_Tgr_40y)) print(subs_matrix_CH4_S1_Tgr_40y[:,:4]) #sum every column of the subs_matrix into one vector matrix matrix_tot_CH4_S1_Tgr_40y = (tf,1) decomp_tot_CH4_S1_Tgr_40y = np.zeros(matrix_tot_CH4_S1_Tgr_40y) i = 0 while i < tf: decomp_tot_CH4_S1_Tgr_40y[:,0] = decomp_tot_CH4_S1_Tgr_40y[:,0] + subs_matrix_CH4_S1_Tgr_40y[:,i] i = i + 1 print(decomp_tot_CH4_S1_Tgr_40y[:,0]) #S1_Tgr_60y df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 
'PF_FP_S1_Tgr_60y') tf = 201 t = np.arange(tf) def decomp_CH4_S1_Tgr_60y(t,remainAGB_CH4_S1_Tgr_60y): return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1_Tgr_60y #set zero matrix output_decomp_CH4_S1_Tgr_60y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values))) for i,remain_part_CH4_S1_Tgr_60y in enumerate(df['Landfill_decomp_CH4'].values): #print(i,remain_part) output_decomp_CH4_S1_Tgr_60y[i:,i] = decomp_CH4_S1_Tgr_60y(t[:len(t)-i],remain_part_CH4_S1_Tgr_60y) print(output_decomp_CH4_S1_Tgr_60y[:,:4]) #find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1' #(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) # https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements #difference between element, subs_matrix_CH4_S1_Tgr_60y = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1))) i = 0 while i < tf: subs_matrix_CH4_S1_Tgr_60y[:,i] = np.diff(output_decomp_CH4_S1_Tgr_60y[:,i]) i = i + 1 print(subs_matrix_CH4_S1_Tgr_60y[:,:4]) print(len(subs_matrix_CH4_S1_Tgr_60y)) #since there is no carbon emission from decomposition at the beginning of the year (esp. 
from 'year 1' onward), #we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913) subs_matrix_CH4_S1_Tgr_60y = subs_matrix_CH4_S1_Tgr_60y.clip(max=0) print(subs_matrix_CH4_S1_Tgr_60y[:,:4]) #make the results as absolute values subs_matrix_CH4_S1_Tgr_60y = abs(subs_matrix_CH4_S1_Tgr_60y) print(subs_matrix_CH4_S1_Tgr_60y[:,:4]) #insert row of zeros into the first row of the subs_matrix zero_matrix_CH4_S1_Tgr_60y = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values))) print(zero_matrix_CH4_S1_Tgr_60y) subs_matrix_CH4_S1_Tgr_60y = np.vstack((zero_matrix_CH4_S1_Tgr_60y, subs_matrix_CH4_S1_Tgr_60y)) print(subs_matrix_CH4_S1_Tgr_60y[:,:4]) #sum every column of the subs_matrix into one vector matrix matrix_tot_CH4_S1_Tgr_60y = (tf,1) decomp_tot_CH4_S1_Tgr_60y = np.zeros(matrix_tot_CH4_S1_Tgr_60y) i = 0 while i < tf: decomp_tot_CH4_S1_Tgr_60y[:,0] = decomp_tot_CH4_S1_Tgr_60y[:,0] + subs_matrix_CH4_S1_Tgr_60y[:,i] i = i + 1 print(decomp_tot_CH4_S1_Tgr_60y[:,0]) #E df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_E_Hbr_40y') tf = 201 t = np.arange(tf) def decomp_CH4_E_Hbr_40y(t,remainAGB_CH4_E_Hbr_40y): return (1-(1-np.exp(-k*t)))*remainAGB_CH4_E_Hbr_40y #set zero matrix output_decomp_CH4_E_Hbr_40y = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values))) for i,remain_part_CH4_E_Hbr_40y in enumerate(df['Landfill_decomp_CH4'].values): #print(i,remain_part) output_decomp_CH4_E_Hbr_40y[i:,i] = decomp_CH4_E_Hbr_40y(t[:len(t)-i],remain_part_CH4_E_Hbr_40y) print(output_decomp_CH4_E_Hbr_40y[:,:4]) #find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1' #(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list) # https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements #difference between element, 
# ---------------------------------------------------------------------------
# Landfill CH4 decomposition, E_Hbr_40y (continuation): convert the
# remaining-carbon curves in output_decomp_CH4_E_Hbr_40y into yearly emissions.
# ---------------------------------------------------------------------------
# Yearly emission = year-over-year difference of the remaining-carbon curve.
# BUGFIX: the original wrote len(df['Landfill_decomp_CH4'].values-1), which
# subtracts 1 from every array *element* before taking len() (a no-op on the
# length); the intended column count is len(df['Landfill_decomp_CH4'].values).
subs_matrix_CH4_E_Hbr_40y = np.zeros((len(t) - 1, len(df['Landfill_decomp_CH4'].values)))

i = 0
while i < tf:
    subs_matrix_CH4_E_Hbr_40y[:, i] = np.diff(output_decomp_CH4_E_Hbr_40y[:, i])
    i = i + 1

print(subs_matrix_CH4_E_Hbr_40y[:, :4])
print(len(subs_matrix_CH4_E_Hbr_40y))

# There is no carbon emission from decomposition at the beginning of a year,
# so positive differences are clipped to zero and the remaining (negative)
# losses are reported as positive emissions via abs().
subs_matrix_CH4_E_Hbr_40y = subs_matrix_CH4_E_Hbr_40y.clip(max=0)
print(subs_matrix_CH4_E_Hbr_40y[:, :4])

subs_matrix_CH4_E_Hbr_40y = abs(subs_matrix_CH4_E_Hbr_40y)
print(subs_matrix_CH4_E_Hbr_40y[:, :4])

# Insert a row of zeros as the first row so the series re-aligns with year 0.
zero_matrix_CH4_E_Hbr_40y = np.zeros((len(t) - 200, len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_E_Hbr_40y)
subs_matrix_CH4_E_Hbr_40y = np.vstack((zero_matrix_CH4_E_Hbr_40y, subs_matrix_CH4_E_Hbr_40y))
print(subs_matrix_CH4_E_Hbr_40y[:, :4])

# Sum every cohort column into one total yearly CH4 emission vector.
matrix_tot_CH4_E_Hbr_40y = (tf, 1)
decomp_tot_CH4_E_Hbr_40y = np.zeros(matrix_tot_CH4_E_Hbr_40y)

i = 0
while i < tf:
    decomp_tot_CH4_E_Hbr_40y[:, 0] = decomp_tot_CH4_E_Hbr_40y[:, 0] + subs_matrix_CH4_E_Hbr_40y[:, i]
    i = i + 1

print(decomp_tot_CH4_E_Hbr_40y[:, 0])

# -- plot yearly landfill CH4 emissions for all five scenarios --------------
t = np.arange(0, tf)

plt.plot(t, decomp_tot_CH4_S1_Ac_7y, label='Ac_7y')
plt.plot(t, decomp_tot_CH4_S1_Ac_18y, label='Ac_18y')
plt.plot(t, decomp_tot_CH4_S1_Tgr_40y, label='Tgr_40y')
plt.plot(t, decomp_tot_CH4_S1_Tgr_60y, label='Tgr_60y')
plt.plot(t, decomp_tot_CH4_E_Hbr_40y, label='E_Hbr_40y')
plt.xlim(0, 200)
plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)
plt.show()

#%%

# Step (7_2): landfill gas decomposition (CO2)

hl = 20               # CO2 decomposition half-life (years)
k = (np.log(2)) / hl  # first-order decay constant


def _co2_landfill_yearly(df_sheet):
    """Turn one scenario's 'Landfill_decomp_CO2' column into yearly emissions.

    Replicates the repeated inline pipeline (including its diagnostic prints):
    build the remaining-carbon matrix with first-order decay exp(-k*t), take
    year-over-year differences, keep only the losses as positive emissions,
    re-align to year 0 with a zero row, and sum the per-cohort columns.

    Returns (output_decomp, subs_matrix, zero_matrix, decomp_tot).
    """
    vals = df_sheet['Landfill_decomp_CO2'].values

    # Remaining-carbon matrix: cohort deposited in year idx decays from then on.
    output_decomp = np.zeros((len(t), len(vals)))
    for idx, remain_part in enumerate(vals):
        # (1 - (1 - exp(-k*t))) == exp(-k*t): fraction still remaining after t years
        output_decomp[idx:, idx] = (1 - (1 - np.exp(-k * t[:len(t) - idx]))) * remain_part
    print(output_decomp[:, :4])

    # Yearly emission = difference between consecutive years.
    # (Column count is len(vals); see BUGFIX note at the top of this section.)
    subs = np.zeros((len(t) - 1, len(vals)))
    for col in range(tf):
        subs[:, col] = np.diff(output_decomp[:, col])
    print(subs[:, :4])
    print(len(subs))

    # Positive differences would mean negative emissions; clip them to zero
    # and report the remaining losses as positive values.
    subs = subs.clip(max=0)
    print(subs[:, :4])
    subs = abs(subs)
    print(subs[:, :4])

    # Prepend a zero row so the series starts at year 0.
    zero = np.zeros((len(t) - 200, len(vals)))
    print(zero)
    subs = np.vstack((zero, subs))
    print(subs[:, :4])

    # Sum all cohort columns into one yearly total.
    decomp_tot = np.zeros((tf, 1))
    for col in range(tf):
        decomp_tot[:, 0] = decomp_tot[:, 0] + subs[:, col]
    print(decomp_tot[:, 0])
    return output_decomp, subs, zero, decomp_tot


# S1_Ac_7y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_7y')
tf = 201
t = np.arange(tf)


def decomp_S1_Ac_7y(t, remainAGB_S1_Ac_7y):
    """First-order decay: fraction exp(-k*t) of remainAGB is still in place."""
    return (1 - (1 - np.exp(-k * t))) * remainAGB_S1_Ac_7y


(output_decomp_S1_Ac_7y, subs_matrix_S1_Ac_7y,
 zero_matrix_S1_Ac_7y, decomp_tot_CO2_S1_Ac_7y) = _co2_landfill_yearly(df)
matrix_tot_S1_Ac_7y = (tf, 1)

# S1_Ac_18y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_18y')
tf = 201
t = np.arange(tf)


def decomp_S1_Ac_18y(t, remainAGB_S1_Ac_18y):
    """First-order decay: fraction exp(-k*t) of remainAGB is still in place."""
    return (1 - (1 - np.exp(-k * t))) * remainAGB_S1_Ac_18y


(output_decomp_S1_Ac_18y, subs_matrix_S1_Ac_18y,
 zero_matrix_S1_Ac_18y, decomp_tot_CO2_S1_Ac_18y) = _co2_landfill_yearly(df)
matrix_tot_S1_Ac_18y = (tf, 1)

# S1_Tgr_40y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_40y')
tf = 201
t = np.arange(tf)


def decomp_S1_Tgr_40y(t, remainAGB_S1_Tgr_40y):
    """First-order decay: fraction exp(-k*t) of remainAGB is still in place."""
    return (1 - (1 - np.exp(-k * t))) * remainAGB_S1_Tgr_40y


(output_decomp_S1_Tgr_40y, subs_matrix_S1_Tgr_40y,
 zero_matrix_S1_Tgr_40y, decomp_tot_CO2_S1_Tgr_40y) = _co2_landfill_yearly(df)
matrix_tot_S1_Tgr_40y = (tf, 1)

# S1_Tgr_60y (remaining-carbon matrix only here; the yearly-emission
# processing for this scenario continues in the section below)
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_60y')
tf = 201
t = np.arange(tf)


def decomp_S1_Tgr_60y(t, remainAGB_S1_Tgr_60y):
    """First-order decay: fraction exp(-k*t) of remainAGB is still in place."""
    return (1 - (1 - np.exp(-k * t))) * remainAGB_S1_Tgr_60y


output_decomp_S1_Tgr_60y = np.zeros((len(t), len(df['Landfill_decomp_CO2'].values)))
for i, remain_part_S1_Tgr_60y in enumerate(df['Landfill_decomp_CO2'].values):
    output_decomp_S1_Tgr_60y[i:, i] = decomp_S1_Tgr_60y(t[:len(t) - i], remain_part_S1_Tgr_60y)
# S1_Tgr_60y (continued): yearly landfill CO2 emissions
print(output_decomp_S1_Tgr_60y[:, :4])

# Yearly emission = year-over-year difference of the remaining-carbon curve.
# BUGFIX: the original wrote len(df['Landfill_decomp_CO2'].values-1), which
# subtracts 1 from every array *element* before taking len() (a no-op on the
# length); the intended column count is len(df['Landfill_decomp_CO2'].values).
subs_matrix_S1_Tgr_60y = np.zeros((len(t) - 1, len(df['Landfill_decomp_CO2'].values)))

i = 0
while i < tf:
    subs_matrix_S1_Tgr_60y[:, i] = np.diff(output_decomp_S1_Tgr_60y[:, i])
    i = i + 1

print(subs_matrix_S1_Tgr_60y[:, :4])
print(len(subs_matrix_S1_Tgr_60y))

# No emission at the start of a year: zero out positive differences, then
# report the losses as positive emissions.
subs_matrix_S1_Tgr_60y = subs_matrix_S1_Tgr_60y.clip(max=0)
print(subs_matrix_S1_Tgr_60y[:, :4])

subs_matrix_S1_Tgr_60y = abs(subs_matrix_S1_Tgr_60y)
print(subs_matrix_S1_Tgr_60y[:, :4])

# Insert a row of zeros as the first row so the series aligns with year 0.
zero_matrix_S1_Tgr_60y = np.zeros((len(t) - 200, len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_S1_Tgr_60y)
subs_matrix_S1_Tgr_60y = np.vstack((zero_matrix_S1_Tgr_60y, subs_matrix_S1_Tgr_60y))
print(subs_matrix_S1_Tgr_60y[:, :4])

# Sum every cohort column into one yearly total.
matrix_tot_S1_Tgr_60y = (tf, 1)
decomp_tot_CO2_S1_Tgr_60y = np.zeros(matrix_tot_S1_Tgr_60y)

i = 0
while i < tf:
    decomp_tot_CO2_S1_Tgr_60y[:, 0] = decomp_tot_CO2_S1_Tgr_60y[:, 0] + subs_matrix_S1_Tgr_60y[:, i]
    i = i + 1

print(decomp_tot_CO2_S1_Tgr_60y[:, 0])

# E_Hbr_40y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_E_Hbr_40y')
tf = 201
t = np.arange(tf)


def decomp_E_Hbr_40y(t, remainAGB_E_Hbr_40y):
    """First-order decay: fraction exp(-k*t) of remainAGB is still in place."""
    return (1 - (1 - np.exp(-k * t))) * remainAGB_E_Hbr_40y


# Remaining-carbon matrix: the cohort deposited in year i decays from then on.
output_decomp_E_Hbr_40y = np.zeros((len(t), len(df['Landfill_decomp_CO2'].values)))
for i, remain_part_E_Hbr_40y in enumerate(df['Landfill_decomp_CO2'].values):
    output_decomp_E_Hbr_40y[i:, i] = decomp_E_Hbr_40y(t[:len(t) - i], remain_part_E_Hbr_40y)
print(output_decomp_E_Hbr_40y[:, :4])

# Yearly emissions (same BUGFIX as above: column count is len(values)).
subs_matrix_E_Hbr_40y = np.zeros((len(t) - 1, len(df['Landfill_decomp_CO2'].values)))

i = 0
while i < tf:
    subs_matrix_E_Hbr_40y[:, i] = np.diff(output_decomp_E_Hbr_40y[:, i])
    i = i + 1

print(subs_matrix_E_Hbr_40y[:, :4])
print(len(subs_matrix_E_Hbr_40y))

subs_matrix_E_Hbr_40y = subs_matrix_E_Hbr_40y.clip(max=0)
print(subs_matrix_E_Hbr_40y[:, :4])

subs_matrix_E_Hbr_40y = abs(subs_matrix_E_Hbr_40y)
print(subs_matrix_E_Hbr_40y[:, :4])

zero_matrix_E_Hbr_40y = np.zeros((len(t) - 200, len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_E_Hbr_40y)
subs_matrix_E_Hbr_40y = np.vstack((zero_matrix_E_Hbr_40y, subs_matrix_E_Hbr_40y))
print(subs_matrix_E_Hbr_40y[:, :4])

matrix_tot_E_Hbr_40y = (tf, 1)
decomp_tot_CO2_E_Hbr_40y = np.zeros(matrix_tot_E_Hbr_40y)

i = 0
while i < tf:
    decomp_tot_CO2_E_Hbr_40y[:, 0] = decomp_tot_CO2_E_Hbr_40y[:, 0] + subs_matrix_E_Hbr_40y[:, i]
    i = i + 1

print(decomp_tot_CO2_E_Hbr_40y[:, 0])

# plotting
t = np.arange(0, tf)
plt.plot(t,decomp_tot_CO2_S1_Ac_7y,label='Ac_7y') plt.plot(t,decomp_tot_CO2_S1_Ac_18y,label='Ac_18y') plt.plot(t,decomp_tot_CO2_S1_Tgr_40y,label='Tgr_40y') plt.plot(t,decomp_tot_CO2_S1_Tgr_60y,label='Tgr_60y') plt.plot(t,decomp_tot_CO2_E_Hbr_40y,label='E_Hbr_40y') plt.xlim(0,200) plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False) plt.show() #%% #Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated #https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two #C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO Emissions_S1_Ac_7y = [c_firewood_energy_S1_Ac7, decomp_tot_S1_Ac_7y, TestDSM1_Ac7.o, PH_Emissions_HWP1_Ac_7y, decomp_tot_CO2_S1_Ac_7y[:,0]] Emissions_S1_Ac_18y = [c_firewood_energy_S1_Ac18, decomp_tot_S1_Ac_18y, TestDSM1_Ac18.o, PH_Emissions_HWP1_Ac_18y, decomp_tot_CO2_S1_Ac_18y[:,0]] Emissions_S1_Tgr_40y = [c_firewood_energy_S1_Tgr40, decomp_tot_S1_Tgr_40y, TestDSM1_Tgr40.o, PH_Emissions_HWP1_Tgr_40y, decomp_tot_CO2_S1_Tgr_40y[:,0]] Emissions_S1_Tgr_60y = [c_firewood_energy_S1_Tgr60, decomp_tot_S1_Tgr_60y, TestDSM1_Tgr60.o, PH_Emissions_HWP1_Tgr_60y, decomp_tot_CO2_S1_Tgr_60y[:,0]] Emissions_E_Hbr_40y = [c_firewood_energy_E_Hbr40, c_pellets_Hbr_40y, decomp_tot_E_Hbr_40y, TestDSME_Hbr40.o, PH_Emissions_HWPE_Hbr_40y, decomp_tot_CO2_E_Hbr_40y[:,0]] Emissions_PF_FP_S1_Ac_7y = [sum(x) for x in zip(*Emissions_S1_Ac_7y)] Emissions_PF_FP_S1_Ac_18y = [sum(x) for x in zip(*Emissions_S1_Ac_18y)] Emissions_PF_FP_S1_Tgr_40y = [sum(x) for x in zip(*Emissions_S1_Tgr_40y)] Emissions_PF_FP_S1_Tgr_60y = [sum(x) for x in zip(*Emissions_S1_Tgr_60y)] Emissions_PF_FP_E_Hbr_40y = [sum(x) for x in zip(*Emissions_E_Hbr_40y)] #CH4_S1_Ac_7y Emissions_CH4_PF_FP_S1_Ac_7y = decomp_tot_CH4_S1_Ac_7y[:,0] #CH4_S1_Ac_18y Emissions_CH4_PF_FP_S1_Ac_18y = decomp_tot_CH4_S1_Ac_18y[:,0] #CH4_S1_Tgr_40y Emissions_CH4_PF_FP_S1_Tgr_40y = decomp_tot_CH4_S1_Tgr_40y[:,0] #CH4_S1_Tgr_60y 
# CH4_S1_Tgr_60y
Emissions_CH4_PF_FP_S1_Tgr_60y = decomp_tot_CH4_S1_Tgr_60y[:, 0]

# CH4_E_Hbr_40y
Emissions_CH4_PF_FP_E_Hbr_40y = decomp_tot_CH4_E_Hbr_40y[:, 0]

#%%

# Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from the
# Step (8) calculation.

# Year column 0..tf-1.
year = []
for x in range(0, tf):
    year.append(x)
print(year)

import itertools  # kept for any later use in the script

# Placeholder CH4 column and reference-emission column: both are tf zeros.
# (The original built these via itertools.chain/repeat; [0] * tf is identical.)
lst = [0]
Emissions_CH4 = [0] * tf
print(Emissions_CH4)

lst1 = [0]
Emission_ref = [0] * tf
print(Emission_ref)

# A 1 kg CO2 pulse at year 0 is the emission reference for the dynamic GWP.
Emission_ref[0] = 1
print(Emission_ref)

Col1 = year
Col2_S1_Ac_7y = Emissions_PF_FP_S1_Ac_7y
Col2_S1_Ac_18y = Emissions_PF_FP_S1_Ac_18y
Col2_S1_Tgr_40y = Emissions_PF_FP_S1_Tgr_40y
Col2_S1_Tgr_60y = Emissions_PF_FP_S1_Tgr_60y
Col2_E_Hbr_40y = Emissions_PF_FP_E_Hbr_40y
Col3_S1_Ac_7y = Emissions_CH4_PF_FP_S1_Ac_7y
Col3_S1_Ac_18y = Emissions_CH4_PF_FP_S1_Ac_18y
Col3_S1_Tgr_40y = Emissions_CH4_PF_FP_S1_Tgr_40y
Col3_S1_Tgr_60y = Emissions_CH4_PF_FP_S1_Tgr_60y
Col3_E_Hbr_40y = Emissions_CH4_PF_FP_E_Hbr_40y
Col4 = Emission_ref
Col5 = flat_list_Ac_7y
Col6 = flat_list_Ac_18y
Col7 = flat_list_Tgr_40y
Col8 = flat_list_Tgr_60y
Col9 = flat_list_Hbr_40y

# A. crassicarpa
df1_Ac_7y = pd.DataFrame.from_dict({'Year': Col1, 'kg_CO2': Col2_S1_Ac_7y, 'kg_CH4': Col3_S1_Ac_7y, 'kg_CO2_seq': Col5, 'emission_ref': Col4})
df1_Ac_18y = pd.DataFrame.from_dict({'Year': Col1, 'kg_CO2': Col2_S1_Ac_18y, 'kg_CH4': Col3_S1_Ac_18y, 'kg_CO2_seq': Col6, 'emission_ref': Col4})

# T. grandis
df1_Tgr_40y = pd.DataFrame.from_dict({'Year': Col1, 'kg_CO2': Col2_S1_Tgr_40y, 'kg_CH4': Col3_S1_Tgr_40y, 'kg_CO2_seq': Col7, 'emission_ref': Col4})
df1_Tgr_60y = pd.DataFrame.from_dict({'Year': Col1, 'kg_CO2': Col2_S1_Tgr_60y, 'kg_CH4': Col3_S1_Tgr_60y, 'kg_CO2_seq': Col8, 'emission_ref': Col4})

# H. brasiliensis
dfE_Hbr_40y = pd.DataFrame.from_dict({'Year': Col1, 'kg_CO2': Col2_E_Hbr_40y, 'kg_CH4': Col3_E_Hbr_40y, 'kg_CO2_seq': Col9, 'emission_ref': Col4})

writer = pd.ExcelWriter('emissions_seq_PF_FP_EC_RB.xlsx', engine='xlsxwriter')

df1_Ac_7y.to_excel(writer, sheet_name='PF_FP_S1_Ac_7y', header=True, index=False)
df1_Ac_18y.to_excel(writer, sheet_name='PF_FP_S1_Ac_18y', header=True, index=False)
df1_Tgr_40y.to_excel(writer, sheet_name='PF_FP_S1_Tgr_40y', header=True, index=False)
df1_Tgr_60y.to_excel(writer, sheet_name='PF_FP_S1_Tgr_60y', header=True, index=False)
dfE_Hbr_40y.to_excel(writer, sheet_name='PF_FP_E_Hbr_40y', header=True, index=False)

# FIX: ExcelWriter.save() is deprecated (removed in recent pandas);
# close() writes the workbook, so the separate save() call was dropped.
writer.close()

#%%

## DYNAMIC LCA - wood-based scenarios

# Step (10): General parameters for the dynamic LCA calculation.
aCH4 = 0.129957e-12              # CH4 instantaneous radiative forcing per unit mass [W/m2/kgCH4]
TauCH4 = 12                      # CH4 lifetime (years)
aCO2 = 0.0018088e-12             # CO2 instantaneous radiative forcing per unit mass [W/m2/kgCO2]
TauCO2 = [172.9, 18.51, 1.186]   # Bern carbon cycle-climate model time constants
aBern = [0.259, 0.338, 0.186]    # Bern model coefficients
a0Bern = 0.217                   # Bern model permanent fraction
# tf = 202 so that DCF(t-i) reaches DCF(201), covering the impact of the
# year-200 emission (there is no DCF(0)).
tf = 202

#%%

# Step (11): Bern 2.5 CC model -- atmospheric load C(t) for CO2 and CH4.
t = range(0, tf, 1)


# Time-dependent atmospheric fraction of a CO2 pulse (Bern model).
def C_CO2(t):
    return a0Bern + aBern[0] * np.exp(-t / TauCO2[0]) + aBern[1] * np.exp(-t / TauCO2[1]) + aBern[2] * np.exp(-t / TauCO2[2])


output_CO2 = np.array([C_CO2(ti) for ti in t])
print(output_CO2)


# Single-exponential decay of a CH4 pulse.
def C_CH4(t):
    return np.exp(-t / TauCH4)


output_CH4 = np.array([C_CH4(ti) for ti in t])

plt.xlim([0, 200])
plt.ylim([0, 1.1])
plt.plot(t, output_CO2, output_CH4)
plt.xlabel('Time (year)')
plt.ylabel('Fraction of CO$_2$')
plt.show()

output_CH4.size

#%%

# Integrate the CO2 atmospheric load over each one-year interval.
s = []
t = np.arange(0, tf, 1)
for i in t:
    s.append(quad(C_CO2, i - 1, i))
res_list_CO2 = [x[0] for x in s]
len(res_list_CO2)

#%%

# Same integration for CH4.
s = []
for i in t:
    s.append(quad(C_CH4, i - 1, i))
res_list_CH4 = [p[0] for p in s]

plt.xlim([0, 200])
plt.ylim([0, 1.5])
plt.plot(t, res_list_CO2, res_list_CH4)
plt.show()

#%%

# Step (12): Dynamic characterization factors (DCF) for CO2 and CH4.
DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)

DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)

plt.xlim([0, 200])
plt.ylim([0, 4e-15])
plt.plot(t, DCF_inst_CO2, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()

len(DCF_inst_CO2)

#%%

# Step (13): Import emission data written in Step (9).

## wood-based
# read S1_Ac_7y
df = pd.read_excel('emissions_seq_PF_FP_EC_RB.xlsx', 'PF_FP_S1_Ac_7y')
emission_CO2_S1_Ac_7y = df['kg_CO2'].tolist()
emission_CH4_S1_Ac_7y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Ac_7y = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()

# read S1_Ac_18y
df = pd.read_excel('emissions_seq_PF_FP_EC_RB.xlsx', 'PF_FP_S1_Ac_18y')
emission_CO2_S1_Ac_18y = df['kg_CO2'].tolist()
emission_CH4_S1_Ac_18y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Ac_18y = df['kg_CO2_seq'].tolist()

# read S1_Tgr_40y
df = pd.read_excel('emissions_seq_PF_FP_EC_RB.xlsx', 'PF_FP_S1_Tgr_40y')
emission_CO2_S1_Tgr_40y = df['kg_CO2'].tolist()
emission_CH4_S1_Tgr_40y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Tgr_40y = df['kg_CO2_seq'].tolist()

# read S1_Tgr_60y
df = pd.read_excel('emissions_seq_PF_FP_EC_RB.xlsx', 'PF_FP_S1_Tgr_60y')
emission_CO2_S1_Tgr_60y = df['kg_CO2'].tolist()
emission_CH4_S1_Tgr_60y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Tgr_60y = df['kg_CO2_seq'].tolist()

# read E_Hbr_40y
df = pd.read_excel('emissions_seq_PF_FP_EC_RB.xlsx', 'PF_FP_E_Hbr_40y')
emission_CO2_E_Hbr_40y = df['kg_CO2'].tolist()
emission_CH4_E_Hbr_40y = df['kg_CH4'].tolist()
emission_CO2_seq_E_Hbr_40y = df['kg_CO2_seq'].tolist()

#%%

# Step (14): Import emission data for the counter-use of non-renewable
# materials/energy scenarios (NR).

# read S1_Ac_7y
df = pd.read_excel('NonRW_PF_FP.xlsx', 'PF_FP_S1_Ac_7y')
emissions_NonRW_S1_Ac_7y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Ac_7y_seq = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()

# read S1_Ac_18y
df = pd.read_excel('NonRW_PF_FP.xlsx', 'PF_FP_S1_Ac_18y')
emissions_NonRW_S1_Ac_18y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Ac_18y_seq = df['kg_CO2_seq'].tolist()

# read S1_Tgr_40y
df = pd.read_excel('NonRW_PF_FP.xlsx', 'PF_FP_S1_Tgr_40y')
emissions_NonRW_S1_Tgr_40y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Tgr_40y_seq = df['kg_CO2_seq'].tolist()

# read S1_Tgr_60y
df = pd.read_excel('NonRW_PF_FP.xlsx', 'PF_FP_S1_Tgr_60y')
emissions_NonRW_S1_Tgr_60y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Tgr_60y_seq = df['kg_CO2_seq'].tolist()

# read E_Hbr_40y
df = pd.read_excel('NonRW_PF_FP.xlsx', 'PF_FP_E_Hbr_40y')
emissions_NonRW_E_Hbr_40y = df['NonRW_emissions'].tolist()
emissions_NonRW_E_Hbr_40y_seq = df['kg_CO2_seq'].tolist()

#%%

# Step (15): Time-elapsed dynamic characterization factors DCF(t-ti).

# DCF(t-i) CO2: upper-triangular matrix of DCFs shifted by the emission year.
matrix = (tf - 1, tf - 1)
DCF_CO2_ti = np.zeros(matrix)

for t in range(0, tf - 1):
    i = -1
    while i < t:
        DCF_CO2_ti[i + 1, t] = DCF_inst_CO2[t - i]
        i = i + 1

print(DCF_CO2_ti)
# sns.heatmap(DCF_CO2_ti)
DCF_CO2_ti.shape

# DCF(t-i) CH4
matrix = (tf - 1, tf - 1)
DCF_CH4_ti = np.zeros(matrix)

for t in range(0, tf - 1):
    i = -1
    while i < t:
        DCF_CH4_ti[i + 1, t] = DCF_inst_CH4[t - i]
        i = i + 1

print(DCF_CH4_ti)
# sns.heatmap(DCF_CH4_ti)
DCF_CH4_ti.shape

#%%

# Step (16): Instantaneous global warming impact (GWI).
# Columns: 0 = CO2 emissions, 1 = CH4 emissions, 2 = CO2 sequestration.

## wood-based
# S1_Ac_7y
t = np.arange(0, tf - 1, 1)

matrix_GWI_S1_Ac_7y = (tf - 1, 3)
GWI_inst_S1_Ac_7y = np.zeros(matrix_GWI_S1_Ac_7y)

for t in range(0, tf - 1):
    GWI_inst_S1_Ac_7y[t, 0] = np.sum(np.multiply(emission_CO2_S1_Ac_7y, DCF_CO2_ti[:, t]))
    GWI_inst_S1_Ac_7y[t, 1] = np.sum(np.multiply(emission_CH4_S1_Ac_7y, DCF_CH4_ti[:, t]))
    GWI_inst_S1_Ac_7y[t, 2] = np.sum(np.multiply(emission_CO2_seq_S1_Ac_7y, DCF_CO2_ti[:, t]))

matrix_GWI_tot_S1_Ac_7y = (tf - 1, 1)
GWI_inst_tot_S1_Ac_7y = np.zeros(matrix_GWI_tot_S1_Ac_7y)
GWI_inst_tot_S1_Ac_7y[:, 0] = np.array(GWI_inst_S1_Ac_7y[:, 0] + GWI_inst_S1_Ac_7y[:, 1] + GWI_inst_S1_Ac_7y[:, 2])
print(GWI_inst_tot_S1_Ac_7y[:, 0])

t = np.arange(0, tf - 1, 1)

# S1_Ac_18y
t = np.arange(0, tf - 1, 1)

matrix_GWI_S1_Ac_18y = (tf - 1, 3)
GWI_inst_S1_Ac_18y = np.zeros(matrix_GWI_S1_Ac_18y)

for t in range(0, tf - 1):
    GWI_inst_S1_Ac_18y[t, 0] = np.sum(np.multiply(emission_CO2_S1_Ac_18y, DCF_CO2_ti[:, t]))
    GWI_inst_S1_Ac_18y[t, 1] = np.sum(np.multiply(emission_CH4_S1_Ac_18y, DCF_CH4_ti[:, t]))
    GWI_inst_S1_Ac_18y[t, 2] = np.sum(np.multiply(emission_CO2_seq_S1_Ac_18y, DCF_CO2_ti[:, t]))

matrix_GWI_tot_S1_Ac_18y = (tf - 1, 1)
GWI_inst_tot_S1_Ac_18y = np.zeros(matrix_GWI_tot_S1_Ac_18y)
GWI_inst_tot_S1_Ac_18y[:, 0] = np.array(GWI_inst_S1_Ac_18y[:, 0] + GWI_inst_S1_Ac_18y[:, 1] + GWI_inst_S1_Ac_18y[:, 2])
print(GWI_inst_tot_S1_Ac_18y[:, 0])

# S1_Tgr_40y (totals follow in the next section)
t = np.arange(0, tf - 1, 1)

matrix_GWI_S1_Tgr_40y = (tf - 1, 3)
GWI_inst_S1_Tgr_40y = np.zeros(matrix_GWI_S1_Tgr_40y)

for t in range(0, tf - 1):
    GWI_inst_S1_Tgr_40y[t, 0] = np.sum(np.multiply(emission_CO2_S1_Tgr_40y, DCF_CO2_ti[:, t]))
    GWI_inst_S1_Tgr_40y[t, 1] = np.sum(np.multiply(emission_CH4_S1_Tgr_40y, DCF_CH4_ti[:, t]))
    GWI_inst_S1_Tgr_40y[t, 2] = np.sum(np.multiply(emission_CO2_seq_S1_Tgr_40y, DCF_CO2_ti[:, t]))
matrix_GWI_tot_S1_Tgr_40y = (tf-1,1) GWI_inst_tot_S1_Tgr_40y = np.zeros(matrix_GWI_tot_S1_Tgr_40y) GWI_inst_tot_S1_Tgr_40y[:,0] = np.array(GWI_inst_S1_Tgr_40y[:,0] + GWI_inst_S1_Tgr_40y[:,1] + GWI_inst_S1_Tgr_40y[:,2]) print(GWI_inst_tot_S1_Tgr_40y[:,0]) #S1_Tgr_60y t = np.arange(0,tf-1,1) matrix_GWI_S1_Tgr_60y = (tf-1,3) GWI_inst_S1_Tgr_60y = np.zeros(matrix_GWI_S1_Tgr_60y) for t in range(0,tf-1): GWI_inst_S1_Tgr_60y[t,0] = np.sum(np.multiply(emission_CO2_S1_Tgr_60y,DCF_CO2_ti[:,t])) GWI_inst_S1_Tgr_60y[t,1] = np.sum(np.multiply(emission_CH4_S1_Tgr_60y,DCF_CH4_ti[:,t])) GWI_inst_S1_Tgr_60y[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_Tgr_60y,DCF_CO2_ti[:,t])) matrix_GWI_tot_S1_Tgr_60y = (tf-1,1) GWI_inst_tot_S1_Tgr_60y = np.zeros(matrix_GWI_tot_S1_Tgr_60y) GWI_inst_tot_S1_Tgr_60y[:,0] = np.array(GWI_inst_S1_Tgr_60y[:,0] + GWI_inst_S1_Tgr_60y[:,1] + GWI_inst_S1_Tgr_60y[:,2]) print(GWI_inst_tot_S1_Tgr_60y[:,0]) #E_Hbr_40y t = np.arange(0,tf-1,1) matrix_GWI_E_Hbr_40y = (tf-1,3) GWI_inst_E_Hbr_40y = np.zeros(matrix_GWI_E_Hbr_40y) for t in range(0,tf-1): GWI_inst_E_Hbr_40y[t,0] = np.sum(np.multiply(emission_CO2_E_Hbr_40y,DCF_CO2_ti[:,t])) GWI_inst_E_Hbr_40y[t,1] = np.sum(np.multiply(emission_CH4_E_Hbr_40y,DCF_CH4_ti[:,t])) GWI_inst_E_Hbr_40y[t,2] = np.sum(np.multiply(emission_CO2_seq_E_Hbr_40y,DCF_CO2_ti[:,t])) matrix_GWI_tot_E_Hbr_40y = (tf-1,1) GWI_inst_tot_E_Hbr_40y = np.zeros(matrix_GWI_tot_E_Hbr_40y) GWI_inst_tot_E_Hbr_40y[:,0] = np.array(GWI_inst_E_Hbr_40y[:,0] + GWI_inst_E_Hbr_40y[:,1] + GWI_inst_E_Hbr_40y[:,2]) print(GWI_inst_tot_E_Hbr_40y[:,0]) ##NonRW #S1_Ac_7y t = np.arange(0,tf-1,1) matrix_GWI_NonRW_S1_Ac_7y = (tf-1,2) GWI_inst_NonRW_S1_Ac_7y = np.zeros(matrix_GWI_NonRW_S1_Ac_7y) for t in range(0,tf-1): GWI_inst_NonRW_S1_Ac_7y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Ac_7y,DCF_CO2_ti[:,t])) GWI_inst_NonRW_S1_Ac_7y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Ac_7y_seq,DCF_CO2_ti[:,t])) matrix_GWI_tot_NonRW_S1_Ac_7y = (tf-1,1) 
GWI_inst_tot_NonRW_S1_Ac_7y = np.zeros(matrix_GWI_tot_NonRW_S1_Ac_7y) GWI_inst_tot_NonRW_S1_Ac_7y[:,0] = np.array(GWI_inst_NonRW_S1_Ac_7y[:,0] + GWI_inst_NonRW_S1_Ac_7y[:,1]) print(GWI_inst_tot_NonRW_S1_Ac_7y[:,0]) #S1_Ac_18y t = np.arange(0,tf-1,1) matrix_GWI_NonRW_S1_Ac_18y = (tf-1,2) GWI_inst_NonRW_S1_Ac_18y = np.zeros(matrix_GWI_NonRW_S1_Ac_18y) for t in range(0,tf-1): GWI_inst_NonRW_S1_Ac_18y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Ac_18y,DCF_CO2_ti[:,t])) GWI_inst_NonRW_S1_Ac_18y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Ac_18y_seq,DCF_CO2_ti[:,t])) matrix_GWI_tot_NonRW_S1_Ac_18y = (tf-1,1) GWI_inst_tot_NonRW_S1_Ac_18y = np.zeros(matrix_GWI_tot_NonRW_S1_Ac_18y) GWI_inst_tot_NonRW_S1_Ac_18y[:,0] = np.array(GWI_inst_NonRW_S1_Ac_18y[:,0] + GWI_inst_NonRW_S1_Ac_18y[:,1]) print(GWI_inst_tot_NonRW_S1_Ac_18y[:,0]) #S1_Tgr_40y t = np.arange(0,tf-1,1) matrix_GWI_NonRW_S1_Tgr_40y = (tf-1,2) GWI_inst_NonRW_S1_Tgr_40y = np.zeros(matrix_GWI_NonRW_S1_Tgr_40y) for t in range(0,tf-1): GWI_inst_NonRW_S1_Tgr_40y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_40y,DCF_CO2_ti[:,t])) GWI_inst_NonRW_S1_Tgr_40y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_40y_seq,DCF_CO2_ti[:,t])) matrix_GWI_tot_NonRW_S1_Tgr_40y = (tf-1,1) GWI_inst_tot_NonRW_S1_Tgr_40y = np.zeros(matrix_GWI_tot_NonRW_S1_Tgr_40y) GWI_inst_tot_NonRW_S1_Tgr_40y[:,0] = np.array(GWI_inst_NonRW_S1_Tgr_40y[:,0] + GWI_inst_NonRW_S1_Tgr_40y[:,1]) print(GWI_inst_tot_NonRW_S1_Tgr_40y[:,0]) #S1_Tgr_60y t = np.arange(0,tf-1,1) matrix_GWI_NonRW_S1_Tgr_60y = (tf-1,2) GWI_inst_NonRW_S1_Tgr_60y = np.zeros(matrix_GWI_NonRW_S1_Tgr_60y) for t in range(0,tf-1): GWI_inst_NonRW_S1_Tgr_60y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_60y,DCF_CO2_ti[:,t])) GWI_inst_NonRW_S1_Tgr_60y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_60y_seq,DCF_CO2_ti[:,t])) matrix_GWI_tot_NonRW_S1_Tgr_60y = (tf-1,1) GWI_inst_tot_NonRW_S1_Tgr_60y = np.zeros(matrix_GWI_tot_NonRW_S1_Tgr_60y) GWI_inst_tot_NonRW_S1_Tgr_60y[:,0] = 
np.array(GWI_inst_NonRW_S1_Tgr_60y[:,0] + GWI_inst_NonRW_S1_Tgr_60y[:,1]) print(GWI_inst_tot_NonRW_S1_Tgr_60y[:,0]) #E_Hbr_40y t = np.arange(0,tf-1,1) matrix_GWI_NonRW_E_Hbr_40y = (tf-1,2) GWI_inst_NonRW_E_Hbr_40y = np.zeros(matrix_GWI_NonRW_E_Hbr_40y) for t in range(0,tf-1): GWI_inst_NonRW_E_Hbr_40y[t,0] = np.sum(np.multiply(emissions_NonRW_E_Hbr_40y,DCF_CO2_ti[:,t])) GWI_inst_NonRW_E_Hbr_40y[t,1] = np.sum(np.multiply(emissions_NonRW_E_Hbr_40y_seq,DCF_CO2_ti[:,t])) matrix_GWI_tot_NonRW_E_Hbr_40y = (tf-1,1) GWI_inst_tot_NonRW_E_Hbr_40y = np.zeros(matrix_GWI_tot_NonRW_E_Hbr_40y) GWI_inst_tot_NonRW_E_Hbr_40y[:,0] = np.array(GWI_inst_NonRW_E_Hbr_40y[:,0] + GWI_inst_NonRW_E_Hbr_40y[:,1]) print(GWI_inst_tot_NonRW_E_Hbr_40y[:,0]) t = np.arange(0,tf-1,1) #create zero list to highlight the horizontal line for 0 def zerolistmaker(n): listofzeros = [0] * (n) return listofzeros #convert to flat list GWI_inst_tot_NonRW_S1_Ac_7y = np.array([item for sublist in GWI_inst_tot_NonRW_S1_Ac_7y for item in sublist]) GWI_inst_tot_NonRW_S1_Ac_18y = np.array([item for sublist in GWI_inst_tot_NonRW_S1_Ac_18y for item in sublist]) GWI_inst_tot_NonRW_S1_Tgr_60y = np.array([item for sublist in GWI_inst_tot_NonRW_S1_Tgr_60y for item in sublist]) GWI_inst_tot_NonRW_E_Hbr_40y = np.array([item for sublist in GWI_inst_tot_NonRW_E_Hbr_40y for item in sublist]) GWI_inst_tot_S1_Ac_7y = np.array([item for sublist in GWI_inst_tot_S1_Ac_7y for item in sublist]) GWI_inst_tot_S1_Ac_18y = np.array([item for sublist in GWI_inst_tot_S1_Ac_18y for item in sublist]) GWI_inst_tot_S1_Tgr_60y = np.array([item for sublist in GWI_inst_tot_S1_Tgr_60y for item in sublist]) GWI_inst_tot_E_Hbr_40y = np.array([item for sublist in GWI_inst_tot_E_Hbr_40y for item in sublist]) plt.plot(t, GWI_inst_tot_NonRW_S1_Ac_7y, color='olive', label='NR_M_EC_Ac_7y', ls='--', alpha=0.55) plt.plot(t, GWI_inst_tot_NonRW_S1_Ac_18y, color='forestgreen', label='NR_M_EC_Ac_18y', ls='--', alpha=0.55) #plt.plot(t, 
GWI_inst_tot_NonRW_S1_Tgr_40y, color='lightcoral', label='NR_M_EC_Tgr_40y', ls='--', alpha=0.55) plt.plot(t, GWI_inst_tot_NonRW_S1_Tgr_60y, color='deeppink', label='NR_M_EC_Tgr_60y', ls='--', alpha=0.55) plt.plot(t, GWI_inst_tot_NonRW_E_Hbr_40y, color='royalblue', label='NR_E_EC_Hbr_40y', ls='--', alpha=0.55) plt.plot(t, GWI_inst_tot_S1_Ac_7y, color='olive', label='M_EC_Ac_7y') plt.plot(t, GWI_inst_tot_S1_Ac_18y, color='forestgreen', label='M_EC_Ac_18y') #plt.plot(t, GWI_inst_tot_S1_Tgr_40y, color='lightcoral', label='M_EC_Tgr_40y') plt.plot(t, GWI_inst_tot_S1_Tgr_60y, color='deeppink', label='M_EC_Tgr_60y') plt.plot(t, GWI_inst_tot_E_Hbr_40y, color='royalblue', label='E_EC_Hbr_40y') plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75) #plt.fill_between(t, GWI_inst_tot_NonRW_E_Hbr_40y, GWI_inst_tot_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3) #plt.fill_between(t, GWI_inst_tot_NonRW_S1_Ac_7y, GWI_inst_tot_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3) plt.grid(True) plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False) plt.xlim(0,200) plt.ylim(-1e-9,1.4e-9) plt.title('Instantaneous GWI, PF_FP_EC') plt.xlabel('Time (year)') #plt.ylabel('GWI_inst (10$^{-12}$ W/m$^2$)') plt.ylabel('GWI_inst (W/m$^2$)')# plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_inst_NonRW_PF_FP_S1', dpi=300) plt.show() len(GWI_inst_tot_NonRW_S1_Ac_18y) #%% #Step (17): Calculate cumulative global warming impact (GWI) ##wood-based GWI_cum_S1_Ac_7y = np.cumsum(GWI_inst_tot_S1_Ac_7y) GWI_cum_S1_Ac_18y = np.cumsum(GWI_inst_tot_S1_Ac_18y) GWI_cum_S1_Tgr_40y = np.cumsum(GWI_inst_tot_S1_Tgr_40y) GWI_cum_S1_Tgr_60y = np.cumsum(GWI_inst_tot_S1_Tgr_60y) GWI_cum_E_Hbr_40y = np.cumsum(GWI_inst_tot_E_Hbr_40y) ##NonRW #GWI_cumulative --> check again! 
try to run it with only materials case GWI_cum_NonRW_S1_Ac_7y = np.cumsum(GWI_inst_tot_NonRW_S1_Ac_7y) GWI_cum_NonRW_S1_Ac_18y = np.cumsum(GWI_inst_tot_NonRW_S1_Ac_18y) GWI_cum_NonRW_S1_Tgr_40y = np.cumsum(GWI_inst_tot_NonRW_S1_Tgr_40y) GWI_cum_NonRW_S1_Tgr_60y = np.cumsum(GWI_inst_tot_NonRW_S1_Tgr_60y) GWI_cum_NonRW_E_Hbr_40y = np.cumsum(GWI_inst_tot_NonRW_E_Hbr_40y) #print(GWI_cum_NonRW_S1_Ac_18y) plt.xlabel('Time (year)') #plt.ylabel('GWI_cum (10$^{-10}$ W/m$^2$)') plt.ylabel('GWI_cum (W/m$^2$)') plt.xlim(0,200) plt.ylim(-1e-7,1.5e-7) plt.title('Cumulative GWI, PF_FP_EC') plt.plot(t, GWI_cum_NonRW_S1_Ac_7y, color='olive', label='NR_M_EC_Ac_7y', ls='--', alpha=0.55) plt.plot(t, GWI_cum_NonRW_S1_Ac_18y, color='forestgreen', label='NR_M_EC_Ac_18y', ls='--', alpha=0.55) #plt.plot(t, GWI_cum_NonRW_S1_Tgr_40y, color='lightcoral', label='NR_M_EC_Tgr_40y', ls='--', alpha=0.55) plt.plot(t, GWI_cum_NonRW_S1_Tgr_60y, color='deeppink', label='NR_M_EC_Tgr_60y', ls='--', alpha=0.55) plt.plot(t, GWI_cum_NonRW_E_Hbr_40y, color='royalblue', label='NR_E_EC_Hbr_40y', ls='--', alpha=0.55) plt.plot(t, GWI_cum_S1_Ac_7y, color='olive', label='M_EC_Ac_7y') plt.plot(t, GWI_cum_S1_Ac_18y, color='forestgreen', label='M_EC_Ac_18y') #plt.plot(t, GWI_cum_S1_Tgr_40y, color='lightcoral', label='M_EC_Tgr_40y') plt.plot(t, GWI_cum_S1_Tgr_60y, color='deeppink', label='M_EC_Tgr_60y') plt.plot(t, GWI_cum_E_Hbr_40y, color='royalblue', label='E_EC_Hbr_40y') plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75) #plt.fill_between(t, GWI_cum_NonRW_S1_Tgr_60y, GWI_cum_NonRW_S1_Ac_7y, color='lightcoral', alpha=0.3) #plt.fill_between(t, GWI_cum_NonRW_E_Hbr_40y, GWI_cum_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3) plt.grid(True) plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False) plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_cum_NonRW_PF_FP_EC', dpi=300) plt.show() len(GWI_cum_NonRW_S1_Ac_18y) #%% #Step (18): Determine the 
#Step (18): Determine the Instantaneous and Cumulative GWI for the emission
#reference (1 kg CO2 emission at time zero) before performing dynamic GWP calculation

t = np.arange(0, tf-1, 1)

# Instantaneous GWI of the 1 kg CO2 reference pulse, convolved with the CO2 DCF.
matrix_GWI_ref = (tf-1, 1)
GWI_inst_ref = np.zeros(matrix_GWI_ref)

for t in range(0, tf-1):
    GWI_inst_ref[t, 0] = np.sum(np.multiply(emission_CO2_ref, DCF_CO2_ti[:, t]))

#print(GWI_inst_ref[:,0])
len(GWI_inst_ref)

#determine the GWI cumulative for the emission reference
t = np.arange(0, tf-1, 1)

GWI_cum_ref = np.cumsum(GWI_inst_ref[:, 0])
#print(GWI_cum_ref)

plt.xlabel('Time (year)')
plt.ylabel('GWI_cum_ref (10$^{-13}$ W/m$^2$.kgCO$_2$)')
plt.plot(t, GWI_cum_ref)

len(GWI_cum_ref)

#%%
#Step (19): Calculate dynamic global warming potential (GWPdyn)
# GWPdyn = scenario cumulative GWI / reference cumulative GWI; the factor 1000
# converts the kg-based reference to tonnes (result in t-CO2-eq).

##wood-based
GWP_dyn_cum_S1_Ac_7y = [x/(y*1000) for x, y in zip(GWI_cum_S1_Ac_7y, GWI_cum_ref)]
GWP_dyn_cum_S1_Ac_18y = [x/(y*1000) for x, y in zip(GWI_cum_S1_Ac_18y, GWI_cum_ref)]
GWP_dyn_cum_S1_Tgr_40y = [x/(y*1000) for x, y in zip(GWI_cum_S1_Tgr_40y, GWI_cum_ref)]
GWP_dyn_cum_S1_Tgr_60y = [x/(y*1000) for x, y in zip(GWI_cum_S1_Tgr_60y, GWI_cum_ref)]
GWP_dyn_cum_E_Hbr_40y = [x/(y*1000) for x, y in zip(GWI_cum_E_Hbr_40y, GWI_cum_ref)]

##NonRW
GWP_dyn_cum_NonRW_S1_Ac_7y = [x/(y*1000) for x, y in zip(GWI_cum_NonRW_S1_Ac_7y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1_Ac_18y = [x/(y*1000) for x, y in zip(GWI_cum_NonRW_S1_Ac_18y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1_Tgr_40y = [x/(y*1000) for x, y in zip(GWI_cum_NonRW_S1_Tgr_40y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1_Tgr_60y = [x/(y*1000) for x, y in zip(GWI_cum_NonRW_S1_Tgr_60y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_E_Hbr_40y = [x/(y*1000) for x, y in zip(GWI_cum_NonRW_E_Hbr_40y, GWI_cum_ref)]

#print(GWP_dyn_cum_NonRW_S1_Ac_18y)

fig = plt.figure()
fig.show()
ax = fig.add_subplot(111)

ax.plot(t, GWP_dyn_cum_NonRW_S1_Ac_7y, color='olive', label='NR_M_EC_Ac_7y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_S1_Ac_18y, color='forestgreen', label='NR_M_EC_Ac_18y', ls='--', alpha=0.55)
#ax.plot(t, GWP_dyn_cum_NonRW_S1_Tgr_40y, color='lightcoral', label='NR_M_EC_Tgr_40y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_S1_Tgr_60y, color='deeppink', label='NR_M_EC_Tgr_60y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_E_Hbr_40y, color='royalblue', label='NR_E_EC_Hbr_40y', ls='--', alpha=0.55)

ax.plot(t, GWP_dyn_cum_S1_Ac_7y, color='olive', label='M_EC_Ac_7y')
ax.plot(t, GWP_dyn_cum_S1_Ac_18y, color='forestgreen', label='M_EC_Ac_18y')
#ax.plot(t, GWP_dyn_cum_S1_Tgr_40y, color='lightcoral', label='M_EC_Tgr_40y')
ax.plot(t, GWP_dyn_cum_S1_Tgr_60y, color='deeppink', label='M_EC_Tgr_60y')
ax.plot(t, GWP_dyn_cum_E_Hbr_40y, color='royalblue', label='E_EC_Hbr_40y')

ax.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)

#plt.fill_between(t, GWP_dyn_cum_NonRW_S1_Ac_7y, GWP_dyn_cum_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3)
#plt.fill_between(t, GWP_dyn_cum_NonRW_E_Hbr_40y, GWP_dyn_cum_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3)

plt.grid(True)

ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)
ax.set_xlim(0, 200)
ax.set_ylim(-750, 1000)
#ax.set_ylim(-250,1500)

ax.set_xlabel('Time (year)')
ax.set_ylabel('GWP$_{dyn}$ (t-CO$_2$-eq)')
ax.set_title('Dynamic GWP, PF_FP_EC')

# Raw string to avoid relying on unrecognized backslash escapes in the path.
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_cum_NonRW_PF_FP_S1', dpi=300)
plt.draw()

#%%
#Step (20): Exporting the data behind result graphs to Excel

year = []
for x in range(0, 201):
    year.append(x)

### Create Column
Col1 = year

##GWI_Inst
#GWI_inst from wood-based scenarios
Col_GI_1 = GWI_inst_tot_S1_Ac_7y
Col_GI_2 = GWI_inst_tot_S1_Ac_18y
Col_GI_3 = GWI_inst_tot_S1_Tgr_60y
Col_GI_4 = GWI_inst_tot_E_Hbr_40y

#print(Col_GI_1)
#print(np.shape(Col_GI_1))

#GWI_inst from counter use scenarios
Col_GI_5 = GWI_inst_tot_NonRW_S1_Ac_7y
Col_GI_6 = GWI_inst_tot_NonRW_S1_Ac_18y
Col_GI_7 = GWI_inst_tot_NonRW_S1_Tgr_60y
Col_GI_8 = GWI_inst_tot_NonRW_E_Hbr_40y

#print(Col_GI_7)
#print(np.shape(Col_GI_7))

#create column results
##GWI_cumulative
#GWI_cumulative from wood-based scenarios
Col_GC_1 = GWI_cum_S1_Ac_7y
Col_GC_2 = GWI_cum_S1_Ac_18y
Col_GC_3 = GWI_cum_S1_Tgr_60y
Col_GC_4 = GWI_cum_E_Hbr_40y

#GWI_cumulative from counter use scenarios
Col_GC_5 = GWI_cum_NonRW_S1_Ac_7y
Col_GC_6 = GWI_cum_NonRW_S1_Ac_18y
Col_GC_7 = GWI_cum_NonRW_S1_Tgr_60y
Col_GC_8 = GWI_cum_NonRW_E_Hbr_40y

#create column results
##GWPdyn
#GWPdyn from wood-based scenarios
Col_GWP_1 = GWP_dyn_cum_S1_Ac_7y
Col_GWP_2 = GWP_dyn_cum_S1_Ac_18y
Col_GWP_3 = GWP_dyn_cum_S1_Tgr_60y
Col_GWP_4 = GWP_dyn_cum_E_Hbr_40y

#GWPdyn from counter use scenarios
Col_GWP_5 = GWP_dyn_cum_NonRW_S1_Ac_7y
Col_GWP_6 = GWP_dyn_cum_NonRW_S1_Ac_18y
Col_GWP_7 = GWP_dyn_cum_NonRW_S1_Tgr_60y
Col_GWP_8 = GWP_dyn_cum_NonRW_E_Hbr_40y

#Create column results
dfM_EC_GI = pd.DataFrame.from_dict({'Year': Col1, 'M_EC_Ac_7y (W/m2)': Col_GI_1,
                                    'M_EC_Ac_18y (W/m2)': Col_GI_2,
                                    'M_EC_Tgr_60y (W/m2)': Col_GI_3,
                                    'E_EC_Hbr_40y (W/m2)': Col_GI_4,
                                    'NR_M_EC_Ac_7y (W/m2)': Col_GI_5,
                                    'NR_M_EC_Ac_18y (W/m2)': Col_GI_6,
                                    'NR_M_EC_Tgr_60y (W/m2)': Col_GI_7,
                                    'NR_E_EC_Hbr_40y (W/m2)': Col_GI_8})

dfM_EC_GC = pd.DataFrame.from_dict({'Year': Col1, 'M_EC_Ac_7y (W/m2)': Col_GC_1,
                                    'M_EC_Ac_18y (W/m2)': Col_GC_2,
                                    'M_EC_Tgr_60y (W/m2)': Col_GC_3,
                                    'E_EC_Hbr_40y (W/m2)': Col_GC_4,
                                    'NR_M_EC_Ac_7y (W/m2)': Col_GC_5,
                                    'NR_M_EC_Ac_18y (W/m2)': Col_GC_6,
                                    'NR_M_EC_Tgr_60y (W/m2)': Col_GC_7,
                                    'NR_E_EC_Hbr_40y (W/m2)': Col_GC_8})

dfM_EC_GWPdyn = pd.DataFrame.from_dict({'Year': Col1, 'M_EC_Ac_7y (t-CO2-eq)': Col_GWP_1,
                                        'M_EC_Ac_18y (t-CO2-eq)': Col_GWP_2,
                                        'M_EC_Tgr_60y (t-CO2-eq)': Col_GWP_3,
                                        'E_EC_Hbr_40y (t-CO2-eq)': Col_GWP_4,
                                        'NR_M_EC_Ac_7y (t-CO2-eq)': Col_GWP_5,
                                        'NR_M_EC_Ac_18y (t-CO2-eq)': Col_GWP_6,
                                        'NR_M_EC_Tgr_60y (t-CO2-eq)': Col_GWP_7,
                                        'NR_E_EC_Hbr_40y (t-CO2-eq)': Col_GWP_8})

#Export to excel
writer = pd.ExcelWriter('GraphResults_PF_FP_EC_RB.xlsx', engine='xlsxwriter')

dfM_EC_GI.to_excel(writer, sheet_name='Inst_GWI_PF_FP_EC', header=True, index=False)
dfM_EC_GC.to_excel(writer, sheet_name='Cumulative GWI_PF_FP_EC', header=True, index=False)
dfM_EC_GWPdyn.to_excel(writer, sheet_name='GWPdyn_PF_FP_EC', header=True, index=False)

writer.save()
writer.close()

#%%
#Step (21): Generate the excel file for the individual carbon emission and sequestration flows

#print year column
year = []
for x in range(0, 201):
    year.append(x)
print(year)

# Unit conversion: kg CO2 -> t C (divide by 1000*44/12) and kg CH4 -> t C
# (divide by 1000*16/12). NOTE: these rebind/mutate the scenario series in
# place, so this cell must run exactly once per loaded dataset.
division = 1000*44/12
division_CH4 = 1000*16/12

#M_Ac_7y
c_firewood_energy_S1_Ac7 = [x/division for x in c_firewood_energy_S1_Ac7]
decomp_tot_S1_Ac_7y = [x/division for x in decomp_tot_S1_Ac_7y]
TestDSM1_Ac7.o = [x/division for x in TestDSM1_Ac7.o]
PH_Emissions_HWP1_Ac_7y = [x/division for x in PH_Emissions_HWP1_Ac_7y]
#OC_storage_S1_Ac7 = [x/division for x in OC_storage_S1_Ac7]
flat_list_Ac_7y = [x/division for x in flat_list_Ac_7y]
decomp_tot_CO2_S1_Ac_7y[:, 0] = [x/division for x in decomp_tot_CO2_S1_Ac_7y[:, 0]]
decomp_tot_CH4_S1_Ac_7y[:, 0] = [x/division_CH4 for x in decomp_tot_CH4_S1_Ac_7y[:, 0]]

#M_Ac_18y
c_firewood_energy_S1_Ac18 = [x/division for x in c_firewood_energy_S1_Ac18]
decomp_tot_S1_Ac_18y = [x/division for x in decomp_tot_S1_Ac_18y]
TestDSM1_Ac18.o = [x/division for x in TestDSM1_Ac18.o]
PH_Emissions_HWP1_Ac_18y = [x/division for x in PH_Emissions_HWP1_Ac_18y]
#OC_storage_S1_Ac18 = [x/division for x in OC_storage_S1_Ac18]
flat_list_Ac_18y = [x/division for x in flat_list_Ac_18y]
decomp_tot_CO2_S1_Ac_18y[:, 0] = [x/division for x in decomp_tot_CO2_S1_Ac_18y[:, 0]]
decomp_tot_CH4_S1_Ac_18y[:, 0] = [x/division_CH4 for x in decomp_tot_CH4_S1_Ac_18y[:, 0]]

#M_Tgr_60y
c_firewood_energy_S1_Tgr60 = [x/division for x in c_firewood_energy_S1_Tgr60]
decomp_tot_S1_Tgr_60y = [x/division for x in decomp_tot_S1_Tgr_60y]
TestDSM1_Tgr60.o = [x/division for x in TestDSM1_Tgr60.o]
PH_Emissions_HWP1_Tgr_60y = [x/division for x in PH_Emissions_HWP1_Tgr_60y]
#OC_storage_S1_Tgr60 = [x/division for x in OC_storage_S1_Tgr60]
flat_list_Tgr_60y = [x/division for x in flat_list_Tgr_60y]
decomp_tot_CO2_S1_Tgr_60y[:, 0] = [x/division for x in decomp_tot_CO2_S1_Tgr_60y[:, 0]]
decomp_tot_CH4_S1_Tgr_60y[:, 0] = [x/division_CH4 for x in decomp_tot_CH4_S1_Tgr_60y[:, 0]]

#E_Hbr_40y
c_firewood_energy_E_Hbr40 = [x/division for x in c_firewood_energy_E_Hbr40]
c_pellets_Hbr_40y = [x/division for x in c_pellets_Hbr_40y]
decomp_tot_E_Hbr_40y = [x/division for x in decomp_tot_E_Hbr_40y]
TestDSME_Hbr40.o = [x/division for x in TestDSME_Hbr40.o]
PH_Emissions_HWPE_Hbr_40y = [x/division for x in PH_Emissions_HWPE_Hbr_40y]
#OC_storage_E_Hbr40 = [x/division for x in OC_storage_E_Hbr40]
flat_list_Hbr_40y = [x/division for x in flat_list_Hbr_40y]
decomp_tot_CO2_E_Hbr_40y[:, 0] = [x/division for x in decomp_tot_CO2_E_Hbr_40y[:, 0]]
decomp_tot_CH4_E_Hbr_40y[:, 0] = [x/division_CH4 for x in decomp_tot_CH4_E_Hbr_40y[:, 0]]

#landfill aggregate flows
# Sum the CH4 and CO2 landfill decomposition arrays element-wise, then flatten
# the resulting one-column rows into plain lists.
Landfill_decomp_PF_FP_S1_Ac_7y = decomp_tot_CH4_S1_Ac_7y, decomp_tot_CO2_S1_Ac_7y
Landfill_decomp_PF_FP_S1_Ac_18y = decomp_tot_CH4_S1_Ac_18y, decomp_tot_CO2_S1_Ac_18y
Landfill_decomp_PF_FP_S1_Tgr_60y = decomp_tot_CH4_S1_Tgr_60y, decomp_tot_CO2_S1_Tgr_60y
Landfill_decomp_PF_FP_E_Hbr_40y = decomp_tot_CH4_E_Hbr_40y, decomp_tot_CO2_E_Hbr_40y

Landfill_decomp_PF_FP_S1_Ac_7y = [sum(x) for x in zip(*Landfill_decomp_PF_FP_S1_Ac_7y)]
Landfill_decomp_PF_FP_S1_Ac_18y = [sum(x) for x in zip(*Landfill_decomp_PF_FP_S1_Ac_18y)]
Landfill_decomp_PF_FP_S1_Tgr_60y = [sum(x) for x in zip(*Landfill_decomp_PF_FP_S1_Tgr_60y)]
Landfill_decomp_PF_FP_E_Hbr_40y = [sum(x) for x in zip(*Landfill_decomp_PF_FP_E_Hbr_40y)]

Landfill_decomp_PF_FP_S1_Ac_7y = [item for sublist in Landfill_decomp_PF_FP_S1_Ac_7y for item in sublist]
Landfill_decomp_PF_FP_S1_Ac_18y = [item for sublist in Landfill_decomp_PF_FP_S1_Ac_18y for item in sublist]
Landfill_decomp_PF_FP_S1_Tgr_60y = [item for sublist in Landfill_decomp_PF_FP_S1_Tgr_60y for item in sublist]
Landfill_decomp_PF_FP_E_Hbr_40y = [item for sublist in Landfill_decomp_PF_FP_E_Hbr_40y for item in sublist]

#M_Ac_7y
Column1 = year
Column2 = c_firewood_energy_S1_Ac7
Column3 = decomp_tot_S1_Ac_7y
Column4 = TestDSM1_Ac7.o
Column5 = PH_Emissions_HWP1_Ac_7y
#Column6_1 = OC_storage_S1_Ac7
Column6 = Landfill_decomp_PF_FP_S1_Ac_7y
Column7 = flat_list_Ac_7y

#M_Ac_18y
Column8 = c_firewood_energy_S1_Ac18
Column9 = decomp_tot_S1_Ac_18y
Column10 = TestDSM1_Ac18.o
Column11 = PH_Emissions_HWP1_Ac_18y
#Column12_1 = OC_storage_S1_Ac18
Column12 = Landfill_decomp_PF_FP_S1_Ac_18y
Column13 = flat_list_Ac_18y

#M_Tgr_60y
Column14 = c_firewood_energy_S1_Tgr60
Column15 = decomp_tot_S1_Tgr_60y
Column16 = TestDSM1_Tgr60.o
Column17 = PH_Emissions_HWP1_Tgr_60y
#Column18_1 = OC_storage_S1_Tgr60
Column18 = Landfill_decomp_PF_FP_S1_Tgr_60y
Column19 = flat_list_Tgr_60y

#E_Hbr_40y
Column20 = c_firewood_energy_E_Hbr40
Column20_1 = c_pellets_Hbr_40y
Column21 = decomp_tot_E_Hbr_40y
Column22 = TestDSME_Hbr40.o
Column23 = PH_Emissions_HWPE_Hbr_40y
#Column24_1 = OC_storage_E_Hbr40
Column24 = Landfill_decomp_PF_FP_E_Hbr_40y
Column25 = flat_list_Hbr_40y

#create columns
dfM_Ac_7y = pd.DataFrame.from_dict({'Year': Column1,
                                    'F0-1: Biomass C sequestration (t-C)': Column7,
                                    #'9: Landfill storage (t-C)':Column6_1,
                                    'F1-0: Residue decomposition (t-C)': Column3,
                                    'F6-0-1: Emissions from firewood/other energy use (t-C)': Column2,
                                    'F8-0: Operational stage/processing emissions (t-C)': Column5,
                                    'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)': Column4,
                                    'F7-0: Landfill gas decomposition (t-C)': Column6})

dfM_Ac_18y = pd.DataFrame.from_dict({'Year': Column1,
                                     'F0-1: Biomass C sequestration (t-C)': Column13,
                                     # '9: Landfill storage (t-C)':Column12_1,
                                     'F1-0: Residue decomposition (t-C)': Column9,
                                     'F6-0-1: Emissions from firewood/other energy use (t-C)': Column8,
                                     'F8-0: Operational stage/processing emissions (t-C)': Column11,
                                     'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)': Column10,
                                     'F7-0: Landfill gas decomposition (t-C)': Column12})

dfE_Tgr_60y = pd.DataFrame.from_dict({'Year': Column1,
                                      'F0-1: Biomass C sequestration (t-C)': Column19,
                                      # '9: Landfill storage (t-C)':Column18_1,
                                      'F1-0: Residue decomposition (t-C)': Column15,
                                      'F6-0-1: Emissions from firewood/other energy use (t-C)': Column14,
                                      'F8-0: Operational stage/processing emissions (t-C)': Column17,
                                      'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)': Column16,
                                      'F7-0: Landfill gas decomposition (t-C)': Column18})

dfE_Hbr_40y = pd.DataFrame.from_dict({'Year': Column1,
                                      'F0-1: Biomass C sequestration (t-C)': Column25,
                                      # '9: Landfill storage (t-C)':Column24_1,
                                      'F1-0: Residue decomposition (t-C)': Column21,
                                      'F6-0-1: Emissions from firewood/other energy use (t-C)': Column20,
                                      'F8-0: Operational stage/processing emissions (t-C)': Column23,
                                      'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)': Column22,
                                      'F7-0: Landfill gas decomposition (t-C)': Column24,
                                      'F4-0: Emissions from wood pellets use (t-C)': Column20_1})

writer = pd.ExcelWriter('C_flows_PF_FP_EC_RB.xlsx', engine='xlsxwriter')

dfM_Ac_7y.to_excel(writer, sheet_name='PF_FP_M_Ac_7y (EC)', header=True, index=False)
dfM_Ac_18y.to_excel(writer, sheet_name='PF_FP_M_Ac_18y (EC)', header=True, index=False)
dfE_Tgr_60y.to_excel(writer, sheet_name='PF_FP_M_Tgr_60y (EC)', header=True, index=False)
dfE_Hbr_40y.to_excel(writer, sheet_name='PF_FP_E_Hbr_40y (EC)', header=True, index=False)

writer.save()
writer.close()

#%%
#Step (22): Plot of the individual carbon emission and sequestration flows for normal and symlog-scale graphs

#PF_FP_M_EC_Ac_7y (Existing conversion efficiency)

fig = plt.figure()
fig.show()
ax1 = fig.add_subplot(111)

ax1.plot(t, flat_list_Ac_7y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1.plot(t, OC_storage_S1_Ac7, color='darkturquoise', label='9: Landfill storage')
ax1.plot(t, decomp_tot_S1_Ac_7y, color='lightcoral', label='F1-0: Residue decomposition')
ax1.plot(t, c_firewood_energy_S1_Ac7, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1.plot(t, PH_Emissions_HWP1_Ac_7y, color='orange', label='F8-0: Operational stage/processing emissions')
ax1.plot(t, TestDSM1_Ac7.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1.plot(t, Landfill_decomp_PF_FP_S1_Ac_7y, color='yellow', label='F7-0: Landfill gas decomposition')

ax1.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)
ax1.set_xlim(-1, 200)
ax1.set_yscale('symlog')

ax1.set_xlabel('Time (year)')
ax1.set_ylabel('C flows (t-C) (symlog)')

ax1.set_title('Carbon flow, PF_FP_M_Ac_7y (EC) (symlog-scale)')

plt.show()

#%%
#plot for the individual carbon flows
#PF_FP_M_EC_Ac_7y (Existing conversion efficiency)

# Broken-axis figure: ax_a shows the large positive band, ax_b the detail band.
f, (ax_a, ax_b) = plt.subplots(2, 1, sharex=True)

ax_a.plot(t, flat_list_Ac_7y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_a.plot(t, OC_storage_S1_Ac7, color='darkturquoise', label='9: Landfill storage')
ax_a.plot(t, decomp_tot_S1_Ac_7y, color='lightcoral', label='F1-0: Residue decomposition')
ax_a.plot(t, c_firewood_energy_S1_Ac7, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_a.plot(t, PH_Emissions_HWP1_Ac_7y, color='orange', label='F8-0: Operational stage/processing emissions')
ax_a.plot(t, TestDSM1_Ac7.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax_a.plot(t, Landfill_decomp_PF_FP_S1_Ac_7y, color='yellow', label='F7-0: Landfill gas decomposition')

ax_b.plot(t, c_firewood_energy_S1_Ac7, color='mediumseagreen')
ax_b.plot(t, decomp_tot_S1_Ac_7y, color='lightcoral')
ax_b.plot(t, TestDSM1_Ac7.o, color='royalblue')
ax_b.plot(t, PH_Emissions_HWP1_Ac_7y, color='orange')
#ax_b.plot(t, OC_storage_S1_Ac7, color='darkturquoise')
ax_b.plot(t, Landfill_decomp_PF_FP_S1_Ac_7y, color='yellow')
ax_b.plot(t, flat_list_Ac_7y, color='darkkhaki')

# zoom-in / limit the view to different portions of the data
ax_a.set_xlim(-1, 200)

ax_a.set_ylim(140, 160)
ax_b.set_ylim(-30, 50)

# hide the spines between ax and ax2
ax_a.spines['bottom'].set_visible(False)
ax_b.spines['top'].set_visible(False)
ax_a.xaxis.tick_top()
ax_a.tick_params(labeltop=False)  # don't put tick labels at the top
ax_b.xaxis.tick_bottom()

ax_a.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)

d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_a.transAxes, color='k', clip_on=False)
ax_a.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
ax_a.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal

kwargs.update(transform=ax_b.transAxes)        # switch to the bottom axes
ax_b.plot((-d, +d), (1 - d, 1 + d), **kwargs)        # bottom-left diagonal
ax_b.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal

ax_b.set_xlabel('Time (year)')
ax_b.set_ylabel('C flows (t-C)')
ax_a.set_ylabel('C flows (t-C)')

ax_a.set_title('Carbon flow, PF_FP_M_Ac_7y (EC)')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])

plt.show()

#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#PF_FP_M_EC_Ac_18y (Existing conversion efficiency)

fig = plt.figure()
fig.show()
ax2 = fig.add_subplot(111)

ax2.plot(t, flat_list_Ac_18y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2.plot(t, OC_storage_S1_Ac18, color='darkturquoise', label='9: Landfill storage')
ax2.plot(t, decomp_tot_S1_Ac_18y, color='lightcoral', label='F1-0: Residue decomposition')
ax2.plot(t, c_firewood_energy_S1_Ac18, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2.plot(t, PH_Emissions_HWP1_Ac_18y, color='orange', label='F8-0: Operational stage/processing emissions')
ax2.plot(t, TestDSM1_Ac18.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax2.plot(t, Landfill_decomp_PF_FP_S1_Ac_18y, color='yellow', label='F7-0: Landfill gas decomposition')

ax2.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)
ax2.set_xlim(-1, 200)
ax2.set_yscale('symlog')

ax2.set_xlabel('Time (year)')
ax2.set_ylabel('C flows (t-C) (symlog)')

ax2.set_title('Carbon flow, PF_FP_M_Ac_18y (EC) (symlog-scale)')

plt.show()

#%%
#plot for the individual carbon flows
#PF_FP_M_EC_Ac_18y (Existing conversion efficiency)

f, (ax_c, ax_d) = plt.subplots(2, 1, sharex=True)

ax_c.plot(t, flat_list_Ac_18y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_c.plot(t, OC_storage_S1_Ac18, color='darkturquoise', label='9: Landfill storage')
ax_c.plot(t, decomp_tot_S1_Ac_18y, color='lightcoral', label='F1-0: Residue decomposition')
ax_c.plot(t, c_firewood_energy_S1_Ac18, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_c.plot(t, PH_Emissions_HWP1_Ac_18y, color='orange', label='F8-0: Operational stage/processing emissions')
ax_c.plot(t, TestDSM1_Ac18.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax_c.plot(t, Landfill_decomp_PF_FP_S1_Ac_18y, color='yellow', label='F7-0: Landfill gas decomposition')

ax_d.plot(t, c_firewood_energy_S1_Ac18, color='mediumseagreen')
ax_d.plot(t, decomp_tot_S1_Ac_18y, color='lightcoral')
ax_d.plot(t, TestDSM1_Ac18.o, color='royalblue')
ax_d.plot(t, PH_Emissions_HWP1_Ac_18y, color='orange')
#ax_d.plot(t, OC_storage_S1_Ac18, color='darkturquoise')
ax_d.plot(t, Landfill_decomp_PF_FP_S1_Ac_18y, color='yellow')
ax_d.plot(t, flat_list_Ac_18y, color='darkkhaki')

# zoom-in / limit the view to different portions of the data
ax_c.set_xlim(-1, 200)

ax_c.set_ylim(125, 145)
ax_d.set_ylim(-25, 50)

# hide the spines between ax and ax2
ax_c.spines['bottom'].set_visible(False)
ax_d.spines['top'].set_visible(False)
ax_c.xaxis.tick_top()
ax_c.tick_params(labeltop=False)  # don't put tick labels at the top
ax_d.xaxis.tick_bottom()

ax_c.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)

d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_c.transAxes, color='k', clip_on=False)
ax_c.plot((-d, +d), (-d, +d), **kwargs)  # top-left diagonal
ax_c.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal

kwargs.update(transform=ax_d.transAxes)        # switch to the bottom axes
ax_d.plot((-d, +d), (1 - d, 1 + d), **kwargs)        # bottom-left diagonal
ax_d.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal

ax_d.set_xlabel('Time (year)')
ax_d.set_ylabel('C flows (t-C)')
ax_c.set_ylabel('C flows (t-C)')

ax_c.set_title('Carbon flow, PF_FP_M_Ac_18y (EC)')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])

plt.show()

#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#PF_FP_M_EC_Tgr_60y (Existing conversion efficiency)

fig = plt.figure()
fig.show()
ax3 = fig.add_subplot(111)

ax3.plot(t, flat_list_Tgr_60y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax3.plot(t, OC_storage_S1_Tgr60, color='darkturquoise', label='9: Landfill storage')
ax3.plot(t, decomp_tot_S1_Tgr_60y, color='lightcoral', label='F1-0: Residue decomposition')
ax3.plot(t, c_firewood_energy_S1_Tgr60, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax3.plot(t, PH_Emissions_HWP1_Tgr_60y, color='orange', label='F8-0: Operational stage/processing emissions')
ax3.plot(t, TestDSM1_Tgr60.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax3.plot(t, Landfill_decomp_PF_FP_S1_Tgr_60y, color='yellow', label='F7-0: Landfill gas decomposition')

ax3.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)
ax3.set_xlim(-1, 200)
ax3.set_yscale('symlog')

ax3.set_xlabel('Time (year)')
ax3.set_ylabel('C flows (t-C) (symlog)')

ax3.set_title('Carbon flow, PF_FP_M_Tgr_60y (EC) (symlog-scale)')

plt.show()

#%%
#plot for the individual carbon flows
#PF_FP_M_EC_Tgr_60y (Existing conversion efficiency)

f, (ax_e, ax_f) = plt.subplots(2, 1, sharex=True)

ax_e.plot(t, flat_list_Tgr_60y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_e.plot(t, OC_storage_S1_Tgr60, color='darkturquoise', label='9: Landfill storage')
ax_e.plot(t, decomp_tot_S1_Tgr_60y, color='lightcoral', label='F1-0: Residue decomposition')
ax_e.plot(t, c_firewood_energy_S1_Tgr60, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_e.plot(t, PH_Emissions_HWP1_Tgr_60y, color='orange', label='F8-0: Operational stage/processing emissions')
ax_e.plot(t, TestDSM1_Tgr60.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax_e.plot(t, Landfill_decomp_PF_FP_S1_Tgr_60y, color='yellow', label='F7-0: Landfill gas decomposition')

ax_f.plot(t, c_firewood_energy_S1_Tgr60, color='mediumseagreen')
ax_f.plot(t, decomp_tot_S1_Tgr_60y, color='lightcoral')
ax_f.plot(t, TestDSM1_Tgr60.o, color='royalblue')
ax_f.plot(t, PH_Emissions_HWP1_Tgr_60y, color='orange')
#ax_f.plot(t, OC_storage_S1_Tgr60, color='darkturquoise')
ax_f.plot(t, Landfill_decomp_PF_FP_S1_Tgr_60y, color='yellow')
ax_f.plot(t, flat_list_Tgr_60y, color='darkkhaki')

# zoom-in / limit the view to different portions of the data
ax_e.set_xlim(-1, 200)

ax_e.set_ylim(125, 145)
ax_f.set_ylim(-25, 65)

# hide the spines between ax and ax2
ax_e.spines['bottom'].set_visible(False)
ax_f.spines['top'].set_visible(False)
ax_e.xaxis.tick_top()
ax_e.tick_params(labeltop=False)  # don't put tick labels at the top
ax_f.xaxis.tick_bottom()

ax_e.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)

d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_e.transAxes, color='k', clip_on=False)
ax_e.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
ax_e.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal

kwargs.update(transform=ax_f.transAxes)        # switch to the bottom axes
ax_f.plot((-d, +d), (1 - d, 1 + d), **kwargs)        # bottom-left diagonal
ax_f.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal

ax_f.set_xlabel('Time (year)')
ax_f.set_ylabel('C flows (t-C)')
ax_e.set_ylabel('C flows (t-C)')

ax_e.set_title('Carbon flow, PF_FP_M_Tgr_60y (EC)')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])

plt.show()

#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#PF_FP_E_EC_Hbr_40y (Existing conversion efficiency)

fig = plt.figure()
fig.show()
ax4 = fig.add_subplot(111)

ax4.plot(t, flat_list_Hbr_40y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax4.plot(t, OC_storage_E_Hbr40, color='darkturquoise', label='9: Landfill storage')
ax4.plot(t, decomp_tot_E_Hbr_40y, color='lightcoral', label='F1-0: Residue decomposition')
ax4.plot(t, c_firewood_energy_E_Hbr40, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax4.plot(t, PH_Emissions_HWPE_Hbr_40y, color='orange', label='F8-0: Operational stage/processing emissions')
ax4.plot(t, Landfill_decomp_PF_FP_E_Hbr_40y, color='yellow', label='F7-0: Landfill gas decomposition')
ax4.plot(t, c_pellets_Hbr_40y, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax4.plot(t, TestDSME_Hbr40.o, label='in-use stock output')

ax4.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)
ax4.set_xlim(-1, 200)
ax4.set_yscale('symlog')

ax4.set_xlabel('Time (year)')
ax4.set_ylabel('C flows (t-C) (symlog)')

ax4.set_title('Carbon flow, PF_FP_E_Hbr_40y (EC) (symlog-scale)')

plt.show()

#%%
#plot for the individual carbon flows
#PF_FP_E_EC_Hbr_40y (Existing conversion efficiency)

f, (ax_g, ax_h) = plt.subplots(2, 1, sharex=True)

ax_g.plot(t, flat_list_Hbr_40y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_g.plot(t, OC_storage_E_Hbr40, color='darkturquoise', label='9: Landfill storage')
ax_g.plot(t, decomp_tot_E_Hbr_40y, color='lightcoral', label='F1-0: Residue decomposition')
ax_g.plot(t, c_firewood_energy_E_Hbr40, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_g.plot(t, PH_Emissions_HWPE_Hbr_40y, color='orange', label='F8-0: Operational stage/processing emissions')
ax_g.plot(t, Landfill_decomp_PF_FP_E_Hbr_40y, color='yellow', label='F7-0: Landfill gas decomposition')
ax_g.plot(t, c_pellets_Hbr_40y, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax_g.plot(t, TestDSME_Hbr40.o, label='in-use stock output')

ax_h.plot(t, c_firewood_energy_E_Hbr40, color='mediumseagreen')
ax_h.plot(t, c_pellets_Hbr_40y, color='slategrey')
ax_h.plot(t, decomp_tot_E_Hbr_40y, color='lightcoral')
#ax_h.plot(t, TestDSME_Hbr40.o)
ax_h.plot(t, PH_Emissions_HWPE_Hbr_40y, color='orange')
#ax_h.plot(t, OC_storage_E_Hbr40, color='darkturquoise')
ax_h.plot(t, Landfill_decomp_PF_FP_E_Hbr_40y, color='yellow')
ax_h.plot(t, flat_list_Hbr_40y, color='darkkhaki')

# zoom-in / limit the view to different portions of the data
ax_g.set_xlim(-1, 200)

ax_g.set_ylim(90, 110)
ax_h.set_ylim(-25, 35)

# hide the spines between ax and ax2
ax_g.spines['bottom'].set_visible(False)
ax_h.spines['top'].set_visible(False)
ax_g.xaxis.tick_top()
ax_g.tick_params(labeltop=False)  # don't put tick labels at the top
ax_h.xaxis.tick_bottom()

ax_g.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)

d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_g.transAxes, color='k', clip_on=False)
ax_g.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
ax_g.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal

kwargs.update(transform=ax_h.transAxes)        # switch to the bottom axes
ax_h.plot((-d, +d), (1 - d, 1 + d), **kwargs)        # bottom-left diagonal
ax_h.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal

ax_h.set_xlabel('Time (year)')
ax_h.set_ylabel('C flows (t-C)')
ax_g.set_ylabel('C flows (t-C)')

ax_g.set_title('Carbon flow, PF_FP_E_Hbr_40y (EC)')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])

plt.show()

#%%
#Step (23): Generate the excel file for the net carbon balance
# Each Agg_Cflow_* is the element-wise sum of all emission/sequestration flows
# of a scenario (sequestration is negative), i.e. the net C balance per year.

Agg_Cflow_S1_Ac_7y = [c_firewood_energy_S1_Ac7, decomp_tot_S1_Ac_7y, TestDSM1_Ac7.o,
                      PH_Emissions_HWP1_Ac_7y, Landfill_decomp_PF_FP_S1_Ac_7y, flat_list_Ac_7y]

Agg_Cflow_S1_Ac_18y = [c_firewood_energy_S1_Ac18, decomp_tot_S1_Ac_18y, TestDSM1_Ac18.o,
                       PH_Emissions_HWP1_Ac_18y, Landfill_decomp_PF_FP_S1_Ac_18y, flat_list_Ac_18y]

Agg_Cflow_S1_Tgr_60y = [c_firewood_energy_S1_Tgr60, decomp_tot_S1_Tgr_60y, TestDSM1_Tgr60.o,
                        PH_Emissions_HWP1_Tgr_60y, Landfill_decomp_PF_FP_S1_Tgr_60y, flat_list_Tgr_60y]

Agg_Cflow_E_Hbr_40y = [c_firewood_energy_E_Hbr40, c_pellets_Hbr_40y, decomp_tot_E_Hbr_40y, TestDSME_Hbr40.o,
                       PH_Emissions_HWPE_Hbr_40y, Landfill_decomp_PF_FP_E_Hbr_40y, flat_list_Hbr_40y]

Agg_Cflow_PF_FP_S1_Ac_7y = [sum(x) for x in zip(*Agg_Cflow_S1_Ac_7y)]
Agg_Cflow_PF_FP_S1_Ac_18y = [sum(x) for x in zip(*Agg_Cflow_S1_Ac_18y)]
Agg_Cflow_PF_FP_S1_Tgr_60y = [sum(x) for x in zip(*Agg_Cflow_S1_Tgr_60y)]
Agg_Cflow_PF_FP_E_Hbr_40y = [sum(x) for x in zip(*Agg_Cflow_E_Hbr_40y)]

fig = plt.figure()
fig.show()
ax5 = fig.add_subplot(111)

# plot
ax5.plot(t, Agg_Cflow_PF_FP_S1_Ac_7y, color='orange', label='M_EC_Ac_7y')
ax5.plot(t, Agg_Cflow_PF_FP_S1_Ac_18y, color='darkturquoise', label='M_EC_Ac_18y')
ax5.plot(t, Agg_Cflow_PF_FP_S1_Tgr_60y, color='lightcoral', label='M_EC_Tgr_60y')
ax5.plot(t, Agg_Cflow_PF_FP_E_Hbr_40y, color='mediumseagreen', label='E_EC_Hbr_40y')

ax5.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)
ax5.set_xlim(-1, 200)
#ax5.set_yscale('symlog')

ax5.set_xlabel('Time (year)')
ax5.set_ylabel('C flows (t-C) (symlog)')

ax5.set_title('Aggr. C-emissions/sequestration flow, PF_FP_EC (symlog-scale)')

plt.show()

#create column year
year = []
for x in range(0, 201):
    year.append(x)
print(year)

#Create column results
dfM_PF_FP_EC = pd.DataFrame.from_dict({'Year': year,
                                       'M_EC_Ac_7y (t-C)': Agg_Cflow_PF_FP_S1_Ac_7y,
                                       'M_EC_Ac_18y (t-C)': Agg_Cflow_PF_FP_S1_Ac_18y,
                                       'M_EC_Tgr_60y (t-C)': Agg_Cflow_PF_FP_S1_Tgr_60y,
                                       'E_EC_Hbr_40y (t-C)': Agg_Cflow_PF_FP_E_Hbr_40y})

#Export to excel
writer = pd.ExcelWriter('AggCFlow_PF_FP_EC_RB.xlsx', engine='xlsxwriter')

dfM_PF_FP_EC.to_excel(writer, sheet_name='PF_FP_EC', header=True, index=False)

writer.save()
writer.close()

#%%
#Step (24): Plot the net carbon balance

f, (ax5a, ax5b) = plt.subplots(2, 1, sharex=True)

# plot
ax5a.plot(t, Agg_Cflow_PF_FP_S1_Ac_7y, color='orange', label='M_EC_Ac_7y')
ax5a.plot(t, Agg_Cflow_PF_FP_S1_Ac_18y, color='darkturquoise', label='M_EC_Ac_18y')
ax5a.plot(t, Agg_Cflow_PF_FP_S1_Tgr_60y, color='lightcoral', label='M_EC_Tgr_60y')
ax5a.plot(t, Agg_Cflow_PF_FP_E_Hbr_40y, color='mediumseagreen', label='E_EC_Hbr_40y')
ax5a.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)

ax5b.plot(t, Agg_Cflow_PF_FP_S1_Ac_7y, color='orange')
ax5b.plot(t, Agg_Cflow_PF_FP_S1_Ac_18y, color='darkturquoise')
ax5b.plot(t, Agg_Cflow_PF_FP_S1_Tgr_60y, color='lightcoral')
ax5b.plot(t, Agg_Cflow_PF_FP_E_Hbr_40y, color='mediumseagreen')
ax5b.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)

# zoom-in / limit the view to different portions of the data
ax5a.set_xlim(-1, 200)

ax5a.set_ylim(200, 220)
ax5b.set_ylim(-25, 65)

# hide the spines between ax and ax2
ax5a.spines['bottom'].set_visible(False)
ax5b.spines['top'].set_visible(False)
ax5a.xaxis.tick_top()
ax5a.tick_params(labeltop=False)  # don't put tick labels at the top
ax5b.xaxis.tick_bottom()

ax5a.legend(bbox_to_anchor=(1.04, 1), loc="upper left", frameon=False)

d = .012  # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax5a.transAxes, color='k', clip_on=False)
ax5a.plot((-d, +d), (-d, +d), **kwargs)        # top-left diagonal
ax5a.plot((1 - d, 1 + d), (-d, +d), **kwargs)  # top-right diagonal

kwargs.update(transform=ax5b.transAxes)        # switch to the bottom axes
ax5b.plot((-d, +d), (1 - d, 1 + d), **kwargs)        # bottom-left diagonal
ax5b.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal

ax5b.set_xlabel('Time (year)')
ax5b.set_ylabel('C flows (t-C)')
ax5a.set_ylabel('C flows (t-C)')

ax5a.set_title('Net carbon balance, PF_FP_EC')

plt.show()

#%%
#Step (25): Generate the excel file for documentation of individual carbon flows in the system definition (Fig. 1)

#print year column
year = []
for x in range(0, 201):
    year.append(x)
print(year)

df1_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_7y')
df1_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Ac_18y')
df1_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_40y')
df1_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_S1_Tgr_60y')
dfE_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_FP.xlsx', 'PF_FP_E_Hbr_40y')

Column1 = year

division = 1000*44/12
division_CH4 = 1000*16/12

## S1_Ac_7y

## define the input flow for the landfill (F5-7)
OC_storage_S1_Ac7 = df1_Ac7['Other_C_storage'].values
OC_storage_S1_Ac7 = [x/division for x in OC_storage_S1_Ac7]
OC_storage_S1_Ac7 = [abs(number) for number in OC_storage_S1_Ac7]

# 0.82 is the landfill storage fraction; dividing by it recovers the gross
# landfill input flow from the stored amount.
C_LF_S1_Ac7 = [x*1/0.82 for x in OC_storage_S1_Ac7]

## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S1_Ac7 = [x/division for x in df1_Ac7['Input_PF'].values]
HWP_S1_Ac7_energy = [x*1/3 for x in c_firewood_energy_S1_Ac7]
HWP_S1_Ac7_landfill = [x*1/0.82 for x in OC_storage_S1_Ac7]

HWP_S1_Ac7_sum = [HWP_S1_Ac7, HWP_S1_Ac7_energy, HWP_S1_Ac7_landfill]
HWP_S1_Ac7_sum = [sum(x) for x in zip(*HWP_S1_Ac7_sum)]

#in-use stocks (S-4)
TestDSM1_Ac7.s = [x/division for x in TestDSM1_Ac7.s]
#TestDSM2_Ac7.i = [x/division for x in TestDSM2_Ac7.i]

#calculate the F1-2
#In general, F1-2 = F2-3 + F2-6,
#To split the F1-2 to F1a-2 and F1c-2, we need to differentiate the flow for the initial land conversion (PF) and the subsequent land type (FP)

#create F1a-2
#tf = 201
#zero_PF_S2_Ac_7y = (tf,1)
#PF_S2_Ac_7y = np.zeros(zero_PF_S2_Ac_7y)
#PF_S2_Ac_7y = [x1+x2 for (x1,x2) in zip(HWP_S2_Ac7_sum, [x*2/3 for x in c_firewood_energy_S2_Ac7])][0:8]

#create F1c-2
#zero_FP_S2_Ac_7y = (tf,1)
#FP_S2_Ac_7y = np.zeros(zero_FP_S2_Ac_7y)
#FP_S2_Ac_7y = [x1+x2 for (x1,x2) in zip(HWP_S2_Ac7_sum, [x*2/3 for x in c_firewood_energy_S2_Ac7])][8:tf]

# calculate C stocks in landfill (S-7): running balance of input minus decomposition
tf = 201

zero_matrix_stocks_S1_Ac_7y = (tf, 1)
stocks_S1_Ac_7y = np.zeros(zero_matrix_stocks_S1_Ac_7y)

i = 0
stocks_S1_Ac_7y[0] = C_LF_S1_Ac7[0] - Landfill_decomp_PF_FP_S1_Ac_7y[0]

while i < tf-1:
    stocks_S1_Ac_7y[i+1] = np.array(C_LF_S1_Ac7[i+1] - Landfill_decomp_PF_FP_S1_Ac_7y[i+1] + stocks_S1_Ac_7y[i])
    i = i + 1

## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S1_Ac_7y = [x1+x2 for (x1, x2) in zip(HWP_S1_Ac7_sum, [x*2/3 for x in c_firewood_energy_S1_Ac7])]

## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201

zero_matrix_ForCstocks_S1_Ac_7y = (tf, 1)
ForCstocks_S1_Ac_7y = np.zeros(zero_matrix_ForCstocks_S1_Ac_7y)

i = 0
ForCstocks_S1_Ac_7y[0] = initAGB - flat_list_Ac_7y[0] - decomp_tot_S1_Ac_7y[0] - HWP_logged_S1_Ac_7y[0]

while i < tf-1:
    ForCstocks_S1_Ac_7y[i+1] = np.array(ForCstocks_S1_Ac_7y[i] - flat_list_Ac_7y[i+1] - decomp_tot_S1_Ac_7y[i+1] - HWP_logged_S1_Ac_7y[i+1])
    i = i + 1

##NonRW materials/energy amount (F9-0-1)
df1_amount_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_FP.xlsx', 'PF_FP_S1_Ac_7y')
NonRW_amount_S1_Ac_7y = df1_amount_Ac7['NonRW_amount'].values

NonRW_amount_S1_Ac_7y = [x/1000 for x in NonRW_amount_S1_Ac_7y]

##NonRW emissions (F9-0-2)
emissions_NonRW_S1_Ac_7y = [x/division for x in emissions_NonRW_S1_Ac_7y]

#create columns
dfM_Ac_7y = pd.DataFrame.from_dict({'Year': Column1,
                                    'F0-1 (t-C)': flat_list_Ac_7y,
                                    'F1-0 (t-C)': decomp_tot_S1_Ac_7y,
                                    #'F1a-2 (t-C)': PF_S2_Ac_7y,
                                    #'F1c-2 (t-C)': FP_S2_Ac_7y,
                                    'F1-2 (t-C)': HWP_logged_S1_Ac_7y,
                                    'St-1 (t-C)': ForCstocks_S1_Ac_7y[:, 0],
                                    'F2-3 (t-C)': HWP_S1_Ac7_sum,
                                    'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S1_Ac7],
                                    'SM/E (t-C)': [x1-x2-x3 for (x1, x2, x3) in zip(HWP_S1_Ac7_sum,
                                                                                    [x*1/0.82 for x in OC_storage_S1_Ac7],
                                                                                    [x*1/3 for x in c_firewood_energy_S1_Ac7])],
                                    'F3-5 (t-C)': [x*1/0.82 for x in OC_storage_S1_Ac7],
                                    'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S1_Ac7],
                                    # 'F4-0 (t-C)':,
                                    'St-4 (t-C)': TestDSM1_Ac7.s,
                                    #'S-4-i (t-C)': TestDSM1_Ac7.i,
                                    'F4-5 (t-C)': TestDSM1_Ac7.o,
                                    'F5-6 (t-C)': TestDSM1_Ac7.o,
                                    'F5-7 (t-C)': C_LF_S1_Ac7,
                                    'F6-0-1 (t-C)': c_firewood_energy_S1_Ac7,
                                    'F6-0-2 (t-C)': TestDSM1_Ac7.o,
                                    'St-7 (t-C)': stocks_S1_Ac_7y[:, 0],
                                    'F7-0 (t-C)': Landfill_decomp_PF_FP_S1_Ac_7y,
                                    'F8-0 (t-C)': PH_Emissions_HWP1_Ac_7y,
                                    'S9-0 (t)': NonRW_amount_S1_Ac_7y,
                                    'F9-0 (t-C)': emissions_NonRW_S1_Ac_7y,
                                    })

##S1_Ac_18y

## define the input flow for the landfill (F5-7)
OC_storage_S1_Ac18 = df1_Ac18['Other_C_storage'].values
OC_storage_S1_Ac18 = [x/division for x in OC_storage_S1_Ac18]
OC_storage_S1_Ac18 = [abs(number) for number in OC_storage_S1_Ac18]

C_LF_S1_Ac18 = [x*1/0.82 for x in OC_storage_S1_Ac18]

## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S1_Ac18 = [x/division for x in df1_Ac18['Input_PF'].values]
HWP_S1_Ac18_energy = [x*1/3 for x in c_firewood_energy_S1_Ac18]
HWP_S1_Ac18_landfill = [x*1/0.82 for x in OC_storage_S1_Ac18]

HWP_S1_Ac18_sum = [HWP_S1_Ac18, HWP_S1_Ac18_energy, HWP_S1_Ac18_landfill]
HWP_S1_Ac18_sum = [sum(x) for x in zip(*HWP_S1_Ac18_sum)]

## in-use stocks (S-4)
TestDSM1_Ac18.s = [x/division for x in TestDSM1_Ac18.s]
#TestDSM1_Ac18.i = [x/division for x in TestDSM1_Ac18.i]
#calculate C stocks in landfill (S-7) tf = 201 zero_matrix_stocks_S1_Ac_18y = (tf,1) stocks_S1_Ac_18y = np.zeros(zero_matrix_stocks_S1_Ac_18y) i = 0 stocks_S1_Ac_18y[0] = C_LF_S1_Ac18[0] - Landfill_decomp_PF_FP_S1_Ac_18y[0] while i < tf-1: stocks_S1_Ac_18y[i+1] = np.array(C_LF_S1_Ac18[i+1] - Landfill_decomp_PF_FP_S1_Ac_18y[i+1] + stocks_S1_Ac_18y[i]) i = i + 1 ## calculate aggregate flow of logged wood (F1-2) HWP_logged_S1_Ac_18y = [x1+x2 for (x1,x2) in zip(HWP_S1_Ac18_sum, [x*2/3 for x in c_firewood_energy_S1_Ac18])] ## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c) tf = 201 zero_matrix_ForCstocks_S1_Ac_18y = (tf,1) ForCstocks_S1_Ac_18y = np.zeros(zero_matrix_ForCstocks_S1_Ac_18y) i = 0 ForCstocks_S1_Ac_18y[0] = initAGB - flat_list_Ac_18y[0] - decomp_tot_S1_Ac_18y[0] - HWP_logged_S1_Ac_18y[0] while i < tf-1: ForCstocks_S1_Ac_18y[i+1] = np.array(ForCstocks_S1_Ac_18y[i] - flat_list_Ac_18y[i+1] - decomp_tot_S1_Ac_18y[i+1] - HWP_logged_S1_Ac_18y[i+1]) i = i + 1 ##NonRW materials/energy amount (F9-0-1) df1_amount_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_FP.xlsx', 'PF_FP_S1_Ac_18y') NonRW_amount_S1_Ac_18y = df1_amount_Ac18['NonRW_amount'].values NonRW_amount_S1_Ac_18y = [x/1000 for x in NonRW_amount_S1_Ac_18y] ##NonRW emissions (F9-0-2) emissions_NonRW_S1_Ac_18y = [x/division for x in emissions_NonRW_S1_Ac_18y] #create columns dfM_Ac_18y = pd.DataFrame.from_dict({'Year':Column1, 'F0-1 (t-C)': flat_list_Ac_18y, 'F1-0 (t-C)': decomp_tot_S1_Ac_18y, #'F1a-2 (t-C)': PF_S1_Ac_18y, #'F1c-2 (t-C)': FP_S1_Ac_18y, 'F1-2 (t-C)': HWP_logged_S1_Ac_18y, 'St-1 (t-C)':ForCstocks_S1_Ac_18y[:,0], 'F2-3 (t-C)': HWP_S1_Ac18_sum, 'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S1_Ac18], 'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S1_Ac18_sum, [x*1/0.82 for x in OC_storage_S1_Ac18], [x*1/3 for x in c_firewood_energy_S1_Ac18])], 'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S1_Ac18], 'F3-6 (t-C)': [x*1/3 for x in 
c_firewood_energy_S1_Ac18], # 'F4-0 (t-C)':, 'St-4 (t-C)': TestDSM1_Ac18.s, #'S-4-i (t-C)': TestDSM1_Ac7.i, 'F4-5 (t-C)': TestDSM1_Ac18.o, 'F5-6 (t-C)': TestDSM1_Ac18.o, 'F5-7 (t-C)': C_LF_S1_Ac18, 'F6-0-1 (t-C)': c_firewood_energy_S1_Ac18, 'F6-0-2 (t-C)': TestDSM1_Ac18.o, 'St-7 (t-C)': stocks_S1_Ac_18y[:,0], 'F7-0 (t-C)': Landfill_decomp_PF_FP_S1_Ac_18y, 'F8-0 (t-C)': PH_Emissions_HWP1_Ac_18y, 'S9-0 (t)': NonRW_amount_S1_Ac_18y, 'F9-0 (t-C)': emissions_NonRW_S1_Ac_18y, }) ##S1_Tgr_60y ## define the input flow for the landfill (F5-7) OC_storage_S1_Tgr60 = df1_Tgr60['Other_C_storage'].values OC_storage_S1_Tgr60 = [x/division for x in OC_storage_S1_Tgr60] OC_storage_S1_Tgr60 = [abs(number) for number in OC_storage_S1_Tgr60] C_LF_S1_Tgr60 = [x*1/0.82 for x in OC_storage_S1_Tgr60] ## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3) HWP_S1_Tgr60 = [x/division for x in df1_Tgr60['Input_PF'].values] HWP_S1_Tgr60_energy = [x*1/3 for x in c_firewood_energy_S1_Tgr60] HWP_S1_Tgr60_landfill = [x*1/0.82 for x in OC_storage_S1_Tgr60] HWP_S1_Tgr60_sum = [HWP_S1_Tgr60, HWP_S1_Tgr60_energy, HWP_S1_Tgr60_landfill] HWP_S1_Tgr60_sum = [sum(x) for x in zip(*HWP_S1_Tgr60_sum )] ## in-use stocks (S-4) TestDSM1_Tgr60.s = [x/division for x in TestDSM1_Tgr60.s] #TestDSM1_Tgr60.i = [x/division for x in TestDSM1_Tgr60.i] ## calculate C stocks in landfill (S-7) tf = 201 zero_matrix_stocks_S1_Tgr_60y = (tf,1) stocks_S1_Tgr_60y = np.zeros(zero_matrix_stocks_S1_Tgr_60y) i = 0 stocks_S1_Tgr_60y[0] = C_LF_S1_Tgr60[0] - Landfill_decomp_PF_FP_S1_Tgr_60y[0] while i < tf-1: stocks_S1_Tgr_60y[i+1] = np.array(C_LF_S1_Tgr60[i+1] - Landfill_decomp_PF_FP_S1_Tgr_60y[i+1] + stocks_S1_Tgr_60y[i]) i = i + 1 ## calculate aggregate flow of logged wood (F1-2) HWP_logged_S1_Tgr_60y = [x1+x2 for (x1,x2) in zip(HWP_S1_Tgr60_sum, [x*2/3 for x in c_firewood_energy_S1_Tgr60])] ## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c) tf = 201 
zero_matrix_ForCstocks_S1_Tgr_60y = (tf,1) ForCstocks_S1_Tgr_60y = np.zeros(zero_matrix_ForCstocks_S1_Tgr_60y) i = 0 ForCstocks_S1_Tgr_60y[0] = initAGB - flat_list_Tgr_60y[0] - decomp_tot_S1_Tgr_60y[0] - HWP_logged_S1_Tgr_60y[0] while i < tf-1: ForCstocks_S1_Tgr_60y[i+1] = np.array(ForCstocks_S1_Tgr_60y[i] - flat_list_Tgr_60y[i+1] - decomp_tot_S1_Tgr_60y[i+1] - HWP_logged_S1_Tgr_60y[i+1]) i = i + 1 ##NonRW materials/energy amount (F9-0-1) df1_amount_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_FP.xlsx', 'PF_FP_S1_Tgr_60y') NonRW_amount_S1_Tgr_60y = df1_amount_Tgr60['NonRW_amount'].values NonRW_amount_S1_Tgr_60y = [x/1000 for x in NonRW_amount_S1_Tgr_60y] ##NonRW emissions (F9-0-2) emissions_NonRW_S1_Tgr_60y = [x/division for x in emissions_NonRW_S1_Tgr_60y] #create columns dfM_Tgr_60y = pd.DataFrame.from_dict({'Year':Column1, 'F0-1 (t-C)': flat_list_Tgr_60y, 'F1-0 (t-C)': decomp_tot_S1_Tgr_60y, #'F1a-2 (t-C)': PF_S1_Tgr_60y, #'F1c-2 (t-C)': FP_S1_Tgr_60y, 'F1-2 (t-C)': HWP_logged_S1_Tgr_60y, 'St-1 (t-C)':ForCstocks_S1_Tgr_60y[:,0], 'F2-3 (t-C)': HWP_S1_Tgr60_sum, 'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S1_Tgr60], 'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S1_Tgr60_sum, [x*1/0.82 for x in OC_storage_S1_Tgr60], [x*1/3 for x in c_firewood_energy_S1_Tgr60])], 'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S1_Tgr60], 'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S1_Tgr60], # 'F4-0 (t-C)':, 'St-4 (t-C)': TestDSM1_Tgr60.s, #'S-4-i (t-C)': TestDSM1_Tgr60.i, 'F4-5 (t-C)': TestDSM1_Tgr60.o, 'F5-6 (t-C)': TestDSM1_Tgr60.o, 'F5-7 (t-C)': C_LF_S1_Tgr60, 'F6-0-1 (t-C)': c_firewood_energy_S1_Tgr60, 'F6-0-2 (t-C)': TestDSM1_Tgr60.o, 'St-7 (t-C)': stocks_S1_Tgr_60y[:,0], 'F7-0 (t-C)': Landfill_decomp_PF_FP_S1_Tgr_60y, 'F8-0 (t-C)': PH_Emissions_HWP1_Tgr_60y, 'S9-0 (t)': NonRW_amount_S1_Tgr_60y, 'F9-0 (t-C)': emissions_NonRW_S1_Tgr_60y, }) ##S1_E_Hbr_40y ## define the input flow for the landfill (F5-7) OC_storage_E_Hbr40 = 
dfE_Hbr40['Other_C_storage'].values OC_storage_E_Hbr40 = [x/division for x in OC_storage_E_Hbr40] OC_storage_E_Hbr40 = [abs(number) for number in OC_storage_E_Hbr40] C_LF_E_Hbr40 = [x*1/0.82 for x in OC_storage_E_Hbr40] ## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3) HWP_E_Hbr40 = [x/division for x in dfE_Hbr40['Wood_pellets'].values] HWP_E_Hbr40_energy = [x*1/3 for x in c_firewood_energy_E_Hbr40] HWP_E_Hbr40_landfill = [x*1/0.82 for x in OC_storage_E_Hbr40] HWP_E_Hbr40_sum = [HWP_E_Hbr40, HWP_E_Hbr40_energy, HWP_E_Hbr40_landfill] HWP_E_Hbr40_sum = [sum(x) for x in zip(*HWP_E_Hbr40_sum )] ## in-use stocks (S-4) TestDSME_Hbr40.s = [x/division for x in TestDSME_Hbr40.s] ## calculate C stocks in landfill (S-7) tf = 201 zero_matrix_stocks_E_Hbr_40y = (tf,1) stocks_E_Hbr_40y = np.zeros(zero_matrix_stocks_E_Hbr_40y) i = 0 stocks_E_Hbr_40y[0] = C_LF_E_Hbr40[0] - Landfill_decomp_PF_FP_E_Hbr_40y[0] while i < tf-1: stocks_E_Hbr_40y[i+1] = np.array(C_LF_E_Hbr40[i+1] - Landfill_decomp_PF_FP_E_Hbr_40y[i+1] + stocks_E_Hbr_40y[i]) i = i + 1 ## calculate aggregate flow of logged wood (F1-2) HWP_logged_E_Hbr_40y = [x1+x2 for (x1,x2) in zip(HWP_E_Hbr40_sum, [x*2/3 for x in c_firewood_energy_E_Hbr40])] #calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c) tf = 201 zero_matrix_ForCstocks_E_Hbr_40y = (tf,1) ForCstocks_E_Hbr_40y = np.zeros(zero_matrix_ForCstocks_E_Hbr_40y) i = 0 ForCstocks_E_Hbr_40y[0] = initAGB - flat_list_Hbr_40y[0] - decomp_tot_E_Hbr_40y[0] - HWP_logged_E_Hbr_40y[0] while i < tf-1: ForCstocks_E_Hbr_40y[i+1] = np.array(ForCstocks_E_Hbr_40y[i] - flat_list_Hbr_40y[i+1] - decomp_tot_E_Hbr_40y[i+1] - HWP_logged_E_Hbr_40y[i+1]) i = i + 1 ##NonRW materials/energy amount (F9-0-1) dfE_amount_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_FP.xlsx', 'PF_FP_E_Hbr_40y') NonRW_amount_E_Hbr_40y = dfE_amount_Hbr40['NonRW_amount'].values NonRW_amount_E_Hbr_40y = [x/1000 for x in 
NonRW_amount_E_Hbr_40y] ##NonRW emissions (F9-0-2) emissions_NonRW_E_Hbr_40y = [x/division for x in emissions_NonRW_E_Hbr_40y] #create columns dfE_Hbr_40y = pd.DataFrame.from_dict({'Year':Column1, 'F0-1 (t-C)': flat_list_Hbr_40y, 'F1-0 (t-C)': decomp_tot_E_Hbr_40y, #'F1a-2 (t-C)': PF_S2_Tgr_60y, #'F1c-2 (t-C)': FP_S2_Tgr_60y, 'F1-2 (t-C)': HWP_logged_E_Hbr_40y, 'St-1 (t-C)':ForCstocks_E_Hbr_40y[:,0], 'F2-3 (t-C)': HWP_E_Hbr40_sum, 'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_E_Hbr40], 'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_E_Hbr40_sum, [x*1/0.82 for x in OC_storage_E_Hbr40], [x*1/3 for x in c_firewood_energy_E_Hbr40])], 'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_E_Hbr40], 'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_E_Hbr40], 'F4-0 (t-C)': c_pellets_Hbr_40y, 'St-4 (t-C)': TestDSME_Hbr40.s, #'S-4-i (t-C)': TestDSME_Hbr40.i, 'F4-5 (t-C)': TestDSME_Hbr40.o, 'F5-6 (t-C)': TestDSME_Hbr40.o, 'F5-7 (t-C)': C_LF_E_Hbr40, 'F6-0-1 (t-C)': c_firewood_energy_E_Hbr40, 'F6-0-2 (t-C)': TestDSME_Hbr40.o, 'St-7 (t-C)': stocks_E_Hbr_40y[:,0], 'F7-0 (t-C)': Landfill_decomp_PF_FP_E_Hbr_40y, 'F8-0 (t-C)': PH_Emissions_HWPE_Hbr_40y, 'S9-0 (t)': NonRW_amount_E_Hbr_40y, 'F9-0 (t-C)': emissions_NonRW_E_Hbr_40y, }) writer = pd.ExcelWriter('C_flows_SysDef_PF_FP_EC_RB.xlsx', engine = 'xlsxwriter') dfM_Ac_7y.to_excel(writer, sheet_name = 'PF_FP_M_EC_Ac_7y', header=True, index=False) dfM_Ac_18y.to_excel(writer, sheet_name = 'PF_FP_M_EC_Ac_18y', header=True, index=False) dfM_Tgr_60y.to_excel(writer, sheet_name = 'PF_FP_M_EC_Tgr_60y', header=True, index=False) dfE_Hbr_40y.to_excel(writer, sheet_name = 'PF_FP_E_EC_Hbr_40y', header=True, index=False) writer.save() writer.close() #%%
34.477886
268
0.706152
4a011f256228518dec00d3ab3b20f9fbe49bf7c1
487
py
Python
env/Lib/site-packages/plotly/validators/scatter3d/textfont/_size.py
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
8a4707301d19c3826c31026c4077930bcd6a8182
[ "MIT" ]
11,750
2015-10-12T07:03:39.000Z
2022-03-31T20:43:15.000Z
venv/Lib/site-packages/plotly/validators/scatter3d/textfont/_size.py
wakisalvador/constructed-misdirection
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
[ "Unlicense" ]
2,951
2015-10-12T00:41:25.000Z
2022-03-31T22:19:26.000Z
venv/Lib/site-packages/plotly/validators/scatter3d/textfont/_size.py
wakisalvador/constructed-misdirection
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
[ "Unlicense" ]
2,623
2015-10-15T14:40:27.000Z
2022-03-28T16:05:50.000Z
import _plotly_utils.basevalidators class SizeValidator(_plotly_utils.basevalidators.NumberValidator): def __init__(self, plotly_name="size", parent_name="scatter3d.textfont", **kwargs): super(SizeValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, array_ok=kwargs.pop("array_ok", True), edit_type=kwargs.pop("edit_type", "calc"), min=kwargs.pop("min", 1), **kwargs )
34.785714
87
0.64271
4a011f817b82ffc6c0c11887726e5d53ecc0fd75
1,576
py
Python
makeplugin.py
Kolbo5/FanFicFare
cf2ae9b12631bfeeb9198ca686f9e58d4579aeb3
[ "Apache-2.0" ]
1
2020-03-26T05:44:01.000Z
2020-03-26T05:44:01.000Z
makeplugin.py
Kolbo5/FanFicFare
cf2ae9b12631bfeeb9198ca686f9e58d4579aeb3
[ "Apache-2.0" ]
null
null
null
makeplugin.py
Kolbo5/FanFicFare
cf2ae9b12631bfeeb9198ca686f9e58d4579aeb3
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2018, Jim Miller # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from glob import glob from makezip import createZipFile if __name__=="__main__": filename="FanFicFare.zip" exclude=['*.pyc','*~','*.xcf','*[0-9].png','*.po','*.pot','*default.mo','*Thumbs.db'] os.chdir('calibre-plugin') files=['plugin-defaults.ini','plugin-example.ini','about.html', 'images','translations'] files.extend(glob('*.py')) files.extend(glob('plugin-import-name-*.txt')) # 'w' for overwrite createZipFile("../"+filename,"w", files, exclude=exclude) os.chdir('../included_dependencies') files=['bs4','chardet','html2text','soupsieve','backports'] ## Kept only for v2.85.1 support now. createZipFile("../"+filename,"a", files, exclude=exclude) os.chdir('..') # 'a' for append files=['fanficfare'] createZipFile(filename,"a", files, exclude=exclude)
30.901961
89
0.631345
4a011ff0351bd9780d1a4806fa9d08a1520c43c0
2,979
py
Python
src/arch/x86/isa/insts/general_purpose/string/store_string.py
qianlong4526888/haha
01baf923693873c11ae072ce4dde3d8f1d7b6239
[ "BSD-3-Clause" ]
135
2016-10-21T03:31:49.000Z
2022-03-25T01:22:20.000Z
src/arch/x86/isa/insts/general_purpose/string/store_string.py
qianlong4526888/haha
01baf923693873c11ae072ce4dde3d8f1d7b6239
[ "BSD-3-Clause" ]
35
2017-03-10T17:57:46.000Z
2022-02-18T17:34:16.000Z
src/arch/x86/isa/insts/general_purpose/string/store_string.py
qianlong4526888/haha
01baf923693873c11ae072ce4dde3d8f1d7b6239
[ "BSD-3-Clause" ]
48
2016-12-08T12:03:13.000Z
2022-02-16T09:16:13.000Z
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' def macroop STOS_M { # Find the constant we need to either add or subtract from rdi ruflag t0, 10 movi t3, t3, dsz, flags=(CEZF,), dataSize=asz subi t4, t0, dsz, dataSize=asz mov t3, t3, t4, flags=(nCEZF,), dataSize=asz st rax, es, [1, t0, rdi] add rdi, rdi, t3, dataSize=asz }; def macroop STOS_E_M { and t0, rcx, rcx, flags=(EZF,), dataSize=asz br label("end"), flags=(CEZF,) # Find the constant we need to either add or subtract from rdi ruflag t0, 10 movi t3, t3, dsz, flags=(CEZF,), dataSize=asz subi t4, t0, dsz, dataSize=asz mov t3, t3, t4, flags=(nCEZF,), dataSize=asz topOfLoop: st rax, es, [1, t0, rdi] subi rcx, rcx, 1, flags=(EZF,), dataSize=asz add rdi, rdi, t3, dataSize=asz br label("topOfLoop"), flags=(nCEZF,) end: fault "NoFault" }; '''
42.557143
72
0.746224
4a011ff6ed749dd118f43874b5e435ac584d7438
18,132
py
Python
torchbenchmark/models/nvidia_deeprecommender/nvtrain.py
LaudateCorpus1/benchmark
2a8528f91dfbbd880e514b4deefa692a48c32af8
[ "BSD-3-Clause" ]
null
null
null
torchbenchmark/models/nvidia_deeprecommender/nvtrain.py
LaudateCorpus1/benchmark
2a8528f91dfbbd880e514b4deefa692a48c32af8
[ "BSD-3-Clause" ]
null
null
null
torchbenchmark/models/nvidia_deeprecommender/nvtrain.py
LaudateCorpus1/benchmark
2a8528f91dfbbd880e514b4deefa692a48c32af8
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2017 NVIDIA Corporation # to run against cuda: # --gpu_ids 0 --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_VALID --hidden_layers 512,512,1024 --non_linearity_type selu --batch_size 128 --logdir model_save --drop_prob 0.8 --optimizer momentum --lr 0.005 --weight_decay 0 --aug_step 1 --noise_prob 0 --num_epochs 1 --summary_frequency 1000 --forcecuda # to run on cpu: # --gpu_ids 0 --path_to_train_data Netflix/N1W_TRAIN --path_to_eval_data Netflix/N1W_VALID --hidden_layers 512,512,1024 --non_linearity_type selu --batch_size 128 --logdir model_save --drop_prob 0.8 --optimizer momentum --lr 0.005 --weight_decay 0 --aug_step 1 --noise_prob 0 --num_epochs 1 --summary_frequency 1000 --forcecpu import torch import argparse from .reco_encoder.data import input_layer from .reco_encoder.model import model import torch.optim as optim from torch.optim.lr_scheduler import MultiStepLR import torch.nn as nn from torch.autograd import Variable import copy import time from pathlib import Path #from .logger import Logger from math import sqrt import numpy as np import os import torch.autograd.profiler as profiler def getTrainBenchmarkArgs() : class Args: pass args = Args() args.lr = 0.005 args.weight_decay = 0 args.drop_prob = 0.8 args.noise_prob = 0 args.batch_size = 128 args.summary_frequency = 1000 args.aug_step = 1 args.constrained = False args.skip_last_layer_nl = False args.num_epochs = 1 args.save_every = 3 args.optimizer = 'momentum' args.hidden_layers = '512,512,1024' args.gpu_ids = '0' args.path_to_train_data = os.path.dirname(__file__) + '/Netflix/N1W_TRAIN' args.path_to_eval_data = os.path.dirname(__file__) + '/Netflix/N1W_VALID' args.non_linearity_type = 'selu' args.logdir = 'model_save' args.nooutput = True args.silent = True args.forcecuda = False args.forcecpu = False args.profile = False return args def getTrainCommandLineArgs() : parser = argparse.ArgumentParser(description='RecoEncoder') parser.add_argument('--lr', type=float, 
default=0.00001, metavar='N', help='learning rate') parser.add_argument('--weight_decay', type=float, default=0.0, metavar='N', help='L2 weight decay') parser.add_argument('--drop_prob', type=float, default=0.0, metavar='N', help='dropout drop probability') parser.add_argument('--noise_prob', type=float, default=0.0, metavar='N', help='noise probability') parser.add_argument('--batch_size', type=int, default=64, metavar='N', help='global batch size') parser.add_argument('--summary_frequency', type=int, default=100, metavar='N', help='how often to save summaries') parser.add_argument('--aug_step', type=int, default=-1, metavar='N', help='do data augmentation every X step') parser.add_argument('--constrained', action='store_true', help='constrained autoencoder') parser.add_argument('--skip_last_layer_nl', action='store_true', help='if present, decoder\'s last layer will not apply non-linearity function') parser.add_argument('--num_epochs', type=int, default=50, metavar='N', help='maximum number of epochs') parser.add_argument('--save_every', type=int, default=3, metavar='N', help='save every N number of epochs') parser.add_argument('--optimizer', type=str, default="momentum", metavar='N', help='optimizer kind: adam, momentum, adagrad or rmsprop') parser.add_argument('--hidden_layers', type=str, default="1024,512,512,128", metavar='N', help='hidden layer sizes, comma-separated') parser.add_argument('--gpu_ids', type=str, default="0", metavar='N', help='comma-separated gpu ids to use for data parallel training') parser.add_argument('--path_to_train_data', type=str, default="", metavar='N', help='Path to training data') parser.add_argument('--path_to_eval_data', type=str, default="", metavar='N', help='Path to evaluation data') parser.add_argument('--non_linearity_type', type=str, default="selu", metavar='N', help='type of the non-linearity used in activations') parser.add_argument('--logdir', type=str, default="logs", metavar='N', help='where to save model and write 
logs') parser.add_argument('--nooutput', action='store_true', help='disable writing output to file') parser.add_argument('--silent', action='store_true', help='disable all messages') parser.add_argument('--forcecuda', action='store_true', help='force cuda use') parser.add_argument('--forcecpu', action='store_true', help='force cpu use') parser.add_argument('--profile', action='store_true', help='enable profiler and stat print') args = parser.parse_args() return args def processTrainArgState(args) : if not args.silent: print(args) if args.forcecpu and args.forcecuda: print("Error, force cpu and cuda cannot both be set") quit() args.use_cuda = torch.cuda.is_available() # global flag if not args.silent: if args.use_cuda: print('GPU is available.') else: print('GPU is not available.') if args.use_cuda and args.forcecpu: args.use_cuda = False if not args.silent: if args.use_cuda: print('Running On CUDA') else: print('Running On CPU') return args def log_var_and_grad_summaries(logger, layers, global_step, prefix, log_histograms=False): """ Logs variable and grad stats for layer. 
Transfers data from GPU to CPU automatically :param logger: TB logger :param layers: param list :param global_step: global step for TB :param prefix: name prefix :param log_histograms: (default: False) whether or not log histograms :return: """ for ind, w in enumerate(layers): # Variables w_var = w.data.cpu().numpy() logger.scalar_summary("Variables/FrobNorm/{}_{}".format(prefix, ind), np.linalg.norm(w_var), global_step) if log_histograms: logger.histo_summary(tag="Variables/{}_{}".format(prefix, ind), values=w.data.cpu().numpy(), step=global_step) # Gradients w_grad = w.grad.data.cpu().numpy() logger.scalar_summary("Gradients/FrobNorm/{}_{}".format(prefix, ind), np.linalg.norm(w_grad), global_step) if log_histograms: logger.histo_summary(tag="Gradients/{}_{}".format(prefix, ind), values=w.grad.data.cpu().numpy(), step=global_step) def DoTrainEval(encoder, evaluation_data_layer, use_cuda): encoder.eval() denom = 0.0 total_epoch_loss = 0.0 for i, (eval, src) in enumerate(evaluation_data_layer.iterate_one_epoch_eval()): inputs = Variable(src.cuda().to_dense() if use_cuda else src.to_dense()) targets = Variable(eval.cuda().to_dense() if use_cuda else eval.to_dense()) outputs = encoder(inputs) loss, num_ratings = model.MSEloss(outputs, targets) total_epoch_loss += loss.item() denom += num_ratings.item() return sqrt(total_epoch_loss / denom) class DeepRecommenderTrainBenchmark: def __init__(self, device="cpu", jit=False, batch_size=256, processCommandLine = False): self.TrainInit(device, jit, batch_size, processCommandLine) def TrainInit(self, device="cpu", jit=False, batch_size=256, processCommandLine = False): # Force test to run in toy mode. Single call of fake data to model. self.toytest = True self.toybatch = batch_size # number of movies in netflix training set. 
self.toyvocab = 197951 self.toyinputs = torch.randn(self.toybatch, self.toyvocab) if (processCommandLine) : self.args = getTrainCommandLineArgs() else: self.args = getTrainBenchmarkArgs() if device == "cpu": forcecuda = False elif device == "cuda": forcecuda = True else: # unknown device string, quit init return self.args.forcecuda = forcecuda self.args.forcecpu = not forcecuda self.args = processTrainArgState(self.args) if self.toytest == False: self.logger = Logger(self.args.logdir) self.params = dict() self.params['batch_size'] = self.args.batch_size self.params['data_dir'] = self.args.path_to_train_data self.params['major'] = 'users' self.params['itemIdInd'] = 1 self.params['userIdInd'] = 0 if self.toytest == False: if not self.args.silent: print("Loading training data") self.data_layer = input_layer.UserItemRecDataProvider(params=self.params) if not self.args.silent: print("Data loaded") print("Total items found: {}".format(len(self.data_layer.data.keys()))) print("Vector dim: {}".format(self.data_layer.vector_dim)) print("Loading eval data") self.eval_params = copy.deepcopy(self.params) # must set eval batch size to 1 to make sure no examples are missed if self.toytest: self.rencoder = model.AutoEncoder(layer_sizes=[self.toyvocab] + [int(l) for l in self.args.hidden_layers.split(',')], nl_type=self.args.non_linearity_type, is_constrained=self.args.constrained, dp_drop_prob=self.args.drop_prob, last_layer_activations=not self.args.skip_last_layer_nl) else: self.eval_params['data_dir'] = self.args.path_to_eval_data self.eval_data_layer = input_layer.UserItemRecDataProvider(params=self.eval_params, user_id_map=self.data_layer.userIdMap, # the mappings are provided item_id_map=self.data_layer.itemIdMap) self.eval_data_layer.src_data = self.data_layer.data self.rencoder = model.AutoEncoder(layer_sizes=[self.data_layer.vector_dim] + [int(l) for l in self.args.hidden_layers.split(',')], nl_type=self.args.non_linearity_type, is_constrained=self.args.constrained, 
dp_drop_prob=self.args.drop_prob, last_layer_activations=not self.args.skip_last_layer_nl) os.makedirs(self.args.logdir, exist_ok=True) self.model_checkpoint = self.args.logdir + "/model" self.path_to_model = Path(self.model_checkpoint) if self.path_to_model.is_file(): print("Loading model from: {}".format(self.model_checkpoint)) self.rencoder.load_state_dict(torch.load(self.model_checkpoint)) if not self.args.silent: print('######################################################') print('######################################################') print('############# AutoEncoder Model: #####################') print(self.rencoder) print('######################################################') print('######################################################') if self.args.use_cuda: gpu_ids = [int(g) for g in self.args.gpu_ids.split(',')] if not self.args.silent: print('Using GPUs: {}'.format(gpu_ids)) if len(gpu_ids)>1: self.rencoder = nn.DataParallel(self.rencoder, device_ids=gpu_ids) self.rencoder = self.rencoder.cuda() self.toyinputs = self.toyinputs.to(device) if self.args.optimizer == "adam": self.optimizer = optim.Adam(self.rencoder.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay) elif self.args.optimizer == "adagrad": self.optimizer = optim.Adagrad(self.rencoder.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay) elif self.args.optimizer == "momentum": self.optimizer = optim.SGD(self.rencoder.parameters(), lr=self.args.lr, momentum=0.9, weight_decay=self.args.weight_decay) self.scheduler = MultiStepLR(self.optimizer, milestones=[24, 36, 48, 66, 72], gamma=0.5) elif args.optimizer == "rmsprop": self.optimizer = optim.RMSprop(self.rencoder.parameters(), lr=self.args.lr, momentum=0.9, weight_decay=self.args.weight_decay) else: raise ValueError('Unknown optimizer kind') self.t_loss = 0.0 self.t_loss_denom = 0.0 self.denom = 0.0 self.total_epoch_loss = 0.0 self.global_step = 0 if self.args.noise_prob > 0.0: self.dp = 
nn.Dropout(p=self.args.noise_prob) def DoTrain(self): self.rencoder.train() #if self.args.optimizer == "momentum": # self.scheduler.step() for i, mb in enumerate(self.data_layer.iterate_one_epoch()): inputs = Variable(mb.cuda().to_dense() if self.args.use_cuda else mb.to_dense()) self.optimizer.zero_grad() outputs = self.rencoder(inputs) loss, num_ratings = model.MSEloss(outputs, inputs) loss = loss / num_ratings loss.backward() self.optimizer.step() self.global_step += 1 self.t_loss += loss.item() self.t_loss_denom += 1 if not self.args.nooutput: if i % self.args.summary_frequency == 0: print('[%d, %5d] RMSE: %.7f' % (self.epoch, i, sqrt(self.t_loss / self.t_loss_denom))) self.logger.scalar_summary("Training_RMSE", sqrt(self.t_loss/self.t_loss_denom), self.global_step) self.t_loss = 0 self.t_loss_denom = 0.0 log_var_and_grad_summaries(self.logger, self.rencoder.encode_w, self.global_step, "Encode_W") log_var_and_grad_summaries(self.logger, self.rencoder.encode_b, self.global_step, "Encode_b") if not self.rencoder.is_constrained: log_var_and_grad_summaries(self.logger, self.rencoder.decode_w, self.global_step, "Decode_W") log_var_and_grad_summaries(self.logger, self.rencoder.decode_b, self.global_step, "Decode_b") self.total_epoch_loss += loss.item() self.denom += 1 #if args.aug_step > 0 and i % args.aug_step == 0 and i > 0: if self.args.aug_step > 0: # Magic data augmentation trick happen here for t in range(self.args.aug_step): inputs = Variable(outputs.data) if self.args.noise_prob > 0.0: inputs = dp(inputs) self.optimizer.zero_grad() outputs = self.rencoder(inputs) loss, num_ratings = model.MSEloss(outputs, inputs) loss = loss / num_ratings loss.backward() self.optimizer.step() def train(self, niter=1) : for self.epoch in range(niter): if self.toytest: self.rencoder.train() self.optimizer.zero_grad() outputs = self.rencoder(self.toyinputs) loss, num_ratings = model.MSEloss(outputs, self.toyinputs) loss = loss / num_ratings loss.backward() self.optimizer.step() 
continue if not self.args.silent: print('Doing epoch {} of {}'.format(self.epoch, niter)) print('Timing Start') e_start_time = time.time() self.DoTrain() if not self.args.silent: e_end_time = time.time() print('Timing End') if self.args.profile: print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10)) prof.export_chrome_trace("trace.json") print('Total epoch {} finished in {} seconds with TRAINING RMSE loss: {}' .format(self.epoch, e_end_time - e_start_time, sqrt(self.total_epoch_loss/self.denom))) if not self.args.silent: self.logger.scalar_summary("Training_RMSE_per_epoch", sqrt(self.total_epoch_loss/self.denom), self.epoch) self.logger.scalar_summary("Epoch_time", e_end_time - e_start_time, self.epoch) if self.epoch % self.args.save_every == 0 or self.epoch == self.args.num_epochs - 1: eval_loss = DoTrainEval(self.rencoder, self.eval_data_layer, self.args.use_cuda) print('Epoch {} EVALUATION LOSS: {}'.format(self.epoch, eval_loss)) self.logger.scalar_summary("EVALUATION_RMSE", eval_loss, self.epoch) print("Saving model to {}".format(self.model_checkpoint + ".epoch_"+str(self.epoch))) torch.save(self.rencoder.state_dict(), self.model_checkpoint + ".epoch_"+str(self.epoch)) if not self.args.nooutput: print("Saving model to {}".format(self.model_checkpoint + ".last")) torch.save(self.rencoder.state_dict(), self.model_checkpoint + ".last") # save to onnx dummy_input = Variable(torch.randn(self.params['batch_size'], self.data_layer.vector_dim).type(torch.float)) torch.onnx.export(self.rencoder.float(), dummy_input.cuda() if self.args.use_cuda else dummy_input, self.model_checkpoint + ".onnx", verbose=True) print("ONNX model saved to {}!".format(self.model_checkpoint + ".onnx")) def TimedTrainingRun(self): if self.args.profile: with profiler.profile(record_shapes=True, use_cuda=self.args.use_cuda) as prof: with profiler.record_function("training_epoch"): self.train(self.args.num_epochs) else: self.train(self.args.num_epochs) def main() : gpuTrain = 
DeepRecommenderTrainBenchmark(device = 'cuda') gpuTrain.TimedTrainingRun() gpuTrain = DeepRecommenderBenchmark(device = 'cpu') gpuTrain.TimedTrainingRun() if __name__ == '__main__': main()
41.39726
327
0.627234
4a011ffe6a61ef32e0b2da05c4c16ff644700492
22,820
py
Python
salt/pillar/makostack.py
yuriks/salt
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
[ "Apache-2.0", "MIT" ]
1
2020-03-31T22:51:16.000Z
2020-03-31T22:51:16.000Z
salt/pillar/makostack.py
yuriks/salt
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
[ "Apache-2.0", "MIT" ]
null
null
null
salt/pillar/makostack.py
yuriks/salt
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
[ "Apache-2.0", "MIT" ]
1
2021-09-30T07:00:01.000Z
2021-09-30T07:00:01.000Z
# -*- coding: utf-8 -*- ''' Simple and flexible YAML ext_pillar which can read pillar from within pillar. .. versionadded:: 2016.3.0 This custom saltstack ``ext_pillar`` is a direct ripoff of the 'stack' ext_pillar, simply ported to use mako instead of jinja2 for templating. It supports the following features: - multiple config files that are mako templates with support for ``pillar``, ``__grains__``, ``__salt__``, ``__opts__`` objects. - a config file renders as an ordered list of files. Unless absolute, the paths of these files are relative to the current config file - if absolute, they will be treated literally. - this list of files are read in order as mako templates with support for ``stack``, ``pillar``, ``__grains__``, ``__salt__``, ``__opts__`` objects. - all these rendered files are then parsed as ``yaml``. - then all yaml dicts are merged in order, with support for the following. merging strategies: ``merge-first``, ``merge-last``, ``remove``, and ``overwrite``. - stack config files can be matched based on ``pillar``, ``grains``, or ``opts`` values, which make it possible to support kind of self-contained environments. Configuration in Salt --------------------- Like any other external pillar, its configuration takes place through the ``ext_pillar`` key in the master config file. However, you can configure MakoStack in 3 different ways: Single config file ~~~~~~~~~~~~~~~~~~ This is the simplest option, you just need to set the path to your single MakoStack config file like below: .. code:: yaml ext_pillar: - makostack: /path/to/stack.cfg List of config files ~~~~~~~~~~~~~~~~~~~~ You can also provide a list of config files: .. 
code:: yaml ext_pillar: - makostack: - /path/to/stack1.cfg - /path/to/stack2.cfg Select config files through grains|pillar|opts matching ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can also opt for a much more flexible configuration: MakoStack allows one to select the config files for the current minion based on matching values from either grains, or pillar, or opts objects. Here is an example of such a configuration, which should speak by itself: .. code:: yaml ext_pillar: - makostack: pillar:environment: dev: /path/to/dev/stack.cfg prod: /path/to/prod/stack.cfg grains:custom:grain: value: - /path/to/stack1.cfg - /path/to/stack2.cfg opts:custom:opt: value: /path/to/stack0.cfg Grafting data from files to arbitrary namespaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ An extended syntax for config files permits defining "graft points" on a per-config-file basis. As an example, if the file foo.cfg would produce the following: .. code:: yaml foo: - bar - baz and you specified the cfg file as /path/to/foo.cfg:yummy:fur, the following would actually end up in pillar after all merging was complete: .. code:: yaml yummy: fur: foo: - bar - baz MakoStack configuration files ----------------------------- The config files that are referenced in the above ``ext_pillar`` configuration are mako templates which must render as a simple ordered list of ``yaml`` files that will then be merged to build pillar data. Unless an absolute path name is specified, the path of these ``yaml`` files is assumed to be relative to the directory containing the MakoStack config file. If a path begins with '/', however, it will be treated literally and can be anywhere on the filesystem. 
The following variables are available in mako templating of makostack configuration files: - ``pillar``: the pillar data (as passed by Salt to our ``ext_pillar`` function) - ``minion_id``: the minion id ;-) - ``__opts__``: a dictionary of mostly Salt configuration options - ``__grains__``: a dictionary of the grains of the minion making this pillar call - ``__salt__``: a dictionary of Salt module functions, useful so you don't have to duplicate functions that already exist (note: runs on the master) So you can use all the power of mako to build your list of ``yaml`` files that will be merged in pillar data. For example, you could have a MakoStack config file which looks like: .. code:: mako $ cat /path/to/stack/config.cfg core.yml osarchs/%{ __grains__['osarch'] }}.yml oscodenames/%{ __grains__['oscodename'] }.yml % for role in pillar.get('roles', []): roles/%{ role }.yml % endfor minions/%{ minion_id }.yml And the whole directory structure could look like: .. code:: $ tree /path/to/stack/ /path/to/stack/ ├── config.cfg ├── core.yml ├── osarchs/ │   ├── amd64.yml │   └── armhf.yml ├── oscodenames/ │   ├── wheezy.yml │   └── jessie.yml ├── roles/ │   ├── web.yml │   └── db.yml └── minions/ ├── test-1-dev.yml └── test-2-dev.yml Overall process --------------- In the above MakoStack configuration, given that test-1-dev minion is an amd64 platform running Debian Jessie, and which pillar ``roles`` is ``["db"]``, the following ``yaml`` files would be merged in order: - ``core.yml`` - ``osarchs/amd64.yml`` - ``oscodenames/jessie.yml`` - ``roles/db.yml`` - ``minions/test-1-dev.yml`` Before merging, every files above will be preprocessed as mako templates. 
The following variables are available in mako templating of ``yaml`` files: - ``stack``: the MakoStack pillar data object that has currently been merged (data from previous ``yaml`` files in MakoStack configuration) - ``pillar``: the pillar data (as passed by Salt to our ``ext_pillar`` function) - ``minion_id``: the minion id ;-) - ``__opts__``: a dictionary of mostly Salt configuration options - ``__grains__``: a dictionary of the grains of the minion making this pillar call - ``__salt__``: a dictionary of Salt module functions, useful so you don't have to duplicate functions that already exist (note: runs on the master) So you can use all the power of mako to build your pillar data, and even use other pillar values that has already been merged by MakoStack (from previous ``yaml`` files in MakoStack configuration) through the ``stack`` variable. Once a ``yaml`` file has been preprocessed by mako, we obtain a Python dict - let's call it ``yml_data`` - then, MakoStack will merge this ``yml_data`` dict in the main ``stack`` dict (which contains already merged MakoStack pillar data). By default, MakoStack will deeply merge ``yml_data`` in ``stack`` (similarly to the ``recurse`` salt ``pillar_source_merging_strategy``), but 3 merging strategies are currently available for you to choose (see next section). Once every ``yaml`` files have been processed, the ``stack`` dict will contain your whole own pillar data, merged in order by MakoStack. So MakoStack ``ext_pillar`` returns the ``stack`` dict, the contents of which Salt takes care to merge in with all of the other pillars and finally return the whole pillar to the minion. Merging strategies ------------------ The way the data from a new ``yaml_data`` dict is merged with the existing ``stack`` data can be controlled by specifying a merging strategy. Right now this strategy can either be ``merge-last`` (the default), ``merge-first``, ``remove``, or ``overwrite``. 
Note that scalar values like strings, integers, booleans, etc. are always evaluated using the ``overwrite`` strategy (other strategies don't make sense in that case). The merging strategy can be set by including a dict in the form of: .. code:: yaml __: <merging strategy> as the first item of the dict or list. This allows fine grained control over the merging process. ``merge-last`` (default) strategy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the ``merge-last`` strategy is selected (the default), then content of dict or list variables is merged recursively with previous definitions of this variable (similarly to the ``recurse`` salt ``pillar_source_merging_strategy``). This allows for extending previously defined data. ``merge-first`` strategy ~~~~~~~~~~~~~~~~~~~~~~~~ If the ``merge-first`` strategy is selected, then the content of dict or list variables are swapped between the ``yaml_data`` and ``stack`` objects before being merged recursively with the ``merge-last`` previous strategy. ``remove`` strategy ~~~~~~~~~~~~~~~~~~~ If the ``remove`` strategy is selected, then content of dict or list variables in ``stack`` are removed only if the corresponding item is present in the ``yaml_data`` dict. This allows for removing items from previously defined data. ``overwrite`` strategy ~~~~~~~~~~~~~~~~~~~~~~ If the ``overwrite`` strategy is selected, then the content of dict or list variables in ``stack`` is overwritten by the content of ``yaml_data`` dict. So this allows one to overwrite variables from previous definitions. Merging examples ---------------- Let's go through small examples that should clarify what's going on when a ``yaml_data`` dict is merged in the ``stack`` dict. When you don't specify any strategy, the default ``merge-last`` strategy is selected: +----------------------+-----------------------+-------------------------+ | ``stack`` | ``yaml_data`` | ``stack`` (after merge) | +======================+=======================+=========================+ | .. 
code:: yaml | .. code:: yaml | .. code:: yaml | | | | | | users: | users: | users: | | tom: | tom: | tom: | | uid: 500 | uid: 1000 | uid: 1000 | | roles: | roles: | roles: | | - sysadmin | - developer | - sysadmin | | root: | mat: | - developer | | uid: 0 | uid: 1001 | mat: | | | | uid: 1001 | | | | root: | | | | uid: 0 | +----------------------+-----------------------+-------------------------+ Then you can select a custom merging strategy using the ``__`` key in a dict: +----------------------+-----------------------+-------------------------+ | ``stack`` | ``yaml_data`` | ``stack`` (after merge) | +======================+=======================+=========================+ | .. code:: yaml | .. code:: yaml | .. code:: yaml | | | | | | users: | users: | users: | | tom: | __: merge-last | tom: | | uid: 500 | tom: | uid: 1000 | | roles: | uid: 1000 | roles: | | - sysadmin | roles: | - sysadmin | | root: | - developer | - developer | | uid: 0 | mat: | mat: | | | uid: 1001 | uid: 1001 | | | | root: | | | | uid: 0 | +----------------------+-----------------------+-------------------------+ | .. code:: yaml | .. code:: yaml | .. code:: yaml | | | | | | users: | users: | users: | | tom: | __: merge-first | tom: | | uid: 500 | tom: | uid: 500 | | roles: | uid: 1000 | roles: | | - sysadmin | roles: | - developer | | root: | - developer | - sysadmin | | uid: 0 | mat: | mat: | | | uid: 1001 | uid: 1001 | | | | root: | | | | uid: 0 | +----------------------+-----------------------+-------------------------+ | .. code:: yaml | .. code:: yaml | .. code:: yaml | | | | | | users: | users: | users: | | tom: | __: remove | root: | | uid: 500 | tom: | uid: 0 | | roles: | mat: | | | - sysadmin | | | | root: | | | | uid: 0 | | | +----------------------+-----------------------+-------------------------+ | .. code:: yaml | .. code:: yaml | .. 
code:: yaml | | | | | | users: | users: | users: | | tom: | __: overwrite | tom: | | uid: 500 | tom: | uid: 1000 | | roles: | uid: 1000 | roles: | | - sysadmin | roles: | - developer | | root: | - developer | mat: | | uid: 0 | mat: | uid: 1001 | | | uid: 1001 | | +----------------------+-----------------------+-------------------------+ You can also select a custom merging strategy using a ``__`` object in a list: +----------------+-------------------------+-------------------------+ | ``stack`` | ``yaml_data`` | ``stack`` (after merge) | +================+=========================+=========================+ | .. code:: yaml | .. code:: yaml | .. code:: yaml | | | | | | users: | users: | users: | | - tom | - __: merge-last | - tom | | - root | - mat | - root | | | | - mat | +----------------+-------------------------+-------------------------+ | .. code:: yaml | .. code:: yaml | .. code:: yaml | | | | | | users: | users: | users: | | - tom | - __: merge-first | - mat | | - root | - mat | - tom | | | | - root | +----------------+-------------------------+-------------------------+ | .. code:: yaml | .. code:: yaml | .. code:: yaml | | | | | | users: | users: | users: | | - tom | - __: remove | - root | | - root | - mat | | | | - tom | | +----------------+-------------------------+-------------------------+ | .. code:: yaml | .. code:: yaml | .. 
code:: yaml | | | | | | users: | users: | users: | | - tom | - __: overwrite | - mat | | - root | - mat | | +----------------+-------------------------+-------------------------+ ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import functools import os import logging # Import Salt libs import salt.utils.yaml # Import 3rd-party libs from salt.ext import six try: from mako.lookup import TemplateLookup from mako import exceptions HAS_MAKO = True except ImportError: HAS_MAKO = False log = logging.getLogger(__name__) strategies = ('overwrite', 'merge-first', 'merge-last', 'remove') __virtualname__ = 'makostack' # Only load in this module if the EC2 configurations are in place def __virtual__(): ''' Set up the libcloud functions and check for EC2 configurations ''' if HAS_MAKO is True: return __virtualname__ return False def ext_pillar(minion_id, pillar, *args, **kwargs): import salt.utils.data stack = {} stack_config_files = list(args) traverse = { 'pillar': functools.partial(salt.utils.data.traverse_dict_and_list, pillar), 'grains': functools.partial(salt.utils.data.traverse_dict_and_list, __grains__), 'opts': functools.partial(salt.utils.data.traverse_dict_and_list, __opts__), } for matcher, matchs in six.iteritems(kwargs): t, matcher = matcher.split(':', 1) if t not in traverse: raise Exception('Unknown traverse option "{0}", ' 'should be one of {1}'.format(t, traverse.keys())) cfgs = matchs.get(traverse[t](matcher, None), []) if not isinstance(cfgs, list): cfgs = [cfgs] stack_config_files += cfgs for cfg in stack_config_files: if ':' in cfg: cfg, namespace = cfg.split(':', 1) else: namespace = None if not os.path.isfile(cfg): log.warning('Ignoring Stack cfg "%s": file does not exist', cfg) continue stack = _process_stack_cfg(cfg, stack, minion_id, pillar, namespace) return stack def _process_stack_cfg(cfg, stack, minion_id, pillar, namespace): basedir, filename = os.path.split(cfg) lookup = 
TemplateLookup(directories=[basedir]) tops = lookup.get_template(filename).render(__opts__=__opts__, __salt__=__salt__, __grains__=__grains__, minion_id=minion_id, pillar=pillar, stack=stack) for path in _parse_top_cfg(tops): dirs = [basedir] if path.startswith('/'): dirs += ['/'] lookup = TemplateLookup(directories=dirs) try: p = lookup.get_template(path).render(__opts__=__opts__, __salt__=__salt__, __grains__=__grains__, minion_id=minion_id, pillar=pillar, stack=stack) obj = salt.utils.yaml.safe_load(p) if not isinstance(obj, dict): log.info( 'Ignoring Stack template "%s": Can\'t parse as a valid ' 'yaml dictionary', path ) continue if namespace: for sub in namespace.split(':')[::-1]: obj = {sub: obj} stack = _merge_dict(stack, obj) log.info('Stack template "%s" parsed', path) except exceptions.TopLevelLookupException as e: log.info('Stack template "%s" not found.', path) continue except Exception as e: # pylint: disable=broad-except log.info('Ignoring Stack template "%s":', path) log.info('%s', exceptions.text_error_template().render()) continue return stack def _cleanup(obj): if obj: if isinstance(obj, dict): obj.pop('__', None) for k, v in six.iteritems(obj): obj[k] = _cleanup(v) elif isinstance(obj, list) and isinstance(obj[0], dict) \ and '__' in obj[0]: del obj[0] return obj def _merge_dict(stack, obj): strategy = obj.pop('__', 'merge-last') if strategy not in strategies: raise Exception('Unknown strategy "{0}", should be one of {1}'.format( strategy, strategies)) if strategy == 'overwrite': return _cleanup(obj) else: for k, v in six.iteritems(obj): if strategy == 'remove': stack.pop(k, None) continue if k in stack: if strategy == 'merge-first': # merge-first is same as merge-last but the other way round # so let's switch stack[k] and v stack_k = stack[k] stack[k] = _cleanup(v) v = stack_k if type(stack[k]) != type(v): log.debug( 'Force overwrite, types differ: \'%s\' != \'%s\'', stack[k], v ) stack[k] = _cleanup(v) elif isinstance(v, dict): stack[k] = 
_merge_dict(stack[k], v) elif isinstance(v, list): stack[k] = _merge_list(stack[k], v) else: stack[k] = v else: stack[k] = _cleanup(v) return stack def _merge_list(stack, obj): strategy = 'merge-last' if obj and isinstance(obj[0], dict) and '__' in obj[0]: strategy = obj[0]['__'] del obj[0] if strategy not in strategies: raise Exception('Unknown strategy "{0}", should be one of {1}'.format( strategy, strategies)) if strategy == 'overwrite': return obj elif strategy == 'remove': return [item for item in stack if item not in obj] elif strategy == 'merge-first': return obj + stack else: return stack + obj def _parse_top_cfg(content): ''' Allow top_cfg to be YAML ''' try: obj = salt.utils.yaml.safe_load(content) if isinstance(obj, list): return obj except Exception as e: # pylint: disable=broad-except pass return content.splitlines()
40.822898
88
0.478834
4a01205e4b6d5d8ef32f2f11ae9af2fd2571c7bc
6,079
py
Python
onnx/backend/test/case/node/rnn.py
How-Wang/onnx
c940fa3fea84948e46603cab2f86467291443beb
[ "Apache-2.0" ]
1
2022-02-04T07:45:14.000Z
2022-02-04T07:45:14.000Z
onnx/backend/test/case/node/rnn.py
How-Wang/onnx
c940fa3fea84948e46603cab2f86467291443beb
[ "Apache-2.0" ]
null
null
null
onnx/backend/test/case/node/rnn.py
How-Wang/onnx
c940fa3fea84948e46603cab2f86467291443beb
[ "Apache-2.0" ]
null
null
null
# SPDX-License-Identifier: Apache-2.0 from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np # type: ignore from typing import Any, Tuple import onnx from ..base import Base from . import expect class RNN_Helper(): def __init__(self, **params: Any) -> None: # RNN Input Names X = str('X') W = str('W') R = str('R') B = str('B') H_0 = str('initial_h') LAYOUT = str('layout') required_inputs = [X, W, R] for i in required_inputs: assert i in params, "Missing Required Input: {0}".format(i) self.num_directions = params[str(W)].shape[0] if self.num_directions == 1: for k in params.keys(): if k != X: params[k] = np.squeeze(params[k], axis=0) hidden_size = params[R].shape[-1] batch_size = params[X].shape[1] layout = params[LAYOUT] if LAYOUT in params else 0 x = params[X] x = x if layout == 0 else np.swapaxes(x, 0, 1) b = params[B] if B in params else np.zeros(2 * hidden_size, dtype=np.float32) h_0 = params[H_0] if H_0 in params else np.zeros((batch_size, hidden_size), dtype=np.float32) self.X = x self.W = params[W] self.R = params[R] self.B = b self.H_0 = h_0 self.LAYOUT = layout else: raise NotImplementedError() def f(self, x: np.ndarray) -> np.ndarray: return np.tanh(x) def step(self) -> Tuple[np.ndarray, np.ndarray]: seq_length = self.X.shape[0] hidden_size = self.H_0.shape[-1] batch_size = self.X.shape[1] Y = np.empty([seq_length, self.num_directions, batch_size, hidden_size]) h_list = [] H_t = self.H_0 for x in np.split(self.X, self.X.shape[0], axis=0): H = self.f(np.dot(x, np.transpose(self.W)) + np.dot(H_t, np.transpose(self.R)) + np.add( *np.split(self.B, 2))) h_list.append(H) H_t = H concatenated = np.concatenate(h_list) if self.num_directions == 1: Y[:, 0, :, :] = concatenated if self.LAYOUT == 0: Y_h = Y[-1] else: Y = np.transpose(Y, [2, 0, 1, 3]) Y_h = Y[:, :, -1, :] return Y, Y_h class RNN(Base): @staticmethod def export_defaults() -> None: input = 
np.array([[[1., 2.], [3., 4.], [5., 6.]]]).astype(np.float32) input_size = 2 hidden_size = 4 weight_scale = 0.1 node = onnx.helper.make_node( 'RNN', inputs=['X', 'W', 'R'], outputs=['', 'Y_h'], hidden_size=hidden_size ) W = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32) R = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32) rnn = RNN_Helper(X=input, W=W, R=R) _, Y_h = rnn.step() expect(node, inputs=[input, W, R], outputs=[Y_h.astype(np.float32)], name='test_simple_rnn_defaults') @staticmethod def export_initial_bias() -> None: input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(np.float32) input_size = 3 hidden_size = 5 custom_bias = 0.1 weight_scale = 0.1 node = onnx.helper.make_node( 'RNN', inputs=['X', 'W', 'R', 'B'], outputs=['', 'Y_h'], hidden_size=hidden_size ) W = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32) R = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32) # Adding custom bias W_B = custom_bias * np.ones((1, hidden_size)).astype(np.float32) R_B = np.zeros((1, hidden_size)).astype(np.float32) B = np.concatenate((W_B, R_B), axis=1) rnn = RNN_Helper(X=input, W=W, R=R, B=B) _, Y_h = rnn.step() expect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_simple_rnn_with_initial_bias') @staticmethod def export_seq_length() -> None: input = np.array([[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]], [[10., 11., 12.], [13., 14., 15.], [16., 17., 18.]]]).astype(np.float32) input_size = 3 hidden_size = 5 node = onnx.helper.make_node( 'RNN', inputs=['X', 'W', 'R', 'B'], outputs=['', 'Y_h'], hidden_size=hidden_size ) W = np.random.randn(1, hidden_size, input_size).astype(np.float32) R = np.random.randn(1, hidden_size, hidden_size).astype(np.float32) # Adding custom bias W_B = np.random.randn(1, hidden_size).astype(np.float32) R_B = np.random.randn(1, hidden_size).astype(np.float32) B = np.concatenate((W_B, R_B), axis=1) rnn = 
RNN_Helper(X=input, W=W, R=R, B=B) _, Y_h = rnn.step() expect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_rnn_seq_length') @staticmethod def export_batchwise() -> None: input = np.array([[[1., 2.]], [[3., 4.]], [[5., 6.]]]).astype(np.float32) input_size = 2 hidden_size = 4 weight_scale = 0.5 layout = 1 node = onnx.helper.make_node( 'RNN', inputs=['X', 'W', 'R'], outputs=['Y', 'Y_h'], hidden_size=hidden_size, layout=layout ) W = weight_scale * np.ones((1, hidden_size, input_size)).astype(np.float32) R = weight_scale * np.ones((1, hidden_size, hidden_size)).astype(np.float32) rnn = RNN_Helper(X=input, W=W, R=R, layout=layout) Y, Y_h = rnn.step() expect(node, inputs=[input, W, R], outputs=[Y.astype(np.float32), Y_h.astype(np.float32)], name='test_simple_rnn_batchwise')
31.994737
132
0.536272
4a0121e3de116896df7f920580869efca35249f4
638
py
Python
manage.py
mohan488/restaurant_choices
03663aa2fd467ed3dfe0fa49097a68a0201adc37
[ "MIT" ]
null
null
null
manage.py
mohan488/restaurant_choices
03663aa2fd467ed3dfe0fa49097a68a0201adc37
[ "MIT" ]
null
null
null
manage.py
mohan488/restaurant_choices
03663aa2fd467ed3dfe0fa49097a68a0201adc37
[ "MIT" ]
null
null
null
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restaurant_choices.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
29
82
0.688088
4a0122409dae530dac8fe1b1ac5d2bb914a01cb8
3,500
py
Python
maipc/RNN/processing.py
nPironio/maipc
776e1d53063005d89ce463883c20ea5519a8b8d0
[ "BSD-3-Clause" ]
null
null
null
maipc/RNN/processing.py
nPironio/maipc
776e1d53063005d89ce463883c20ea5519a8b8d0
[ "BSD-3-Clause" ]
null
null
null
maipc/RNN/processing.py
nPironio/maipc
776e1d53063005d89ce463883c20ea5519a8b8d0
[ "BSD-3-Clause" ]
null
null
null
from typing import List, Dict import madmom as md import numpy as np NN_DTYPE = np.float32 def get_blstms(RNN: str) -> List[md.ml.nn.layers.BidirectionalLayer]: """ Get the input and bidirectional LSTM layers of a trained model Args: RNN: path to madmom trained NN Returns: list of layers """ rnn = md.ml.nn.NeuralNetwork.load(RNN) return rnn.layers[:3] def layers_values(RNN_input: List[float], blstms: List[md.ml.nn.layers.BidirectionalLayer], ppty_name: str) -> List[Dict[str, List[np.ndarray]]]: """ Get internal value activations for an input Args: RNN_input: input for the RNN blstms: list of the bidirectional layers of the network ppty_name: the type of values to get Returns: values organized by layer, direction (fwd/bwd) and frame """ layer_input = RNN_input layer_values = [] for bi_layer in blstms: layer_input, values = get_bidirectional_values(bi_layer, layer_input, ppty_name) layer_values.append(values) return layer_values def get_bidirectional_values(bi_layer: md.ml.nn.layers.BidirectionalLayer, layer_input: List[float], ppty_name: str) -> Dict[str, List[np.ndarray]]: """ Get the activation values for the forward and backward layer of a bidirectional layer Args: bi_layer: bidirectional layer layer_input: input to process by the layer ppty_name: the type of values to get Returns: dictionary with forward and backward layer activation values """ fwd, fwd_values = neurons_values(bi_layer.fwd_layer, layer_input, ppty_name) # also activate with reverse input bwd, bwd_values = neurons_values(bi_layer.bwd_layer, layer_input, ppty_name) # stack data output = np.hstack((fwd, bwd[::-1])) return output , {'forward': fwd_values, 'backward': bwd_values} def neurons_values(lstm_layer: md.ml.nn.layers.LSTMLayer, data: List[float], ppty_name: str) -> List[np.ndarray]: """ Get the activation values for a LSTM layer Args: lstm_layer: LSTM layer data: data to process ppty_name: the type of values to get Returns: List where each position is the activation value for a frame """ # 
init arrays size = len(data) # output matrix for the whole sequence out = np.zeros((size, lstm_layer.cell.bias.size), dtype=NN_DTYPE) # output list of internal values ppty_values = {'cell_state': [], 'output': []} # process the input data for i in range(size): # cache input data data_ = data[i] # input gate: # operate on current data, previous output and state ig = lstm_layer.input_gate.activate(data_, lstm_layer._prev, lstm_layer._state) # forget gate: # operate on current data, previous output and state fg = lstm_layer.forget_gate.activate(data_, lstm_layer._prev, lstm_layer._state) # cell: # operate on current data and previous output cell = lstm_layer.cell.activate(data_, lstm_layer._prev) # internal state: # weight the cell with the input gate # and add the previous state weighted by the forget gate lstm_layer._state = cell * ig + lstm_layer._state * fg # output gate: # operate on current data, previous output and current state og = lstm_layer.output_gate.activate(data_, lstm_layer._prev, lstm_layer._state) # output: # apply activation function to state and weight by output gate out[i] = lstm_layer.activation_fn(lstm_layer._state) * og # set reference to current output lstm_layer._prev = out[i] # store internal values ppty_values['cell_state'].append(cell) ppty_values['output'].append(out[i]) return out, ppty_values[ppty_name]
33.653846
113
0.742
4a012266a945c4f73140631f54688f9ac0930f21
4,703
py
Python
nssrc/com/citrix/netscaler/nitro/resource/config/network/rnatparam.py
mahabs/nitro
be74e1e177f5c205c16126bc9b023f2348788409
[ "Apache-2.0" ]
null
null
null
nssrc/com/citrix/netscaler/nitro/resource/config/network/rnatparam.py
mahabs/nitro
be74e1e177f5c205c16126bc9b023f2348788409
[ "Apache-2.0" ]
null
null
null
nssrc/com/citrix/netscaler/nitro/resource/config/network/rnatparam.py
mahabs/nitro
be74e1e177f5c205c16126bc9b023f2348788409
[ "Apache-2.0" ]
null
null
null
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class rnatparam(base_resource) : """ Configuration for RNAT parameter resource. """ def __init__(self) : self._tcpproxy = "" self._srcippersistency = "" @property def tcpproxy(self) : """Enable TCP proxy, which enables the NetScaler appliance to optimize the RNAT TCP traffic by using Layer 4 features.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED. """ try : return self._tcpproxy except Exception as e: raise e @tcpproxy.setter def tcpproxy(self, tcpproxy) : """Enable TCP proxy, which enables the NetScaler appliance to optimize the RNAT TCP traffic by using Layer 4 features.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED """ try : self._tcpproxy = tcpproxy except Exception as e: raise e @property def srcippersistency(self) : """Enable source ip persistency, which enables the NetScaler appliance to use the RNAT ips using source ip.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED. 
""" try : return self._srcippersistency except Exception as e: raise e @srcippersistency.setter def srcippersistency(self, srcippersistency) : """Enable source ip persistency, which enables the NetScaler appliance to use the RNAT ips using source ip.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED """ try : self._srcippersistency = srcippersistency except Exception as e: raise e def _get_nitro_response(self, service, response) : """ converts nitro response into object and returns the object array in case of get request. """ try : result = service.payload_formatter.string_to_resource(rnatparam_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.rnatparam except Exception as e : raise e def _get_object_name(self) : """ Returns the value of object identifier argument """ try : return None except Exception as e : raise e @classmethod def update(cls, client, resource) : """ Use this API to update rnatparam. """ try : if type(resource) is not list : updateresource = rnatparam() updateresource.tcpproxy = resource.tcpproxy updateresource.srcippersistency = resource.srcippersistency return updateresource.update_resource(client) except Exception as e : raise e @classmethod def unset(cls, client, resource, args) : """ Use this API to unset the properties of rnatparam resource. Properties that need to be unset are specified in args array. """ try : if type(resource) is not list : unsetresource = rnatparam() return unsetresource.unset_resource(client, args) except Exception as e : raise e @classmethod def get(cls, client, name="", option_="") : """ Use this API to fetch all the rnatparam resources that are configured on netscaler. 
""" try : if not name : obj = rnatparam() response = obj.get_resources(client, option_) return response except Exception as e : raise e class Srcippersistency: ENABLED = "ENABLED" DISABLED = "DISABLED" class Tcpproxy: ENABLED = "ENABLED" DISABLED = "DISABLED" class rnatparam_response(base_response) : def __init__(self, length=1) : self.rnatparam = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.rnatparam = [rnatparam() for _ in range(length)]
31.777027
188
0.729109
4a0123445b0e21f1682293beec7db416b9f03e54
9,351
py
Python
mkt/versions/tests/test_models.py
muffinresearch/zamboni
045a6f07c775b99672af6d9857d295ed02fe5dd9
[ "BSD-3-Clause" ]
null
null
null
mkt/versions/tests/test_models.py
muffinresearch/zamboni
045a6f07c775b99672af6d9857d295ed02fe5dd9
[ "BSD-3-Clause" ]
null
null
null
mkt/versions/tests/test_models.py
muffinresearch/zamboni
045a6f07c775b99672af6d9857d295ed02fe5dd9
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- import os.path from django.conf import settings import mock import path from nose.tools import eq_ import amo import amo.tests from mkt.files.models import File, Platform from mkt.files.tests.test_models import UploadTest as BaseUploadTest from mkt.site.fixtures import fixture from mkt.versions.compare import MAXVERSION, version_dict, version_int from mkt.versions.models import Version from mkt.webapps.models import Addon def test_version_int(): """Tests that version_int. Corrects our versions.""" eq_(version_int('3.5.0a1pre2'), 3050000001002) eq_(version_int(''), 200100) eq_(version_int('0'), 200100) eq_(version_int('*'), 99000000200100) eq_(version_int(MAXVERSION), MAXVERSION) eq_(version_int(MAXVERSION + 1), MAXVERSION) eq_(version_int('9999999'), MAXVERSION) def test_version_int_compare(): eq_(version_int('3.6.*'), version_int('3.6.99')) assert version_int('3.6.*') > version_int('3.6.8') def test_version_asterix_compare(): eq_(version_int('*'), version_int('99')) assert version_int('98.*') < version_int('*') eq_(version_int('5.*'), version_int('5.99')) assert version_int('5.*') > version_int('5.0.*') def test_version_dict(): eq_(version_dict('5.0'), {'major': 5, 'minor1': 0, 'minor2': None, 'minor3': None, 'alpha': None, 'alpha_ver': None, 'pre': None, 'pre_ver': None}) def test_version_int_unicode(): eq_(version_int(u'\u2322 ugh stephend'), 200100) class TestVersion(BaseUploadTest, amo.tests.TestCase): fixtures = fixture('webapp_337141', 'platform_all') def setUp(self): self._rename = path.path.rename self.version = Version.objects.latest('id') def test_developer_name(self): version = Version.objects.latest('id') version._developer_name = u'M€lâ' eq_(version.developer_name, u'M€lâ') eq_(Version(_developer_name=u'M€lâ').developer_name, u'M€lâ') @mock.patch('mkt.files.utils.parse_addon') def test_developer_name_from_upload(self, parse_addon): parse_addon.return_value = { 'version': '42.0', 'developer_name': u'Mýself' } addon = 
Addon.objects.get(pk=337141) # Note: we need a valid FileUpload instance, but in the end we are not # using its contents since we are mocking parse_addon(). path = os.path.join(settings.ROOT, 'mkt', 'developers', 'tests', 'addons', 'mozball.webapp') upload = self.get_upload(abspath=path) platform = Platform.objects.get(pk=amo.PLATFORM_ALL.id) version = Version.from_upload(upload, addon, [platform]) eq_(version.version, '42.0') eq_(version.developer_name, u'Mýself') @mock.patch('mkt.files.utils.parse_addon') def test_long_developer_name_from_upload(self, parse_addon): truncated_developer_name = u'ý' * 255 long_developer_name = truncated_developer_name + u'àààà' parse_addon.return_value = { 'version': '42.1', 'developer_name': long_developer_name } addon = Addon.objects.get(pk=337141) # Note: we need a valid FileUpload instance, but in the end we are not # using its contents since we are mocking parse_addon(). path = os.path.join(settings.ROOT, 'mkt', 'developers', 'tests', 'addons', 'mozball.webapp') upload = self.get_upload(abspath=path) platform = Platform.objects.get(pk=amo.PLATFORM_ALL.id) version = Version.from_upload(upload, addon, [platform]) eq_(version.version, '42.1') eq_(version.developer_name, truncated_developer_name) def test_is_privileged_hosted_app(self): addon = Addon.objects.get(pk=337141) eq_(addon.current_version.is_privileged, False) @mock.patch('mkt.webapps.models.Webapp.get_manifest_json') def test_is_privileged_app(self, get_manifest_json): get_manifest_json.return_value = { 'type': 'privileged' } addon = Addon.objects.get(pk=337141) addon.update(is_packaged=True) eq_(addon.current_version.is_privileged, True) @mock.patch('mkt.webapps.models.Webapp.get_manifest_json') def test_is_privileged_non_privileged_app(self, get_manifest_json): get_manifest_json.return_value = { } addon = Addon.objects.get(pk=337141) addon.update(is_packaged=True) eq_(addon.current_version.is_privileged, False) def test_delete(self): version = Version.objects.all()[0] 
eq_(Version.objects.count(), 1) version.delete() eq_(Version.objects.count(), 0) eq_(Version.with_deleted.count(), 1) # Ensure deleted version's files get disabled. eq_(version.all_files[0].status, amo.STATUS_DISABLED) def test_supported_platforms(self): assert amo.PLATFORM_ALL in self.version.supported_platforms, ( 'Missing PLATFORM_ALL') def test_major_minor(self): """Check that major/minor/alpha is getting set.""" v = Version(version='3.0.12b2') eq_(v.major, 3) eq_(v.minor1, 0) eq_(v.minor2, 12) eq_(v.minor3, None) eq_(v.alpha, 'b') eq_(v.alpha_ver, 2) v = Version(version='3.6.1apre2+') eq_(v.major, 3) eq_(v.minor1, 6) eq_(v.minor2, 1) eq_(v.alpha, 'a') eq_(v.pre, 'pre') eq_(v.pre_ver, 2) v = Version(version='') eq_(v.major, None) eq_(v.minor1, None) eq_(v.minor2, None) eq_(v.minor3, None) def test_has_files(self): assert self.version.has_files, 'Version with files not recognized.' self.version.files.all().delete() self.version = Version.objects.latest('id') assert not self.version.has_files, ( 'Version without files not recognized.') def _get_version(self, status): v = Version() v.all_files = [mock.Mock()] v.all_files[0].status = status return v @mock.patch('mkt.versions.models.storage') def test_version_delete(self, storage_mock): self.version.delete() addon = Addon.objects.get(pk=337141) assert addon assert not Version.objects.filter(addon=addon).exists() assert Version.with_deleted.filter(addon=addon).exists() assert not storage_mock.delete.called @mock.patch('mkt.versions.models.storage') def test_packaged_version_delete(self, storage_mock): addon = Addon.objects.get(pk=337141) addon.update(is_packaged=True) version = addon.current_version version.delete() assert not Version.objects.filter(addon=addon).exists() assert Version.with_deleted.filter(addon=addon).exists() assert storage_mock.delete.called def test_version_delete_files(self): eq_(self.version.files.all()[0].status, amo.STATUS_PUBLIC) self.version.delete() eq_(self.version.files.all()[0].status, 
amo.STATUS_DISABLED) @mock.patch('mkt.files.models.File.hide_disabled_file') def test_new_version_disable_old_unreviewed(self, hide_mock): addon = Addon.objects.get(pk=337141) # The status doesn't change for public files. qs = File.objects.filter(version=addon.current_version) eq_(qs.all()[0].status, amo.STATUS_PUBLIC) Version.objects.create(addon=addon) eq_(qs.all()[0].status, amo.STATUS_PUBLIC) assert not hide_mock.called qs.update(status=amo.STATUS_PENDING) version = Version.objects.create(addon=addon) version.disable_old_files() eq_(qs.all()[0].status, amo.STATUS_DISABLED) assert hide_mock.called def test_large_version_int(self): # This version will fail to be written to the version_int # table because the resulting int is bigger than mysql bigint. version = Version(addon=Addon.objects.get(pk=337141)) version.version = '9223372036854775807' version.save() eq_(version.version_int, None) def _reset_version(self, version): version.all_files[0].status = amo.STATUS_PUBLIC version.deleted = False def test_version_is_public(self): addon = Addon.objects.get(id=337141) version = amo.tests.version_factory(addon=addon) # Base test. Everything is in order, the version should be public. eq_(version.is_public(), True) # Non-public file. self._reset_version(version) version.all_files[0].status = amo.STATUS_DISABLED eq_(version.is_public(), False) # Deleted version. self._reset_version(version) version.deleted = True eq_(version.is_public(), False) # Non-public addon. self._reset_version(version) with mock.patch('mkt.webapps.models.Addon.is_public') as is_addon_public: is_addon_public.return_value = False eq_(version.is_public(), False) def test_app_feature_creation_app(self): app = Addon.objects.create(type=amo.ADDON_WEBAPP) ver = Version.objects.create(addon=app) assert ver.features, 'AppFeatures was not created with version.'
35.154135
81
0.64977
4a0124266c7321b851f607e697f6037e3800060c
2,373
py
Python
projects/causal_scene_generation/causal_model/game_characters/vae_svi/models/encoder.py
amoskowitz14/causalML
6c21033b05c82b3ba55efce6258c38669287eaa9
[ "MIT" ]
354
2018-12-21T15:20:21.000Z
2021-01-02T14:48:51.000Z
projects/causal_scene_generation/causal_model/game_characters/vae_svi/models/encoder.py
amoskowitz14/causalML
6c21033b05c82b3ba55efce6258c38669287eaa9
[ "MIT" ]
5
2021-04-15T20:38:12.000Z
2022-03-12T00:52:29.000Z
projects/causal_scene_generation/causal_model/game_characters/vae_svi/models/encoder.py
amoskowitz14/causalML
6c21033b05c82b3ba55efce6258c38669287eaa9
[ "MIT" ]
112
2019-05-21T22:10:43.000Z
2020-12-29T05:52:07.000Z
import torch import torch.nn as nn from utils.utils import Flatten def get_cnn_encoder(image_channels=3): return nn.Sequential( nn.Conv2d(image_channels, 8, kernel_size=5, stride=2), nn.BatchNorm2d(8), nn.ReLU(), nn.Conv2d(8, 16, kernel_size=5, stride=2), nn.BatchNorm2d(16), nn.ReLU(), nn.Conv2d(16, 32, kernel_size=3, stride=2), nn.BatchNorm2d(32), nn.ReLU(), nn.Conv2d(32, 64, kernel_size=3, stride=2), nn.BatchNorm2d(64), nn.ReLU(), nn.Conv2d(64, 128, kernel_size=3, stride=2), nn.BatchNorm2d(128), nn.ReLU(), nn.Conv2d(128, 256, kernel_size=3, stride=2), nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 512, kernel_size=2, stride=2), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 1024, kernel_size=2, stride=1), nn.BatchNorm2d(1024), nn.ReLU(), Flatten() ) class Encoder(nn.Module): def __init__(self, z_dim, hidden_dim=1024, num_labels=17): super().__init__() self.cnn = get_cnn_encoder(image_channels=3) # Currently this returns only for 1024 hidden dimensions. Need to change that # setup the two linear transformations used self.fc21 = nn.Linear(hidden_dim+num_labels, z_dim) self.fc22 = nn.Linear(hidden_dim+num_labels, z_dim) # setup the non-linearities self.softplus = nn.Softplus() def forward(self, x,y): ''' Here if i get an array of [xs, ys] what should i do ? xs is gonna be of the shape (32, 3, 400,400) and ys is gonna be of the shape (32,10) ''' # define the forward computation on the image x # first shape the mini-batch to have pixels in the rightmost dimension #x = x.reshape(-1, 40000) # then compute the hidden units hidden = self.cnn(x) hidden = self.softplus(hidden) # This should return a [1, 1024] vector. # then return a mean vector and a (positive) square root covariance # each of size batch_size x z_dim hidden = torch.cat([hidden, y], dim=-1) z_loc = self.fc21(hidden) z_scale = torch.exp(self.fc22(hidden)) return z_loc, z_scale
38.274194
130
0.578592
4a01242b1bf68033e31c544442114ae45f0a33a2
3,020
py
Python
lib/jammy/models/azurefirewall/azure_firewall_nat_rule.py
girishmotwani/jammy
58ca1e741800781e6bebc761a0a8e4d4f8ef9bfb
[ "MIT" ]
2
2020-11-06T07:46:37.000Z
2021-01-14T01:41:04.000Z
lib/jammy/models/azurefirewall/azure_firewall_nat_rule.py
girishmotwani/jammy
58ca1e741800781e6bebc761a0a8e4d4f8ef9bfb
[ "MIT" ]
3
2020-12-28T20:43:11.000Z
2020-12-28T20:45:25.000Z
lib/jammy/models/azurefirewall/azure_firewall_nat_rule.py
girishmotwani/jammy
58ca1e741800781e6bebc761a0a8e4d4f8ef9bfb
[ "MIT" ]
1
2021-01-14T01:41:10.000Z
2021-01-14T01:41:10.000Z
# coding=utf-8 # -------------------------------------------------------------------------- # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class AzureFirewallNatRule(Model): """Properties of a NAT rule. :param name: Name of the NAT rule. :type name: str :param description: Description of the rule. :type description: str :param source_addresses: List of source IP addresses for this rule. :type source_addresses: list[str] :param destination_addresses: List of destination IP addresses for this rule. Supports IP ranges, prefixes, and service tags. :type destination_addresses: list[str] :param destination_ports: List of destination ports. :type destination_ports: list[str] :param protocols: Array of AzureFirewallNetworkRuleProtocols applicable to this NAT rule. :type protocols: list[str or ~azurefirewall.models.AzureFirewallNetworkRuleProtocol] :param translated_address: The translated address for this NAT rule. :type translated_address: str :param translated_port: The translated port for this NAT rule. :type translated_port: str :param translated_fqdn: The translated FQDN for this NAT rule. :type translated_fqdn: str :param source_ip_groups: List of source IpGroups for this rule. 
:type source_ip_groups: list[str] """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'}, 'destination_addresses': {'key': 'destinationAddresses', 'type': '[str]'}, 'destination_ports': {'key': 'destinationPorts', 'type': '[str]'}, 'protocols': {'key': 'protocols', 'type': '[str]'}, 'translated_address': {'key': 'translatedAddress', 'type': 'str'}, 'translated_port': {'key': 'translatedPort', 'type': 'str'}, 'translated_fqdn': {'key': 'translatedFqdn', 'type': 'str'}, 'source_ip_groups': {'key': 'sourceIpGroups', 'type': '[str]'}, } def __init__(self, **kwargs): super(AzureFirewallNatRule, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) self.source_addresses = kwargs.get('source_addresses', None) self.destination_addresses = kwargs.get('destination_addresses', None) self.destination_ports = kwargs.get('destination_ports', None) self.protocols = kwargs.get('protocols', None) self.translated_address = kwargs.get('translated_address', None) self.translated_port = kwargs.get('translated_port', None) self.translated_fqdn = kwargs.get('translated_fqdn', None) self.source_ip_groups = kwargs.get('source_ip_groups', None)
47.1875
82
0.646026
4a01248d04fbfa176c165775352d140ccf742896
333
py
Python
src/bel_resources/__main__.py
pybel/bel-resources
c02be74fa305d085f825ea7996ff9cf208588219
[ "MIT" ]
null
null
null
src/bel_resources/__main__.py
pybel/bel-resources
c02be74fa305d085f825ea7996ff9cf208588219
[ "MIT" ]
null
null
null
src/bel_resources/__main__.py
pybel/bel-resources
c02be74fa305d085f825ea7996ff9cf208588219
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Entrypoint module, in case you use ``python3 -m bel_resources``. Why does this file exist, and why ``__main__``? For more info, read: - https://www.python.org/dev/peps/pep-0338/ - https://docs.python.org/3/using/cmdline.html#cmdoption-m """ from .cli import main if __name__ == '__main__': main()
22.2
68
0.66967
4a01255f2fda692e136ab4f7050333701aac75ab
8,199
py
Python
procrastinate/psycopg2_connector.py
peopledoc/procrastinate
f7bc2a3016f6d5e5196368caf92b42320cfe1825
[ "MIT" ]
129
2019-09-14T16:04:52.000Z
2021-09-22T07:12:25.000Z
procrastinate/psycopg2_connector.py
peopledoc/procrastinate
f7bc2a3016f6d5e5196368caf92b42320cfe1825
[ "MIT" ]
389
2019-08-16T07:56:41.000Z
2021-09-27T12:56:24.000Z
procrastinate/psycopg2_connector.py
peopledoc/procrastinate
f7bc2a3016f6d5e5196368caf92b42320cfe1825
[ "MIT" ]
17
2019-09-21T09:14:00.000Z
2021-09-18T01:15:35.000Z
import contextlib import functools import logging import re from typing import Any, Callable, Dict, Optional import psycopg2 import psycopg2.errors import psycopg2.pool from psycopg2.extras import Json, RealDictCursor from procrastinate import connector, exceptions logger = logging.getLogger(__name__) def wrap_exceptions(func: Callable) -> Callable: """ Wrap psycopg2 errors as connector exceptions. """ @functools.wraps(func) def wrapped(*args, **kwargs): try: return func(*args, **kwargs) except psycopg2.errors.UniqueViolation as exc: raise exceptions.UniqueViolation(constraint_name=exc.diag.constraint_name) except psycopg2.Error as exc: raise exceptions.ConnectorException from exc # Attaching a custom attribute to ease testability and make the # decorator more introspectable wrapped._exceptions_wrapped = True # type: ignore return wrapped def wrap_query_exceptions(func: Callable) -> Callable: """ Detect "admin shutdown" errors and retry a number of times. This is to handle the case where the database connection (obtained from the pool) was actually closed by the server. In this case, pyscopg2 raises an AdminShutdown exception when the connection is used for issuing a query. What we do is retry when an AdminShutdown is raised, and until the maximum number of retries is reached. The number of retries is set to the pool maximum size plus one, to handle the case where the connections we have in the pool were all closed on the server side. 
""" @functools.wraps(func) def wrapped(*args, **kwargs): final_exc = None try: max_tries = args[0]._pool.maxconn + 1 except Exception: max_tries = 1 for _ in range(max_tries): try: return func(*args, **kwargs) except psycopg2.errors.AdminShutdown: continue raise exceptions.ConnectorException( "Could not get a valid connection after {} tries".format(max_tries) ) from final_exc return wrapped PERCENT_PATTERN = re.compile(r"%(?![\(s])") class Psycopg2Connector(connector.BaseConnector): @wrap_exceptions def __init__( self, *, json_dumps: Optional[Callable] = None, json_loads: Optional[Callable] = None, **kwargs: Any, ): """ Synchronous connector based on a ``psycopg2.pool.ThreadedConnectionPool``. This is used if you want your ``.defer()`` calls to be purely synchronous, not asynchronous with a sync wrapper. You may need this if your program is multi-threaded and doesn't handle async loops well (see `discussion-sync-defer`). All other arguments than ``json_dumps`` are passed to :py:func:`ThreadedConnectionPool` (see psycopg2 documentation__), with default values that may differ from those of ``psycopg2`` (see a partial list of parameters below). .. _psycopg2 doc: https://www.psycopg.org/docs/extras.html#json-adaptation .. __: https://www.psycopg.org/docs/pool.html #psycopg2.pool.ThreadedConnectionPool Parameters ---------- json_dumps : The JSON dumps function to use for serializing job arguments. Defaults to the function used by psycopg2. See the `psycopg2 doc`_. json_loads : The JSON loads function to use for deserializing job arguments. Defaults to the function used by psycopg2. See the `psycopg2 doc`_. Unused if the pool is externally created and set into the connector through the ``App.open`` method. minconn : int Passed to psycopg2, default set to 1 (same as aiopg). maxconn : int Passed to psycopg2, default set to 10 (same as aiopg). dsn : ``Optional[str]`` Passed to psycopg2. 
Default is "" instead of None, which means if no argument is passed, it will connect to localhost:5432 instead of a Unix-domain local socket file. cursor_factory : ``psycopg2.extensions.cursor`` Passed to psycopg2. Default is ``psycopg2.extras.RealDictCursor`` instead of standard cursor. There is no identified use case for changing this. """ self.json_dumps = json_dumps self.json_loads = json_loads self._pool: Optional[psycopg2.pool.AbstractConnectionPool] = None self._pool_args = self._adapt_pool_args(kwargs) self._pool_externally_set = False @staticmethod def _adapt_pool_args(pool_args: Dict[str, Any]) -> Dict[str, Any]: """ Adapt the pool args for ``psycopg2``, using sensible defaults for Procrastinate. """ final_args = { "minconn": 1, "maxconn": 10, "dsn": "", "cursor_factory": RealDictCursor, } final_args.update(pool_args) return final_args def open(self, pool: Optional[psycopg2.pool.AbstractConnectionPool] = None) -> None: """ Instantiate the pool. pool : Optional pool. Procrastinate can use an existing pool. Connection parameters passed in the constructor will be ignored. 
""" if pool: self._pool_externally_set = True self._pool = pool else: self._pool = self._create_pool(self._pool_args) @staticmethod @wrap_exceptions def _create_pool(pool_args: Dict[str, Any]) -> psycopg2.pool.AbstractConnectionPool: return psycopg2.pool.ThreadedConnectionPool(**pool_args) @wrap_exceptions def close(self) -> None: """ Close the pool """ if self._pool and not self._pool.closed and not self._pool_externally_set: self._pool.closeall() @property def pool(self) -> psycopg2.pool.AbstractConnectionPool: if self._pool is None: # Set by open raise exceptions.AppNotOpen return self._pool def _wrap_json(self, arguments: Dict[str, Any]): return { key: Json(value, dumps=self.json_dumps) if isinstance(value, dict) else value for key, value in arguments.items() } @contextlib.contextmanager def _connection(self) -> psycopg2.extensions.connection: # in case of an admin shutdown (Postgres error code 57P01) we do not # rollback the connection or put the connection back to the pool as # this will cause a psycopg2.InterfaceError exception connection = self.pool.getconn() try: yield connection except psycopg2.errors.AdminShutdown: raise except Exception: connection.rollback() self.pool.putconn(connection) raise else: connection.commit() self.pool.putconn(connection) @wrap_exceptions @wrap_query_exceptions def execute_query(self, query: str, **arguments: Any) -> None: with self._connection() as connection: with connection.cursor() as cursor: cursor.execute( PERCENT_PATTERN.sub("%%", query), self._wrap_json(arguments) ) @wrap_exceptions @wrap_query_exceptions def execute_query_one(self, query: str, **arguments: Any) -> Dict[str, Any]: with self._connection() as connection: with connection.cursor() as cursor: cursor.execute( PERCENT_PATTERN.sub("%%", query), self._wrap_json(arguments) ) return cursor.fetchone() @wrap_exceptions @wrap_query_exceptions def execute_query_all(self, query: str, **arguments: Any) -> Dict[str, Any]: with self._connection() as connection: with 
connection.cursor() as cursor: cursor.execute( PERCENT_PATTERN.sub("%%", query), self._wrap_json(arguments) ) return cursor.fetchall()
35.803493
88
0.629711
4a0126d5ef2af5643c1610ec7ad7a11063b3d496
1,587
py
Python
Graph/P02_DepthFirstSearch.py
akhileshsantoshwar/DataStructures-using-python
9465af2a40e05daabeed45be5ed118591b0e7150
[ "MIT" ]
null
null
null
Graph/P02_DepthFirstSearch.py
akhileshsantoshwar/DataStructures-using-python
9465af2a40e05daabeed45be5ed118591b0e7150
[ "MIT" ]
null
null
null
Graph/P02_DepthFirstSearch.py
akhileshsantoshwar/DataStructures-using-python
9465af2a40e05daabeed45be5ed118591b0e7150
[ "MIT" ]
null
null
null
# Author: AKHILESH SANTOSHWAR class Graph(): def __init__(self): self.vertex = {} # for printing the Graph vertexes def printGraph(self): print(self.vertex) for i in self.vertex.keys(): print(i,' -> ', ' -> '.join([str(j) for j in self.vertex[i]])) # for adding the edge beween two vertexes def addEdge(self, fromVertex, toVertex): # check if vertex is already present, if fromVertex in self.vertex.keys(): self.vertex[fromVertex].append(toVertex) else: # else make a new vertex self.vertex[fromVertex] = [toVertex] def DFS(self): # visited array for storing already visited nodes visited = [False] * len(self.vertex) # call the recursive helper function for i in range(len(self.vertex)): if visited[i] == False: self.DFSRec(i, visited) def DFSRec(self, startVertex, visited): # mark start vertex as visited visited[startVertex] = True print(startVertex, end = ' ') # Recur for all the vertexes that are adjacent to this node for i in self.vertex.keys(): if visited[i] == False: self.DFSRec(i, visited) if __name__ == '__main__': g = Graph() g.addEdge(0, 1) g.addEdge(0, 2) g.addEdge(1, 2) g.addEdge(2, 0) g.addEdge(2, 3) g.addEdge(3, 3) g.printGraph() print('DFS:') g.DFS() # OUTPUT: # 0  ->  1 -> 2 # 1  ->  2 # 2  ->  0 -> 3 # 3  ->  3 # DFS: # 0 1 2 3
25.596774
74
0.544423
4a0127dc06f6f34788b098fd9d05389fa6b82b26
6,378
py
Python
mobilenet_v2.py
foamliu/DeepIQA
f47c285b2f84806857af46cf04691c887492616c
[ "MIT" ]
2
2019-10-28T06:59:03.000Z
2019-10-28T11:48:25.000Z
mobilenet_v2.py
foamliu/DeepIQA
f47c285b2f84806857af46cf04691c887492616c
[ "MIT" ]
null
null
null
mobilenet_v2.py
foamliu/DeepIQA
f47c285b2f84806857af46cf04691c887492616c
[ "MIT" ]
null
null
null
import os import torch from torch import nn from torch.quantization import QuantStub, DeQuantStub from torchsummary import summary from config import device def _make_divisible(v, divisor, min_value=None): """ This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v class ConvBNReLU(nn.Sequential): def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): padding = (kernel_size - 1) // 2 super(ConvBNReLU, self).__init__( nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False), nn.BatchNorm2d(out_planes, momentum=0.1), # Replace with ReLU nn.ReLU(inplace=False) ) class InvertedResidual(nn.Module): def __init__(self, inp, oup, stride, expand_ratio): super(InvertedResidual, self).__init__() self.stride = stride assert stride in [1, 2] hidden_dim = int(round(inp * expand_ratio)) self.use_res_connect = self.stride == 1 and inp == oup layers = [] if expand_ratio != 1: # pw layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) layers.extend([ # dw ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), # pw-linear nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup, momentum=0.1), ]) self.conv = nn.Sequential(*layers) # Replace torch.add with floatfunctional self.skip_add = nn.quantized.FloatFunctional() def forward(self, x): if self.use_res_connect: return self.skip_add.add(x, self.conv(x)) else: return self.conv(x) class MobileNetV2(nn.Module): def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, 
round_nearest=8): """ MobileNet V2 main class Args: num_classes (int): Number of classes width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount inverted_residual_setting: Network structure round_nearest (int): Round the number of channels in each layer to be a multiple of this number Set to 1 to turn off rounding """ super(MobileNetV2, self).__init__() block = InvertedResidual input_channel = 32 last_channel = 1280 if inverted_residual_setting is None: inverted_residual_setting = [ # t, c, n, s [1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1], ] # only check the first element, assuming user knows t,c,n,s are required if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: raise ValueError("inverted_residual_setting should be non-empty " "or a 4-element list, got {}".format(inverted_residual_setting)) # building first layer input_channel = _make_divisible(input_channel * width_mult, round_nearest) self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) features = [ConvBNReLU(3, input_channel, stride=2)] # building inverted residual blocks for t, c, n, s in inverted_residual_setting: output_channel = _make_divisible(c * width_mult, round_nearest) for i in range(n): stride = s if i == 0 else 1 features.append(block(input_channel, output_channel, stride, expand_ratio=t)) input_channel = output_channel # building last several layers features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1)) # make it nn.Sequential self.features = nn.Sequential(*features) self.quant = QuantStub() self.dequant = DeQuantStub() # building classifier self.classifier = nn.Sequential( nn.Dropout(0.2), nn.Linear(self.last_channel, 2), nn.Softmax() # nn.Sigmoid() ) # weight initialization for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: 
nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.zeros_(m.bias) def forward(self, x): x = self.quant(x) x = self.features(x) x = x.mean([2, 3]) x = self.classifier(x) x = self.dequant(x) return x # Fuse Conv+BN and Conv+BN+Relu modules prior to quantization # This operation does not change the numerics def fuse_model(self): for m in self.modules(): if type(m) == ConvBNReLU: torch.quantization.fuse_modules(m, ['0', '1', '2'], inplace=True) if type(m) == InvertedResidual: for idx in range(len(m.conv)): if type(m.conv[idx]) == nn.Conv2d: torch.quantization.fuse_modules(m.conv, [str(idx), str(idx + 1)], inplace=True) def print_size_of_model(model): torch.save(model.state_dict(), "temp.p") print('Size (MB):', os.path.getsize("temp.p") / 1e6) os.remove('temp.p') if __name__ == "__main__": model = MobileNetV2().to(device) print(model) summary(model, input_size=(3, 1944, 2592)) print_size_of_model(model)
35.631285
107
0.590624
4a012903bf088d4b245c470845ec9c9dc8b31530
1,924
py
Python
tests.py
ToniIvars/flask-url-shortener
77dd7b8739403bcf27d689445e6e760c4545e26c
[ "MIT" ]
null
null
null
tests.py
ToniIvars/flask-url-shortener
77dd7b8739403bcf27d689445e6e760c4545e26c
[ "MIT" ]
null
null
null
tests.py
ToniIvars/flask-url-shortener
77dd7b8739403bcf27d689445e6e760c4545e26c
[ "MIT" ]
null
null
null
import unittest from app import app import json class URLTests(unittest.TestCase): def setUp(self): app.config['TESTING'] = True app.config['WTF_CSRF_ENABLED'] = False app.config['DEBUG'] = False self.app = app.test_client() def tearDown(self): with open('urls.json', 'w') as f: f.write('') f.close() def test_shorten_valid_url(self): response = self.app.post('/acortado', follow_redirects=True, data={'data':'https://www.google.com'}) self.assertIn(b'URL acortada:', response.data) def test_shorten_invalid_or_null_url(self): response1 = self.app.post('/acortado', follow_redirects=True, data={'data':'google'}) response2 = self.app.post('/acortado', follow_redirects=True, data={'data':''}) self.assertIn(b'Debes escribir una URL', response1.data) self.assertIn(b'Debes escribir una URL', response2.data) def test_go_to_shortened_url(self): response = self.app.post('/acortado', follow_redirects=True, data={'data':'https://www.google.com'}) url_index = str(response.data).find('<a href') url = str(response.data)[url_index+8:url_index+37] response = self.app.get(f'/{url.split("/")[-1]}', follow_redirects=True) self.assertEqual(response.status_code, 200) def test_go_to_invalid_shortened_url(self): response = self.app.get('/ongonboerngonwgji', follow_redirects=True) self.assertEqual(response.status_code, 404) def test_api(self): response = self.app.get('/api?url=https://www.google.com', follow_redirects=True) json_object = json.loads(response.data) response = self.app.get(f'/{json_object["shortened"].split("/")[-1]}', follow_redirects=True) self.assertEqual(response.status_code, 200) if __name__ == '__main__': unittest.main()
37.72549
108
0.640852
4a012a2587f11de074ce1f3c726b9554865e06c9
1,526
py
Python
Interpolation/Other/Show masters of next glyph.py
juandelperal/Glyphs-Scripts
1f3cb71683ec044dff67a46cd895773e8271effa
[ "Apache-2.0" ]
null
null
null
Interpolation/Other/Show masters of next glyph.py
juandelperal/Glyphs-Scripts
1f3cb71683ec044dff67a46cd895773e8271effa
[ "Apache-2.0" ]
null
null
null
Interpolation/Other/Show masters of next glyph.py
juandelperal/Glyphs-Scripts
1f3cb71683ec044dff67a46cd895773e8271effa
[ "Apache-2.0" ]
null
null
null
#MenuTitle: Show Masters of Next Glyph # -*- coding: utf-8 -*- from __future__ import division, print_function, unicode_literals from builtins import str __doc__=""" Shows all masters for the next glyph. """ from Foundation import NSRange from PyObjCTools.AppHelper import callAfter zeroPosition = NSRange() zeroPosition.location = 0 zeroPosition.length = 0 def showAllMastersOfGlyphInCurrentTab( thisGlyphName ): try: escapedGlyphName = "/" + thisGlyphName thisDoc = Glyphs.currentDocument thisWindow = thisDoc.windowController() thisEditView = thisWindow.activeEditViewController() # current tab if thisEditView is None: # opens new Edit tab if none was open: callAfter( thisWindow.addTabWithString_, escapedGlyphName ) else: thisGraphicView = thisEditView.graphicView() # current display string thisGraphicView.setDisplayString_( escapedGlyphName ) # set the display string to this glyph thisGraphicView.setSelectedRange_( zeroPosition ) # moves the cursor to beginning thisEditView.insertAllMasters_( None ) # insert all masters return True except Exception as e: print e return False thisFont = Glyphs.font # frontmost font currentLayer = thisFont.selectedLayers[0] currentGlyph = currentLayer.parent currentGlyphIndex = currentGlyph.glyphId() newGlyphIndex = currentGlyphIndex + 1 newGlyphName = thisFont.glyphs[newGlyphIndex].name if not showAllMastersOfGlyphInCurrentTab( newGlyphName ): print "Error: could not insert masters of %s in current edit tab." % newGlyphName
33.173913
95
0.787025
4a012ae0263dca7df26be3da8f9f7c4c21326393
655
pyde
Python
sketches/schwellenwert/schwellenwert.pyde
kantel/processingpy
74aae222e46f68d1c8f06307aaede3cdae65c8ec
[ "MIT" ]
4
2018-06-03T02:11:46.000Z
2021-08-18T19:55:15.000Z
sketches/schwellenwert/schwellenwert.pyde
kantel/processingpy
74aae222e46f68d1c8f06307aaede3cdae65c8ec
[ "MIT" ]
null
null
null
sketches/schwellenwert/schwellenwert.pyde
kantel/processingpy
74aae222e46f68d1c8f06307aaede3cdae65c8ec
[ "MIT" ]
3
2019-12-23T19:12:51.000Z
2021-04-30T14:00:31.000Z
from random import randint def setup(): global akt size(800, 640) this.surface.setTitle("Schwellenwert-Akt") akt = loadImage("akt.jpg"); image(akt, 0, 0) noLoop() def index(x, y): return(x + y * akt.width) def draw(): akt.filter(GRAY) akt.loadPixels() for y in range(akt.height): for x in range(akt.width): s = 100 pix = akt.pixels[index(x, y)] c = red(pix) if c <= s: akt.pixels[index(x, y)] = color(0) else: akt.pixels[index(x, y)] = color(255) akt.updatePixels() image(akt, 400, 0)
20.46875
52
0.500763
4a012b348e93f9fddaaedf1b4e69e6c3ed233433
2,765
py
Python
scitbx/math/tests/tst_uniform_rotation_matrix.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
155
2016-11-23T12:52:16.000Z
2022-03-31T15:35:44.000Z
scitbx/math/tests/tst_uniform_rotation_matrix.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
590
2016-12-10T11:31:18.000Z
2022-03-30T23:10:09.000Z
scitbx/math/tests/tst_uniform_rotation_matrix.py
dperl-sol/cctbx_project
b9e390221a2bc4fd00b9122e97c3b79c632c6664
[ "BSD-3-Clause-LBNL" ]
115
2016-11-15T08:17:28.000Z
2022-02-09T15:30:14.000Z
from __future__ import absolute_import, division, print_function import math, random from scitbx import matrix from scitbx.array_family import flex from six.moves import range # ----------------------------------------------------------------------------- # simplified form of surface integral for spherical harmonic (l = m) # http://en.wikipedia.org/wiki/Table_of_spherical_harmonics def ylm(lm,c,t,p): y = c * math.pow(math.sin(t),lm) * complex(math.cos(lm*p),math.sin(lm*p)) return y * y.conjugate() * math.sin(t) # ----------------------------------------------------------------------------- def add_point(lm,c,R): x = matrix.col( [0,0,1] ) new_x = R * x theta = math.acos(new_x[2]) # theta = [0, pi] phi = math.atan2(new_x[1],new_x[0]) + math.pi # phi = [0, 2pi) return ylm(lm,c,theta,phi) # ----------------------------------------------------------------------------- def test_uniform_rotation_matrix(N=10000,choice=2,verbose=False): r""" The surface integral of a spherical harmonic function with its conjugate should be 1. (http://mathworld.wolfram.com/SphericalHarmonic.html, Eq 7) From Mathematica, l = 10; m = 10; y = SphericalHarmonicY[l, m, \[Theta], \[Phi]]; Integrate[y*Conjugate[y]*Sin[\[Theta]], {\[Theta], 0, Pi}, {\[Phi], 0, 2*Pi}] should yield 1. By picking uniformly random points on a sphere, the surface integral can be numerically approximated. The results in the comments below are for N = 1 000 000. 
""" if (choice == 0): # l=1, m=1 # result = (0.883199394206+0j) (0.883824001444+0j) lm = 1 c = -0.5 * math.sqrt(1.5/math.pi) elif (choice == 1): # l = 5, m = 5 # result = (0.959557841214+0j) (0.959331535539+0j) lm = 5 c = -(3/32) * math.sqrt(77/math.pi) else: # l = 10, m = 10 # result = (0.977753926603+0j) (0.97686871766+0j) lm = 10 c = (1/1024) * math.sqrt(969969/math.pi) result = [ 0.0, 0.0 ] for i in range(N): R = [ matrix.sqr(flex.random_double_r3_rotation_matrix()), matrix.sqr(flex.random_double_r3_rotation_matrix_arvo_1992()) ] for j in range(len(result)): result[j] += add_point(lm,c,R[j]) # multipy by area at the end, each point has an area of 4pi/N point_area = 4.0*math.pi/N # surface area of unit sphere / number of points for i in range(len(result)): result[i] = point_area * result[i] if (verbose): print(result[i], end=' ') if (verbose): print() assert(result[0].real > 0.85) assert(result[0].real < 1.15) assert(result[1].real > 0.85) assert(result[1].real < 1.15) if (__name__ == '__main__'): flex.set_random_seed(0) for i in range(3): test_uniform_rotation_matrix(N=1000, choice=i, verbose=False) print('OK')
31.781609
79
0.585172
4a012b698d6c219105db0597dcd52eb765507d38
556
py
Python
preta.py
Develable/Rutap-Bot-2019
e141cc2cb60572eb35f9e952679d511d66edbba3
[ "MIT" ]
2
2020-05-23T09:44:41.000Z
2020-05-23T11:15:32.000Z
preta.py
develable/Rutap-Bot-2019
e141cc2cb60572eb35f9e952679d511d66edbba3
[ "MIT" ]
null
null
null
preta.py
develable/Rutap-Bot-2019
e141cc2cb60572eb35f9e952679d511d66edbba3
[ "MIT" ]
1
2020-10-31T13:58:45.000Z
2020-10-31T13:58:45.000Z
# -*- coding:utf-8 -*- ######################################################### # Rutap Bot 2019 Timeform Module (By. Preta) # # 모든 저작권은 Preta가 소유합니다. 모든 권리를 보유합니다. # ######################################################### import time, datetime def timeform(dt1): now = datetime.datetime.now() msgtime = str(dt1) mili = str(msgtime)[-6:] msgtime = str(msgtime)[:-7] msgtime = time.strptime(msgtime,'%Y-%m-%d %H:%M:%S') msgtime = time.mktime(msgtime) msgtime = float(str(msgtime)[:-1] + mili) return msgtime
30.888889
57
0.478417
4a012ca769afdf6ce35e5f5aabe09729e5e1db74
4,228
py
Python
manage_aws_services.py
jumapeter/aws_services
cb13c37983aebeb1e2d1e0b6307e168aab47970a
[ "MIT" ]
null
null
null
manage_aws_services.py
jumapeter/aws_services
cb13c37983aebeb1e2d1e0b6307e168aab47970a
[ "MIT" ]
null
null
null
manage_aws_services.py
jumapeter/aws_services
cb13c37983aebeb1e2d1e0b6307e168aab47970a
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 from configparser import ConfigParser import boto3 import argparse # Read & parse commandline arguments parser = argparse.ArgumentParser() parser.add_argument('-s', '--service', dest='aws_service', choices=['iam', 'ec2', 'rds', 's3', 'ecr', 'ecs'], help="List instances of AWS services", required=True) parser.add_argument('-i', '--input', type=str, help="Path to AWS Credentials file in INI format", required=True) args = parser.parse_args() svc = args.aws_service credentials_file = args.input # Read & parse AWS credentials file def config(filename, section): # create a parser parser = ConfigParser() # read config file parser.read(filename) # get section, default to credentials aws = {} if parser.has_section(section): params = parser.items(section) for param in params: aws[param[0]] = param[1] else: raise Exception('Section {0} not found in the {1} file'.format(section, filename)) return aws # Get credentials parameters (params['aws_access_key_id'], params['aws_secret_access_key'], params['aws_region']) params = config(credentials_file,'credentials') # Create a session session = boto3.Session( aws_access_key_id=params['aws_access_key_id'], aws_secret_access_key=params['aws_secret_access_key'], region_name=params['aws_region'], ) # Get list of users and groups def iam_list(): iam = session.client('iam') user = {} for userlist in iam.list_users()['Users']: userGroups = iam.list_groups_for_user(UserName=userlist['UserName']) user[userlist['UserName']]={'groups':''} groupnames = [groupName['GroupName'] for groupName in userGroups['Groups']] user[userlist['UserName']] = groupnames return user # List ec2 instances def ec2_list_instances(): ec2 = session.resource('ec2') instances = ec2.instances.all() return instances # List RDS instances def rds_list_instances(): rds = session.client('rds') dbs = rds.describe_db_instances() return dbs # List S3 buckets def s3_list_buckets(): s3 = session.resource('s3') s3buckets=s3.buckets.all() return s3buckets # 
List ECR repos def ecr_list_repos(): ecr = session.client('ecr') repos = ecr.describe_repositories()['repositories'] return repos # List ECS clusters def ecs_list_clusters(): ecs = session.client('ecs') clusters = ecs.list_clusters()['clusterArns'] return clusters def menu(): if svc == 'iam': users = iam_list() print("Username\tMember Of Groups") print("--------------------------------") for user in users: print("{}\t{}".format(user, ', '.join(users[user]))) elif svc == 'ec2': instances = ec2_list_instances() print("Instance ID\t\tState\t\t Private IP\t\tPublic IP") for instance in instances: print("{}\t{}\t\t{}\t\t{}".format(instance.id, instance.state['Name'], instance.private_ip_address, instance.public_ip_address)) elif svc == 'rds': dbs = rds_list_instances() try: print("Database Name\tDatabase Endpoint\t\t\t\t\t\t\tStatus") for db in dbs['DBInstances']: print("{}\t{}@{}:{}\t{}".format(db['DBInstanceIdentifier'],db['MasterUsername'],db['Endpoint']['Address'],db['Endpoint']['Port'],db['DBInstanceStatus'])) except Exception as error: print(error) elif svc == 's3': s3 = s3_list_buckets() print("Bucket Name") print("-----------------") for bucket in s3: print(bucket.name) elif svc == 'ecr': repos = ecr_list_repos() print("Repository Name") print("-----------------") for repo in repos: print(repo['repositoryName']) elif svc == 'ecs': clusters=ecs_list_clusters() print("Cluster ARN") print("-----------------") for cluster in clusters: print(cluster) if __name__ == "__main__": menu()
33.555556
171
0.595317
4a012cc058495a05c74b625b04f3d34d91d0fdeb
6,278
py
Python
src/yolov5/utils/downloads.py
etiennecollin/glycine-max-mitosis-ml
cacd68f2a56df078cb1193bcc7ed4a0f86a659d4
[ "MIT" ]
1
2022-02-17T13:37:47.000Z
2022-02-17T13:37:47.000Z
src/yolov5/utils/downloads.py
etiennecollin/glycine-max-mitosis-ml
cacd68f2a56df078cb1193bcc7ed4a0f86a659d4
[ "MIT" ]
null
null
null
src/yolov5/utils/downloads.py
etiennecollin/glycine-max-mitosis-ml
cacd68f2a56df078cb1193bcc7ed4a0f86a659d4
[ "MIT" ]
1
2022-02-16T02:23:09.000Z
2022-02-16T02:23:09.000Z
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Download utils """ import os import platform import subprocess import time import urllib from pathlib import Path from zipfile import ZipFile import requests import torch def gsutil_getsize(url=''): # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') return eval(s.split(' ')[0]) if len(s) else 0 # bytes def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes file = Path(file) assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" try: # url1 print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, str(file)) assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check except Exception as e: # url2 file.unlink(missing_ok=True) # remove partial downloads print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check file.unlink(missing_ok=True) # remove partial downloads print(f"ERROR: {assert_msg}\n{error_msg}") print('') def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads import *; attempt_download() # Attempt file download if does not exist file = Path(str(file).strip().replace("'", '')) if not file.exists(): # URL specified name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. if str(file).startswith(('http:/', 'https:/')): # download url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
if Path(file).is_file(): print(f'Found {url} locally at {file}') # file already exists else: safe_download(file=file, url=url, min_bytes=1E5) return file # GitHub assets file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) try: response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] tag = response['tag_name'] # i.e. 'v1.0' except Exception: # fallback plan assets = [ 'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] try: tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] except Exception: tag = 'v6.0' # current release if name in assets: safe_download( file, url=f'https://github.com/{repo}/releases/download/{tag}/{name}', # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) min_bytes=1E5, error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') return str(file) def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() t = time.time() file = Path(file) cookie = Path('cookie') # gdrive cookie print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') file.unlink(missing_ok=True) # remove existing file cookie.unlink(missing_ok=True) # remove existing cookie # Attempt file download out = "NUL" if platform.system() == "Windows" else "/dev/null" os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') if os.path.exists('cookie'): # large file s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' else: # small file s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' r = os.system(s) # execute, capture return cookie.unlink(missing_ok=True) # remove existing cookie # Error check if r != 0: file.unlink(missing_ok=True) # remove partial print('Download error ') # raise Exception('Download error') return r # Unzip if archive if file.suffix == '.zip': print('unzipping... ', end='') ZipFile(file).extractall(path=file.parent) # unzip file.unlink() # remove zip print(f'Done ({time.time() - t:.1f}s)') return r def get_token(cookie="./cookie"): with open(cookie) as f: for line in f: if "download" in line: return line.split()[-1] return "" # Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- # # # def upload_blob(bucket_name, source_file_name, destination_blob_name): # # Uploads a file to a bucket # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python # # storage_client = storage.Client() # bucket = storage_client.get_bucket(bucket_name) # blob = bucket.blob(destination_blob_name) # # blob.upload_from_filename(source_file_name) # # print('File {} uploaded to {}.'.format( # source_file_name, # destination_blob_name)) # # # def download_blob(bucket_name, source_blob_name, destination_file_name): # # Uploads a blob from a bucket # storage_client = storage.Client() # bucket = storage_client.get_bucket(bucket_name) # blob = bucket.blob(source_blob_name) # # blob.download_to_filename(destination_file_name) # # 
print('Blob {} downloaded to {}.'.format( # source_blob_name, # destination_file_name))
39.987261
120
0.617713
4a012cddb29e4e546bf46971b113a335f343b574
651
py
Python
ESP32/FT.py
Redns/WO
4996f768623b2f26dd5e765d87d2d7d6074af0af
[ "MIT" ]
1
2021-05-09T10:26:42.000Z
2021-05-09T10:26:42.000Z
ESP32/FT.py
Redns/WO
4996f768623b2f26dd5e765d87d2d7d6074af0af
[ "MIT" ]
null
null
null
ESP32/FT.py
Redns/WO
4996f768623b2f26dd5e765d87d2d7d6074af0af
[ "MIT" ]
null
null
null
from machine import UART import network, usocket, time #创建wlan对象 wl = network.WLAN(network.STA_IF) wl.active(True) uart = UART(2, baudrate=115200, rx=16, tx=17, timeout=10) #服务器和ESP32应在同一个局域网下,否则无法通信 wl.connect('wifi_name', 'wifi_password') #在WIFI连接完毕前不能连接socket while not wl.isconnected(): pass #创建usocket对象并连接 s = usocket.socket() s.connect(('server_ip', 8888)) #向服务器发送身份ID data = s.recv(2) while True: if bytes.decode(data)=='ID': break data = s.recv(2) s.send('ESP32') while True: #串口发来数据时 if uart.any(): sensor_msg = uart.readline() s.send(sensor_msg) print(bytes.decode(sensor_msg))
17.131579
57
0.677419
4a012d5a155ee2e74662bfe204b79723a1d0adbe
8,106
py
Python
advrush/adv_train.py
nutellamok/advrush
a15b26a4ef4e919783676b67647ec0ec8723d3b6
[ "Apache-2.0" ]
8
2021-12-10T02:52:12.000Z
2022-03-05T01:51:48.000Z
advrush/adv_train.py
nutellamok/advrush
a15b26a4ef4e919783676b67647ec0ec8723d3b6
[ "Apache-2.0" ]
null
null
null
advrush/adv_train.py
nutellamok/advrush
a15b26a4ef4e919783676b67647ec0ec8723d3b6
[ "Apache-2.0" ]
4
2021-12-10T03:03:46.000Z
2022-03-24T08:45:26.000Z
import os import sys import time import glob import numpy as np import torch import utils import logging import argparse import torch.nn as nn import genotypes import torch.utils import torchvision.datasets as dset import torch.backends.cudnn as cudnn from torch.autograd import Variable from model import NetworkCIFAR as Network from trades import trades_loss, madry_loss parser = argparse.ArgumentParser("cifar") parser.add_argument('--data', type=str, default='../data', help='location of the data corpus') parser.add_argument('--batch_size', type=int, default=64, help='batch size') #128 parser.add_argument('--learning_rate', type=float, default=0.1, help='init learning rate') parser.add_argument('--momentum', type=float, default=0.9, help='momentum') parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay') parser.add_argument('--report_freq', type=float, default=50, help='report frequency') parser.add_argument('--gpu', type=int, default=0, help='gpu device id') parser.add_argument('--epochs', type=int, default=200, help='num of training epochs') parser.add_argument('--epsilon', type=float, default=0.031, help='perturbation') parser.add_argument('--num_steps', type=int, default=7, help='perturb number of steps') parser.add_argument('--step_size', type=float, default=0.01, help='perturb step size') parser.add_argument('--beta', type=float, default=6.0, help='regularization in TRADES') parser.add_argument('--adv_loss', type=str, default='pgd', help='experiment name') parser.add_argument('--init_channels', type=int, default=36, help='num of init channels') parser.add_argument('--layers', type=int, default=20, help='total number of layers') parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model') parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower') parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss') 
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout') parser.add_argument('--cutout_length', type=int, default=16, help='cutout length') parser.add_argument('--drop_path_prob', type=float, default=0.0, help='drop path probability') parser.add_argument('--save', type=str, default='EXP', help='experiment name') parser.add_argument('--seed', type=int, default=0, help='random seed') parser.add_argument('--arch', type=str, default='ADVRUSH', help='which architecture to use') parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping') args = parser.parse_args() args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S")) utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py')) log_format = '%(asctime)s %(message)s' logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') fh = logging.FileHandler(os.path.join(args.save, 'log.txt')) fh.setFormatter(logging.Formatter(log_format)) logging.getLogger().addHandler(fh) CIFAR_CLASSES = 10 def main(): if not torch.cuda.is_available(): logging.info('no gpu device available') sys.exit(1) np.random.seed(args.seed) torch.cuda.set_device(args.gpu) cudnn.benchmark = True torch.manual_seed(args.seed) cudnn.enabled=True torch.cuda.manual_seed(args.seed) logging.info('gpu device = %d' % args.gpu) logging.info("args = %s", args) genotype = eval("genotypes.%s" % args.arch) model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype) model = model.cuda() logging.info("param size = %fMB", utils.count_parameters_in_MB(model)) criterion = nn.CrossEntropyLoss() criterion = criterion.cuda() optimizer = torch.optim.SGD( model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay ) train_transform, valid_transform = utils._data_transforms_cifar10_eval(args) train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform) 
valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform) train_queue = torch.utils.data.DataLoader( train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2) valid_queue = torch.utils.data.DataLoader( valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2) best_acc = 0.0 for epoch in range(args.epochs): adjust_learning_rate(optimizer, epoch) model.drop_path_prob = args.drop_path_prob * epoch / args.epochs train_acc, train_obj = train(train_queue, model, criterion, optimizer) logging.info('epoch %d train_acc %f', epoch, train_acc) valid_acc, valid_obj = infer(valid_queue, model, criterion) if valid_acc > best_acc: best_acc = valid_acc utils.save_checkpoint({ 'epoch': epoch +1, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), }, is_best=True, save=args.save, epoch=epoch) logging.info('epoch %d valid_acc %f, best_acc %f', epoch, valid_acc, best_acc) utils.save(model, os.path.join(args.save, 'weights.pt')) utils.save_checkpoint({ 'epoch': epoch + 1, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), }, is_best=False, save=args.save, epoch=epoch) def train(train_queue, model, criterion, optimizer): objs = utils.AvgrageMeter() top1 = utils.AvgrageMeter() top5 = utils.AvgrageMeter() model.train() for step, (input, target) in enumerate(train_queue): input = Variable(input).cuda(non_blocking=True) target = Variable(target).cuda(non_blocking=True) optimizer.zero_grad() logits, logits_aux = model(input) if args.adv_loss == 'pgd': loss = madry_loss( model, input, target, optimizer, step_size = args.step_size, epsilon = args.epsilon, perturb_steps = args.num_steps) elif args.adv_loss == 'trades': loss = trades_loss(model, input, target, optimizer, step_size=args.step_size, epsilon=args.epsilon, perturb_steps=args.num_steps, beta=args.beta, distance='l_inf') #loss = criterion(logits, target) if args.auxiliary: loss_aux = 
criterion(logits_aux, target) loss += args.auxiliary_weight*loss_aux loss.backward() nn.utils.clip_grad_norm(model.parameters(), args.grad_clip) optimizer.step() prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5)) n = input.size(0) objs.update(loss.data.item(), n) top1.update(prec1.data.item(), n) top5.update(prec5.data.item(), n) if step % args.report_freq == 0: logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg) return top1.avg, objs.avg def infer(valid_queue, model, criterion): objs = utils.AvgrageMeter() top1 = utils.AvgrageMeter() top5 = utils.AvgrageMeter() model.eval() with torch.no_grad(): for step, (input, target) in enumerate(valid_queue): input = Variable(input, requires_grad=False).cuda(non_blocking=True) target = Variable(target, requires_grad=False).cuda(non_blocking=True) logits, _ = model(input) loss = criterion(logits, target) prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5)) n = input.size(0) objs.update(loss.data.item(), n) top1.update(prec1.data.item(), n) top5.update(prec5.data.item(), n) if step % args.report_freq == 0: logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg) return top1.avg, objs.avg def adjust_learning_rate(optimizer, epoch): """decrease the learning rate""" lr = args.learning_rate if epoch >= 99: lr = args.learning_rate * 0.1 if epoch >= 149: lr = args.learning_rate * 0.01 for param_group in optimizer.param_groups: param_group['lr'] = lr if __name__ == '__main__': main()
37.527778
100
0.697138
4a012dfb707def9476d729941e068f808e192c3d
3,826
py
Python
cyber/python/cyber_py/examples/record.py
dustinksi/Edith
09f151612fadd2155b89c208e2af91c41d837a03
[ "Apache-2.0" ]
3
2020-02-07T13:09:50.000Z
2020-08-31T12:37:48.000Z
cyber/python/cyber_py/examples/record.py
dustinksi/Edith
09f151612fadd2155b89c208e2af91c41d837a03
[ "Apache-2.0" ]
null
null
null
cyber/python/cyber_py/examples/record.py
dustinksi/Edith
09f151612fadd2155b89c208e2af91c41d837a03
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python2 # **************************************************************************** # Copyright 2020 The Edith Author. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # **************************************************************************** # -*- coding: utf-8 -*- """Module for example of record.""" import time from cyber.proto.unit_test_pb2 import Chatter from cyber_py import record from google.protobuf.descriptor_pb2 import FileDescriptorProto from modules.common.util.proto.simple_pb2 import SimpleMessage MSG_TYPE = "edith.common.util.test.SimpleMessage" MSG_TYPE_CHATTER = "edith.cyber.proto.Chatter" def test_record_writer(writer_path): """ Record writer. 
""" fwriter = record.RecordWriter() fwriter.set_size_fileseg(0) fwriter.set_intervaltime_fileseg(0) if not fwriter.open(writer_path): print('Failed to open record writer!') return print('+++ Begin to writer +++') # Writer 2 SimpleMessage msg = SimpleMessage() msg.text = "AAAAAA" file_desc = msg.DESCRIPTOR.file proto = FileDescriptorProto() file_desc.CopyToProto(proto) proto.name = file_desc.name desc_str = proto.SerializeToString() print(msg.DESCRIPTOR.full_name) fwriter.write_channel( 'simplemsg_channel', msg.DESCRIPTOR.full_name, desc_str) fwriter.write_message('simplemsg_channel', msg, 990, False) fwriter.write_message('simplemsg_channel', msg.SerializeToString(), 991) # Writer 2 Chatter msg = Chatter() msg.timestamp = 99999 msg.lidar_timestamp = 100 msg.seq = 1 file_desc = msg.DESCRIPTOR.file proto = FileDescriptorProto() file_desc.CopyToProto(proto) proto.name = file_desc.name desc_str = proto.SerializeToString() print(msg.DESCRIPTOR.full_name) fwriter.write_channel('chatter_a', msg.DESCRIPTOR.full_name, desc_str) fwriter.write_message('chatter_a', msg, 992, False) msg.seq = 2 fwriter.write_message("chatter_a", msg.SerializeToString(), 993) fwriter.close() def test_record_reader(reader_path): """ Record reader. 
""" freader = record.RecordReader(reader_path) time.sleep(1) print('+' * 80) print('+++ Begin to read +++') count = 0 for channel_name, msg, datatype, timestamp in freader.read_messages(): count += 1 print('=' * 80) print('read [%d] messages' % count) print('channel_name -> %s' % channel_name) print('msgtime -> %d' % timestamp) print('msgnum -> %d' % freader.get_messagenumber(channel_name)) print('msgtype -> %s' % datatype) print('message is -> %s' % msg) print('***After parse(if needed),the message is ->') if datatype == MSG_TYPE: msg_new = SimpleMessage() msg_new.ParseFromString(msg) print(msg_new) elif datatype == MSG_TYPE_CHATTER: msg_new = Chatter() msg_new.ParseFromString(msg) print(msg_new) if __name__ == '__main__': test_record_file = "/tmp/test_writer.record" print('Begin to write record file: {}'.format(test_record_file)) test_record_writer(test_record_file) print('Begin to read record file: {}'.format(test_record_file)) test_record_reader(test_record_file)
32.423729
78
0.655776
4a012ea30201dc246202f4696f2759f385e32e71
13,794
py
Python
autotest/utilities/test_gdal_rasterize.py
trundev/gdal
d5777940975f2784980ef0b7561eeeb655fd0ab5
[ "MIT" ]
2
2018-03-22T22:31:00.000Z
2021-07-16T01:34:47.000Z
autotest/utilities/test_gdal_rasterize.py
trundev/gdal
d5777940975f2784980ef0b7561eeeb655fd0ab5
[ "MIT" ]
3
2019-02-27T00:43:06.000Z
2019-06-28T21:57:10.000Z
autotest/utilities/test_gdal_rasterize.py
trundev/gdal
d5777940975f2784980ef0b7561eeeb655fd0ab5
[ "MIT" ]
1
2021-11-21T02:33:51.000Z
2021-11-21T02:33:51.000Z
#!/usr/bin/env pytest # -*- coding: utf-8 -*- ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: gdal_rasterize testing # Author: Even Rouault <even dot rouault @ spatialys.com> # ############################################################################### # Copyright (c) 2010-2013, Even Rouault <even dot rouault at spatialys.com> # Copyright (c) 2008, Frank Warmerdam <warmerdam@pobox.com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import sys import os import pytest sys.path.append('../gcore') from osgeo import gdal from osgeo import ogr from osgeo import osr import gdaltest import test_cli_utilities ############################################################################### # Simple polygon rasterization (adapted from alg/rasterize.py). 
def test_gdal_rasterize_1(): if test_cli_utilities.get_gdal_rasterize_path() is None: pytest.skip() # Setup working spatial reference # sr_wkt = 'LOCAL_CS["arbitrary"]' # sr = osr.SpatialReference( sr_wkt ) sr = osr.SpatialReference() sr.ImportFromEPSG(32631) sr_wkt = sr.ExportToWkt() # Create a raster to rasterize into. target_ds = gdal.GetDriverByName('GTiff').Create('tmp/rast1.tif', 100, 100, 3, gdal.GDT_Byte) target_ds.SetGeoTransform((1000, 1, 0, 1100, 0, -1)) target_ds.SetProjection(sr_wkt) # Close TIF file target_ds = None # Create a layer to rasterize from. rast_ogr_ds = \ ogr.GetDriverByName('MapInfo File').CreateDataSource('tmp/rast1.tab') rast_lyr = rast_ogr_ds.CreateLayer('rast1', srs=sr) rast_lyr.GetLayerDefn() field_defn = ogr.FieldDefn('foo') rast_lyr.CreateField(field_defn) # Add a polygon. wkt_geom = 'POLYGON((1020 1030,1020 1045,1050 1045,1050 1030,1020 1030))' feat = ogr.Feature(rast_lyr.GetLayerDefn()) feat.SetGeometryDirectly(ogr.Geometry(wkt=wkt_geom)) rast_lyr.CreateFeature(feat) # Add feature without geometry to test fix for #3310 feat = ogr.Feature(rast_lyr.GetLayerDefn()) rast_lyr.CreateFeature(feat) # Add a linestring. wkt_geom = 'LINESTRING(1000 1000, 1100 1050)' feat = ogr.Feature(rast_lyr.GetLayerDefn()) feat.SetGeometryDirectly(ogr.Geometry(wkt=wkt_geom)) rast_lyr.CreateFeature(feat) # Close file rast_ogr_ds.Destroy() # Run the algorithm. (_, err) = gdaltest.runexternal_out_and_err(test_cli_utilities.get_gdal_rasterize_path() + ' -b 3 -b 2 -b 1 -burn 200 -burn 220 -burn 240 -l rast1 tmp/rast1.tab tmp/rast1.tif') assert (err is None or err == ''), 'got error/warning' # Check results. target_ds = gdal.Open('tmp/rast1.tif') expected = 6452 checksum = target_ds.GetRasterBand(2).Checksum() assert checksum == expected, 'Did not get expected image checksum' target_ds = None ############################################################################### # Test rasterization with ALL_TOUCHED (adapted from alg/rasterize.py). 
def test_gdal_rasterize_2(): if test_cli_utilities.get_gdal_rasterize_path() is None: pytest.skip() # Create a raster to rasterize into. target_ds = gdal.GetDriverByName('GTiff').Create('tmp/rast2.tif', 12, 12, 3, gdal.GDT_Byte) target_ds.SetGeoTransform((0, 1, 0, 12, 0, -1)) # Close TIF file target_ds = None # Run the algorithm. gdaltest.runexternal(test_cli_utilities.get_gdal_rasterize_path() + ' -at -b 3 -b 2 -b 1 -burn 200 -burn 220 -burn 240 -l cutline ../alg/data/cutline.csv tmp/rast2.tif') # Check results. target_ds = gdal.Open('tmp/rast2.tif') expected = 121 checksum = target_ds.GetRasterBand(2).Checksum() assert checksum == expected, 'Did not get expected image checksum' target_ds = None ############################################################################### # Test creating an output file def test_gdal_rasterize_3(): if test_cli_utilities.get_gdal_contour_path() is None: pytest.skip() if test_cli_utilities.get_gdal_rasterize_path() is None: pytest.skip() gdaltest.runexternal(test_cli_utilities.get_gdal_contour_path() + ' ../gdrivers/data/n43.dt0 tmp/n43dt0.shp -i 10 -3d') gdaltest.runexternal(test_cli_utilities.get_gdal_rasterize_path() + ' -3d tmp/n43dt0.shp tmp/n43dt0.tif -l n43dt0 -ts 121 121 -a_nodata 0 -q') ds_ref = gdal.Open('../gdrivers/data/n43.dt0') ds = gdal.Open('tmp/n43dt0.tif') assert ds.GetRasterBand(1).GetNoDataValue() == 0.0, \ 'did not get expected nodata value' assert ds.RasterXSize == 121 and ds.RasterYSize == 121, \ 'did not get expected dimensions' gt_ref = ds_ref.GetGeoTransform() gt = ds.GetGeoTransform() for i in range(6): assert gt[i] == pytest.approx(gt_ref[i], abs=1e-6), 'did not get expected geotransform' wkt = ds.GetProjectionRef() assert wkt.find("WGS_1984") != -1, 'did not get expected SRS' ############################################################################### # Same but with -tr argument def test_gdal_rasterize_4(): if test_cli_utilities.get_gdal_contour_path() is None: pytest.skip() if 
test_cli_utilities.get_gdal_rasterize_path() is None: pytest.skip() gdal.GetDriverByName('GTiff').Delete('tmp/n43dt0.tif') gdaltest.runexternal(test_cli_utilities.get_gdal_rasterize_path() + ' -3d tmp/n43dt0.shp tmp/n43dt0.tif -l n43dt0 -tr 0.008333333333333 0.008333333333333 -a_nodata 0 -a_srs EPSG:4326') ds_ref = gdal.Open('../gdrivers/data/n43.dt0') ds = gdal.Open('tmp/n43dt0.tif') assert ds.GetRasterBand(1).GetNoDataValue() == 0.0, \ 'did not get expected nodata value' # Allow output to grow by 1/2 cell, as per #6058 assert ds.RasterXSize == 122 and ds.RasterYSize == 122, \ 'did not get expected dimensions' gt_ref = ds_ref.GetGeoTransform() gt = ds.GetGeoTransform() assert gt[1] == pytest.approx(gt_ref[1], abs=1e-6) and gt[5] == pytest.approx(gt_ref[5], abs=1e-6), \ 'did not get expected geotransform(dx/dy)' # Allow output to grow by 1/2 cell, as per #6058 assert (abs(gt[0] + (gt[1] / 2) - gt_ref[0]) <= 1e-6 and \ abs(gt[3] + (gt[5] / 2) - gt_ref[3]) <= 1e-6), \ 'did not get expected geotransform' wkt = ds.GetProjectionRef() assert wkt.find("WGS_1984") != -1, 'did not get expected SRS' ############################################################################### # Test point rasterization (#3774) def test_gdal_rasterize_5(): if test_cli_utilities.get_gdal_rasterize_path() is None: pytest.skip() f = open('tmp/test_gdal_rasterize_5.csv', 'wb') f.write("""x,y,Value 0.5,0.5,1 0.5,2.5,2 2.5,2.5,3 2.5,0.5,4 1.5,1.5,5""".encode('ascii')) f.close() f = open('tmp/test_gdal_rasterize_5.vrt', 'wb') f.write("""<OGRVRTDataSource> <OGRVRTLayer name="test"> <SrcDataSource relativetoVRT="1">test_gdal_rasterize_5.csv</SrcDataSource> <SrcLayer>test_gdal_rasterize_5</SrcLayer> <GeometryType>wkbPoint</GeometryType> <GeometryField encoding="PointFromColumns" x="x" y="y"/> </OGRVRTLayer> </OGRVRTDataSource>""".encode('ascii')) f.close() gdaltest.runexternal(test_cli_utilities.get_gdal_rasterize_path() + ' -l test tmp/test_gdal_rasterize_5.vrt tmp/test_gdal_rasterize_5.tif -a Value 
-tr 1 1 -ot Byte') ds = gdal.Open('tmp/test_gdal_rasterize_5.tif') assert ds.RasterXSize == 3 and ds.RasterYSize == 3, \ 'did not get expected dimensions' gt_ref = [0, 1, 0, 3, 0, -1] gt = ds.GetGeoTransform() for i in range(6): assert gt[i] == pytest.approx(gt_ref[i], abs=1e-6), 'did not get expected geotransform' data = ds.GetRasterBand(1).ReadRaster(0, 0, 3, 3) assert data.decode('iso-8859-1') == '\x02\x00\x03\x00\x05\x00\x01\x00\x04', \ 'did not get expected values' ds = None ############################################################################### # Test on the fly reprojection of input data def test_gdal_rasterize_6(): if test_cli_utilities.get_gdal_rasterize_path() is None: pytest.skip() f = open('tmp/test_gdal_rasterize_6.csv', 'wb') f.write("""WKT,Value "POLYGON((2 49,2 50,3 50,3 49,2 49))",255 """.encode('ascii')) f.close() f = open('tmp/test_gdal_rasterize_6.prj', 'wb') f.write("""EPSG:4326""".encode('ascii')) f.close() ds = gdal.GetDriverByName('GTiff').Create('tmp/test_gdal_rasterize_6.tif', 100, 100) ds.SetGeoTransform([200000, (400000 - 200000) / 100, 0, 6500000, 0, -(6500000 - 6200000) / 100]) sr = osr.SpatialReference() sr.ImportFromEPSG(3857) ds.SetProjection(sr.ExportToWkt()) ds = None gdaltest.runexternal(test_cli_utilities.get_gdal_rasterize_path() + ' -l test_gdal_rasterize_6 tmp/test_gdal_rasterize_6.csv tmp/test_gdal_rasterize_6.tif -a Value') ds = gdal.Open('tmp/test_gdal_rasterize_6.tif') assert ds.GetRasterBand(1).Checksum() == 39190, 'did not get expected checksum' ds = None ############################################################################### # Test SQLITE dialect in SQL def test_gdal_rasterize_7(): try: from osgeo import gdalnumeric gdalnumeric.zeros except (ImportError, AttributeError): pytest.skip() if test_cli_utilities.get_gdal_rasterize_path() is None: pytest.skip() drv = ogr.GetDriverByName('SQLite') if drv is None: pytest.skip() gdal.PushErrorHandler('CPLQuietErrorHandler') ds = 
drv.CreateDataSource('/vsimem/foo.db', options=['SPATIALITE=YES']) if ds is None: pytest.skip() ds = None gdal.Unlink('/vsimem/foo.db') gdal.PopErrorHandler() f = open('tmp/test_gdal_rasterize_7.csv', 'wb') x = (0, 0, 50, 50, 25) y = (0, 50, 0, 50, 25) f.write('WKT,Value\n'.encode('ascii')) for i, xi in enumerate(x): r = 'POINT(%d %d),1\n' % (xi, y[i]) f.write(r.encode('ascii')) f.close() cmds = '''tmp/test_gdal_rasterize_7.csv tmp/test_gdal_rasterize_7.tif -init 0 -burn 1 -sql "SELECT ST_Buffer(GEOMETRY, 2) FROM test_gdal_rasterize_7" -dialect sqlite -tr 1 1 -te -1 -1 51 51''' gdaltest.runexternal(test_cli_utilities.get_gdal_rasterize_path() + ' ' + cmds) ds = gdal.Open('tmp/test_gdal_rasterize_7.tif') data = ds.GetRasterBand(1).ReadAsArray() assert data.sum() > 5, 'Only rasterized 5 pixels or less.' ds = None ############################################################################### # Make sure we create output that encompasses all the input points on a point # layer, #6058. def test_gdal_rasterize_8(): if test_cli_utilities.get_gdal_rasterize_path() is None: pytest.skip() f = open('tmp/test_gdal_rasterize_8.csv', 'wb') f.write('WKT,Value\n'.encode('ascii')) f.write('"LINESTRING (0 0, 5 5, 10 0, 10 10)",1'.encode('ascii')) f.close() cmds = '''tmp/test_gdal_rasterize_8.csv tmp/test_gdal_rasterize_8.tif -init 0 -burn 1 -tr 1 1''' gdaltest.runexternal(test_cli_utilities.get_gdal_rasterize_path() + ' ' + cmds) ds = gdal.Open('tmp/test_gdal_rasterize_8.tif') cs = ds.GetRasterBand(1).Checksum() assert cs == 21, 'Did not rasterize line data properly' ds = None ########################################### def test_gdal_rasterize_cleanup(): if test_cli_utilities.get_gdal_rasterize_path() is None: pytest.skip() gdal.GetDriverByName('GTiff').Delete('tmp/rast1.tif') ogr.GetDriverByName('MapInfo File').DeleteDataSource('tmp/rast1.tab') gdal.GetDriverByName('GTiff').Delete('tmp/rast2.tif') ogr.GetDriverByName('ESRI Shapefile').DeleteDataSource('tmp/n43dt0.shp') 
gdal.GetDriverByName('GTiff').Delete('tmp/n43dt0.tif') gdal.GetDriverByName('GTiff').Delete('tmp/test_gdal_rasterize_5.tif') os.unlink('tmp/test_gdal_rasterize_5.csv') os.unlink('tmp/test_gdal_rasterize_5.vrt') gdal.GetDriverByName('GTiff').Delete('tmp/test_gdal_rasterize_6.tif') os.unlink('tmp/test_gdal_rasterize_6.csv') os.unlink('tmp/test_gdal_rasterize_6.prj') if os.path.exists('tmp/test_gdal_rasterize_7.tif'): gdal.GetDriverByName('GTiff').Delete('tmp/test_gdal_rasterize_7.tif') if os.path.exists('tmp/test_gdal_rasterize_7.csv'): os.unlink('tmp/test_gdal_rasterize_7.csv') gdal.GetDriverByName('GTiff').Delete('tmp/test_gdal_rasterize_8.tif') os.unlink('tmp/test_gdal_rasterize_8.csv')
33.808824
189
0.633464
4a012f85055fa4c3ee8730577192c7505f7088a1
6,704
py
Python
domainbed/hparams_registry.py
arobey1/mbdg
b4e768a6d31ab1e2cb0f0a3aad76832895068876
[ "MIT" ]
27
2021-06-16T23:59:49.000Z
2022-03-15T16:17:54.000Z
domainbed/hparams_registry.py
arobey1/mbdg
b4e768a6d31ab1e2cb0f0a3aad76832895068876
[ "MIT" ]
2
2021-08-30T12:23:08.000Z
2021-11-16T19:35:26.000Z
domainbed/hparams_registry.py
arobey1/mbdg
b4e768a6d31ab1e2cb0f0a3aad76832895068876
[ "MIT" ]
5
2021-06-17T07:14:18.000Z
2022-01-13T04:00:06.000Z
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved import numpy as np from domainbed.lib import misc import os def _define_hparam(hparams, hparam_name, default_val, random_val_fn): hparams[hparam_name] = (hparams, hparam_name, default_val, random_val_fn) def _hparams(algorithm, dataset, random_seed, test_envs): """ Global registry of hyperparams. Each entry is a (default, random) tuple. New algorithms / networks / etc. should add entries here. """ SMALL_IMAGES = ['Debug28', 'RotatedMNIST', 'ColoredMNIST'] hparams = {} def _hparam(name, default_val, random_val_fn): """Define a hyperparameter. random_val_fn takes a RandomState and returns a random hyperparameter value.""" assert(name not in hparams) random_state = np.random.RandomState( misc.seed_hash(random_seed, name) ) hparams[name] = (default_val, random_val_fn(random_state)) # Unconditional hparam definitions. _hparam('data_augmentation', True, lambda r: True) _hparam('resnet18', False, lambda r: False) _hparam('resnet_dropout', 0., lambda r: r.choice([0., 0.1, 0.5])) _hparam('class_balanced', False, lambda r: False) # TODO: nonlinear classifiers disabled _hparam('nonlinear_classifier', False, lambda r: bool(r.choice([False, False]))) # Algorithm-specific hparam definitions. Each block of code below # corresponds to exactly one algorithm. 
if algorithm in ['DANN', 'CDANN']: _hparam('lambda', 1.0, lambda r: 10**r.uniform(-2, 2)) _hparam('weight_decay_d', 0., lambda r: 10**r.uniform(-6, -2)) _hparam('d_steps_per_g_step', 1, lambda r: int(2**r.uniform(0, 3))) _hparam('grad_penalty', 0., lambda r: 10**r.uniform(-2, 1)) _hparam('beta1', 0.5, lambda r: r.choice([0., 0.5])) _hparam('mlp_width', 256, lambda r: int(2 ** r.uniform(6, 10))) _hparam('mlp_depth', 3, lambda r: int(r.choice([3, 4, 5]))) _hparam('mlp_dropout', 0., lambda r: r.choice([0., 0.1, 0.5])) elif algorithm == 'Fish': _hparam('meta_lr', 0.5, lambda r:r.choice([0.05, 0.1, 0.5])) elif algorithm == 'MBDG_Reg' or algorithm == 'MBDG_DA': _hparam('mbdg_lam_dist', 1.0, lambda r: r.uniform(0.5, 10.0)) elif algorithm == 'MBDG': _hparam('mbdg_dual_step_size', 0.05, lambda r: r.uniform(0.001, 0.1)) # 0.05 _hparam('mbdg_gamma', 0.025, lambda r: r.uniform(0.0001, 0.01)) # 0.01 elif algorithm == "RSC": _hparam('rsc_f_drop_factor', 1/3, lambda r: r.uniform(0, 0.5)) _hparam('rsc_b_drop_factor', 1/3, lambda r: r.uniform(0, 0.5)) elif algorithm == "SagNet": _hparam('sag_w_adv', 0.1, lambda r: 10**r.uniform(-2, 1)) elif algorithm == "IRM": _hparam('irm_lambda', 1e2, lambda r: 10**r.uniform(-1, 5)) _hparam('irm_penalty_anneal_iters', 500, lambda r: int(10**r.uniform(0, 4))) elif algorithm == "Mixup": _hparam('mixup_alpha', 0.2, lambda r: 10**r.uniform(-1, -1)) elif algorithm == "GroupDRO": _hparam('groupdro_eta', 1e-2, lambda r: 10**r.uniform(-3, -1)) elif algorithm == "MMD" or algorithm == "CORAL": _hparam('mmd_gamma', 1., lambda r: 10**r.uniform(-1, 1)) elif algorithm == "MLDG": _hparam('mldg_beta', 1., lambda r: 10**r.uniform(-1, 1)) elif algorithm == "MTL": _hparam('mtl_ema', .99, lambda r: r.choice([0.5, 0.9, 0.99, 1.])) elif algorithm == "VREx": _hparam('vrex_lambda', 1e1, lambda r: 10**r.uniform(-1, 5)) _hparam('vrex_penalty_anneal_iters', 500, lambda r: int(10**r.uniform(0, 4))) elif algorithm == "SD": _hparam('sd_reg', 0.1, lambda r: 10**r.uniform(-5, 
-1)) elif algorithm == "ANDMask": _hparam('tau', 1, lambda r: r.uniform(0.5, 1.)) elif algorithm == "IGA": _hparam('penalty', 1000, lambda r: 10**r.uniform(1, 5)) # Dataset-and-algorithm-specific hparam definitions. Each block of code # below corresponds to exactly one hparam. Avoid nested conditionals. if dataset in SMALL_IMAGES: _hparam('lr', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5)) else: _hparam('lr', 5e-5, lambda r: 10**r.uniform(-5, -3.5)) if dataset in SMALL_IMAGES: _hparam('weight_decay', 0., lambda r: 0.) else: _hparam('weight_decay', 0., lambda r: 10**r.uniform(-6, -2)) if dataset in SMALL_IMAGES: _hparam('batch_size', 8, lambda r: int(2**r.uniform(3, 9))) elif algorithm == 'ARM': _hparam('batch_size', 8, lambda r: 8) elif dataset == 'DomainNet': _hparam('batch_size', 32, lambda r: int(2**r.uniform(3, 5))) else: # _hparam('batch_size', 32, lambda r: int(2**r.uniform(3, 5.5))) _hparam('batch_size', 16, lambda r: int(2**r.uniform(3, 4.5))) if algorithm in ['DANN', 'CDANN'] and dataset in SMALL_IMAGES: _hparam('lr_g', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5)) elif algorithm in ['DANN', 'CDANN']: _hparam('lr_g', 5e-5, lambda r: 10**r.uniform(-5, -3.5)) if algorithm in ['DANN', 'CDANN'] and dataset in SMALL_IMAGES: _hparam('lr_d', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5)) elif algorithm in ['DANN', 'CDANN']: _hparam('lr_d', 5e-5, lambda r: 10**r.uniform(-5, -3.5)) if algorithm in ['DANN', 'CDANN'] and dataset in SMALL_IMAGES: _hparam('weight_decay_g', 0., lambda r: 0.) 
elif algorithm in ['DANN', 'CDANN']: _hparam('weight_decay_g', 0., lambda r: 10**r.uniform(-6, -2)) if algorithm in ['MBDG', 'MBDG_Reg', 'MBDG_DA', 'MBDA']: if dataset == 'ColoredMNIST': model_root = './domainbed/munit/saved_models/colored_mnist' elif dataset == 'PACS': model_root = './domainbed/munit/saved_models/pacs/new' elif dataset == 'VLCS': model_root = './domainbed/munit/saved_models/vlcs/new' else: raise NotImplementedError(f'Dataset {dataset} not implemented for MBDG') model_path = os.path.join(model_root, f'model-dom{"".join([str(e) for e in test_envs])}.pt') config_path = os.path.join(model_root, 'config.yaml') _hparam('mbdg_model_path', model_path, lambda r: model_path) _hparam('mbdg_config_path', config_path, lambda r: config_path) return hparams def default_hparams(algorithm, dataset, test_envs): return {a: b for a, (b, c) in _hparams(algorithm, dataset, 0, test_envs).items()} def random_hparams(algorithm, dataset, seed, test_envs): return {a: c for a, (b, c) in _hparams(algorithm, dataset, seed, test_envs).items()}
39.204678
88
0.614708
4a012fc7324674457ef6d7cb2c525a1617939794
161
py
Python
mundo1/parte2/ex012.py
fcdennis/CursoPython
485ef7e706af74eae9ee336714ddd8b493bd8e5d
[ "MIT" ]
null
null
null
mundo1/parte2/ex012.py
fcdennis/CursoPython
485ef7e706af74eae9ee336714ddd8b493bd8e5d
[ "MIT" ]
null
null
null
mundo1/parte2/ex012.py
fcdennis/CursoPython
485ef7e706af74eae9ee336714ddd8b493bd8e5d
[ "MIT" ]
null
null
null
p = float(input("Qual é o preço do produto? R$")) print(f"O produto que custava R${p}, na promoção com desconto de 5% vai custar R${(p - ((p * 5)/100)):0.2f}.")
53.666667
110
0.627329
4a012fe6297787eb507ff35a66296c9fea2f0dcf
278
py
Python
Music.py
lifeofbaka/Star-Quest
73d6d901214a5c0a27417d13a1558ee3e7aafcfe
[ "MIT" ]
null
null
null
Music.py
lifeofbaka/Star-Quest
73d6d901214a5c0a27417d13a1558ee3e7aafcfe
[ "MIT" ]
null
null
null
Music.py
lifeofbaka/Star-Quest
73d6d901214a5c0a27417d13a1558ee3e7aafcfe
[ "MIT" ]
null
null
null
import pygame as pg import os pg.init() pg.mixer.init() # Returns error. Music only plays a bit like sound when player keys pressed. def My_Room_Music(): sound = pg.mixer.Sound(os.path.join('Music', 'Video Dungeon Crawl.wav')) sound.set_volume(0.05) sound.play(-1)
27.8
77
0.708633
4a0130a0172500d69fd22a351e028794e82ecd63
15,071
py
Python
docs/conf.py
loven-doo/aiohttp
01ef966b261bc6a8934b3c53c79c92f019b404a7
[ "Apache-2.0" ]
1
2020-08-12T01:31:16.000Z
2020-08-12T01:31:16.000Z
docs/conf.py
loven-doo/aiohttp
01ef966b261bc6a8934b3c53c79c92f019b404a7
[ "Apache-2.0" ]
202
2021-01-04T03:40:45.000Z
2022-03-31T08:08:47.000Z
docs/conf.py
loven-doo/aiohttp
01ef966b261bc6a8934b3c53c79c92f019b404a7
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # # aiohttp documentation build configuration file, created by # sphinx-quickstart on Wed Mar 5 12:35:35 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import re from pathlib import Path PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve() _docs_path = os.path.dirname(__file__) _version_path = os.path.abspath( os.path.join(_docs_path, "..", "aiohttp", "__init__.py") ) with open(_version_path, encoding="latin1") as fp: try: _version_info = re.search( r'^__version__ = "' r"(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)" r'(?P<tag>.*)?"$', fp.read(), re.M, ).groupdict() except IndexError: raise RuntimeError("Unable to determine version.") # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ # stdlib-party extensions: "sphinx.ext.extlinks", "sphinx.ext.intersphinx", "sphinx.ext.viewcode", # Third-party extensions: "sphinxcontrib.asyncio", "sphinxcontrib.blockdiag", "sphinxcontrib.towncrier", # provides `towncrier-draft-entries` directive ] try: import sphinxcontrib.spelling # noqa extensions.append("sphinxcontrib.spelling") except ImportError: pass intersphinx_mapping = { "python": ("http://docs.python.org/3", None), "multidict": ("https://multidict.readthedocs.io/en/stable/", None), "yarl": ("https://yarl.readthedocs.io/en/stable/", None), "aiohttpjinja2": ("https://aiohttp-jinja2.readthedocs.io/en/stable/", None), "aiohttpremotes": ("https://aiohttp-remotes.readthedocs.io/en/stable/", None), "aiohttpsession": ("https://aiohttp-session.readthedocs.io/en/stable/", None), "aiohttpdemos": ("https://aiohttp-demos.readthedocs.io/en/latest/", None), "asynctest": ("https://asynctest.readthedocs.io/en/latest/", None), } # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix of source filenames. source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # -- Project information ----------------------------------------------------- github_url = "https://github.com" github_repo_org = "aio-libs" github_repo_name = "aiohttp" github_repo_slug = f"{github_repo_org}/{github_repo_name}" github_repo_url = f"{github_url}/{github_repo_slug}" github_sponsors_url = f"{github_url}/sponsors" project = github_repo_name copyright = f"2013-2020, {project} maintainers" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "{major}.{minor}".format(**_version_info) # The full version, including alpha/beta/rc tags. 
release = "{major}.{minor}.{patch}{tag}".format(**_version_info) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. # pygments_style = 'sphinx' # The default language to highlight source code in. highlight_language = "python3" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Extension configuration ------------------------------------------------- # -- Options for extlinks extension --------------------------------------- extlinks = { "issue": (f"{github_repo_url}/issues/%s", "#"), "pr": (f"{github_repo_url}/pull/%s", "PR #"), "commit": (f"{github_repo_url}/commit/%s", ""), "gh": (f"{github_url}/%s", "GitHub: "), "user": (f"{github_sponsors_url}/%s", "@"), } # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
html_theme = "aiohttp_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "description": "Async HTTP client/server for asyncio and Python", "canonical_url": "http://docs.aiohttp.org/en/stable/", "github_user": github_repo_org, "github_repo": github_repo_name, "github_button": True, "github_type": "star", "github_banner": True, "badges": [ { "image": f"{github_repo_url}/workflows/CI/badge.svg", "target": f"{github_repo_url}/actions?query=workflow%3ACI", "height": "20", "alt": "Azure Pipelines CI status", }, { "image": f"https://codecov.io/github/{github_repo_slug}/coverage.svg?branch=master", "target": f"https://codecov.io/github/{github_repo_slug}", "height": "20", "alt": "Code coverage status", }, { "image": f"https://badge.fury.io/py/{project}.svg", "target": f"https://badge.fury.io/py/{project}", "height": "20", "alt": "Latest PyPI package version", }, { "image": f"https://img.shields.io/discourse/status?server=https%3A%2F%2F{github_repo_org}.discourse.group", "target": f"https://{github_repo_org}.discourse.group", "height": "20", "alt": "Discourse status", }, { "image": "https://badges.gitter.im/Join%20Chat.svg", "target": f"https://gitter.im/{github_repo_org}/Lobby", "height": "20", "alt": "Chat on Gitter", }, ], } html_css_files = [ "css/logo-adjustments.css", ] # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [alabaster.get_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "aiohttp-plain.svg" # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = "favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { "**": [ "about.html", "navigation.html", "searchbox.html", ] } # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). 
# html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = f"{project}doc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( "index", f"{project}.tex", f"{project} Documentation", f"{project} contributors", "manual", ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [("index", project, f"{project} Documentation", [project], 1)] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( "index", project, f"{project} Documentation", "Aiohttp contributors", project, "One line description of project.", "Miscellaneous", ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # ------------------------------------------------------------------------- nitpicky = True nitpick_ignore = [ ("py:mod", "aiohttp"), # undocumented, no `.. currentmodule:: aiohttp` in docs ("py:class", "aiohttp.SimpleCookie"), # undocumented ("py:class", "aiohttp.web.RequestHandler"), # undocumented ("py:class", "aiohttp.NamedPipeConnector"), # undocumented ("py:meth", "aiohttp.ClientSession.request"), # undocumented ("py:class", "aiohttp.protocol.HttpVersion"), # undocumented ("py:class", "aiohttp.ClientRequest"), # undocumented ("py:class", "aiohttp.payload.Payload"), # undocumented ("py:class", "aiohttp.abc.AbstractResolver"), # undocumented ("py:func", "aiohttp.ws_connect"), # undocumented ("py:meth", "start"), # undocumented ("py:exc", "aiohttp.ClientHttpProxyError"), # undocumented ("py:class", "asyncio.AbstractServer"), # undocumented ("py:mod", "aiohttp.test_tools"), # undocumented ("py:class", "list of pairs"), # undocumented ("py:class", "aiohttp.protocol.HttpVersion"), # undocumented ("py:meth", "aiohttp.ClientSession.request"), # undocumented ("py:class", "aiohttp.StreamWriter"), # undocumented ("py:attr", "aiohttp.StreamResponse.body"), # undocumented ("py:class", "aiohttp.payload.StringPayload"), # undocumented ("py:meth", "aiohttp.web.Application.copy"), # undocumented ("py:meth", "asyncio.AbstractEventLoop.create_server"), # undocumented 
("py:data", "aiohttp.log.server_logger"), # undocumented ("py:data", "aiohttp.log.access_logger"), # undocumented ("py:data", "aiohttp.helpers.AccessLogger"), # undocumented ("py:attr", "helpers.AccessLogger.LOG_FORMAT"), # undocumented ("py:meth", "aiohttp.web.AbstractRoute.url"), # undocumented ("py:class", "aiohttp.web.MatchedSubAppResource"), # undocumented ("py:attr", "body"), # undocumented ("py:class", "socket.socket"), # undocumented ("py:obj", "logging.DEBUG"), # undocumented ("py:class", "aiohttp.abc.AbstractAsyncAccessLogger"), # undocumented ("py:meth", "aiohttp.web.Response.write_eof"), # undocumented ("py:meth", "aiohttp.payload.Payload.set_content_disposition"), # undocumented ("py:class", "cgi.FieldStorage"), # undocumented ("py:meth", "aiohttp.web.UrlDispatcher.register_resource"), # undocumented ("py:func", "aiohttp_debugtoolbar.setup"), # undocumented ] # -- Options for towncrier_draft extension ----------------------------------- towncrier_draft_autoversion_mode = "draft" # or: 'sphinx-version', 'sphinx-release' towncrier_draft_include_empty = True towncrier_draft_working_directory = PROJECT_ROOT_DIR # Not yet supported: towncrier_draft_config_path = 'pyproject.toml' # relative to cwd
34.806005
119
0.659412
4a01312d7d6cbb266d5e8ccb6aedbb5607369d71
78,949
py
Python
tests/unit/gapic/errorreporting_v1beta1/test_error_stats_service.py
Ashish-Maradapa/python-error-reporting
c8e91b01908ecabe4fb51307970195f15916e3b7
[ "Apache-2.0" ]
null
null
null
tests/unit/gapic/errorreporting_v1beta1/test_error_stats_service.py
Ashish-Maradapa/python-error-reporting
c8e91b01908ecabe4fb51307970195f15916e3b7
[ "Apache-2.0" ]
null
null
null
tests/unit/gapic/errorreporting_v1beta1/test_error_stats_service.py
Ashish-Maradapa/python-error-reporting
c8e91b01908ecabe4fb51307970195f15916e3b7
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import mock import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.errorreporting_v1beta1.services.error_stats_service import ( ErrorStatsServiceAsyncClient, ) from google.cloud.errorreporting_v1beta1.services.error_stats_service import ( ErrorStatsServiceClient, ) from google.cloud.errorreporting_v1beta1.services.error_stats_service import pagers from google.cloud.errorreporting_v1beta1.services.error_stats_service import transports from google.cloud.errorreporting_v1beta1.types import common from google.cloud.errorreporting_v1beta1.types import error_stats_service from google.oauth2 import service_account from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import google.auth def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. 
# This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert ErrorStatsServiceClient._get_default_mtls_endpoint(None) is None assert ( ErrorStatsServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint ) assert ( ErrorStatsServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( ErrorStatsServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( ErrorStatsServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert ( ErrorStatsServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi ) @pytest.mark.parametrize( "client_class", [ErrorStatsServiceClient, ErrorStatsServiceAsyncClient,] ) def test_error_stats_service_client_from_service_account_info(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == "clouderrorreporting.googleapis.com:443" @pytest.mark.parametrize( "transport_class,transport_name", [ (transports.ErrorStatsServiceGrpcTransport, "grpc"), (transports.ErrorStatsServiceGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_error_stats_service_client_service_account_always_use_jwt( transport_class, 
transport_name ): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize( "client_class", [ErrorStatsServiceClient, ErrorStatsServiceAsyncClient,] ) def test_error_stats_service_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == "clouderrorreporting.googleapis.com:443" def test_error_stats_service_client_get_transport_class(): transport = ErrorStatsServiceClient.get_transport_class() available_transports = [ transports.ErrorStatsServiceGrpcTransport, ] assert transport in available_transports transport = ErrorStatsServiceClient.get_transport_class("grpc") assert transport == transports.ErrorStatsServiceGrpcTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (ErrorStatsServiceClient, transports.ErrorStatsServiceGrpcTransport, "grpc"), ( ErrorStatsServiceAsyncClient, transports.ErrorStatsServiceGrpcAsyncIOTransport, "grpc_asyncio", ), ], ) @mock.patch.object( ErrorStatsServiceClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(ErrorStatsServiceClient), ) @mock.patch.object( ErrorStatsServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ErrorStatsServiceAsyncClient), ) def test_error_stats_service_client_client_options( client_class, transport_class, transport_name ): # Check that if channel is provided we won't create a new one. with mock.patch.object(ErrorStatsServiceClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(ErrorStatsServiceClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". 
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ ( ErrorStatsServiceClient, transports.ErrorStatsServiceGrpcTransport, "grpc", "true", ), ( ErrorStatsServiceAsyncClient, transports.ErrorStatsServiceGrpcAsyncIOTransport, "grpc_asyncio", "true", ), ( ErrorStatsServiceClient, transports.ErrorStatsServiceGrpcTransport, "grpc", "false", ), ( ErrorStatsServiceAsyncClient, transports.ErrorStatsServiceGrpcAsyncIOTransport, 
"grpc_asyncio", "false", ), ], ) @mock.patch.object( ErrorStatsServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ErrorStatsServiceClient), ) @mock.patch.object( ErrorStatsServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ErrorStatsServiceAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_error_stats_service_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (ErrorStatsServiceClient, transports.ErrorStatsServiceGrpcTransport, "grpc"), ( ErrorStatsServiceAsyncClient, transports.ErrorStatsServiceGrpcAsyncIOTransport, "grpc_asyncio", ), ], ) def test_error_stats_service_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (ErrorStatsServiceClient, transports.ErrorStatsServiceGrpcTransport, "grpc"), ( ErrorStatsServiceAsyncClient, transports.ErrorStatsServiceGrpcAsyncIOTransport, "grpc_asyncio", ), ], ) def test_error_stats_service_client_client_options_credentials_file( client_class, transport_class, transport_name ): # Check the case credentials file is provided. 
options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_error_stats_service_client_client_options_from_dict(): with mock.patch( "google.cloud.errorreporting_v1beta1.services.error_stats_service.transports.ErrorStatsServiceGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None client = ErrorStatsServiceClient( client_options={"api_endpoint": "squid.clam.whelk"} ) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "request_type", [error_stats_service.ListGroupStatsRequest, dict,] ) def test_list_group_stats(request_type, transport: str = "grpc"): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_group_stats), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = error_stats_service.ListGroupStatsResponse( next_page_token="next_page_token_value", ) response = client.list_group_stats(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == error_stats_service.ListGroupStatsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListGroupStatsPager) assert response.next_page_token == "next_page_token_value" def test_list_group_stats_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_group_stats), "__call__") as call: client.list_group_stats() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == error_stats_service.ListGroupStatsRequest() @pytest.mark.asyncio async def test_list_group_stats_async( transport: str = "grpc_asyncio", request_type=error_stats_service.ListGroupStatsRequest, ): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_group_stats), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( error_stats_service.ListGroupStatsResponse( next_page_token="next_page_token_value", ) ) response = await client.list_group_stats(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == error_stats_service.ListGroupStatsRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, pagers.ListGroupStatsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_group_stats_async_from_dict(): await test_list_group_stats_async(request_type=dict) def test_list_group_stats_field_headers(): client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = error_stats_service.ListGroupStatsRequest() request.project_name = "project_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_group_stats), "__call__") as call: call.return_value = error_stats_service.ListGroupStatsResponse() client.list_group_stats(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "project_name=project_name/value",) in kw[ "metadata" ] @pytest.mark.asyncio async def test_list_group_stats_field_headers_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = error_stats_service.ListGroupStatsRequest() request.project_name = "project_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_group_stats), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( error_stats_service.ListGroupStatsResponse() ) await client.list_group_stats(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "project_name=project_name/value",) in kw[ "metadata" ] def test_list_group_stats_flattened(): client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_group_stats), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = error_stats_service.ListGroupStatsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_group_stats( project_name="project_name_value", time_range=error_stats_service.QueryTimeRange( period=error_stats_service.QueryTimeRange.Period.PERIOD_1_HOUR ), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].project_name mock_val = "project_name_value" assert arg == mock_val arg = args[0].time_range mock_val = error_stats_service.QueryTimeRange( period=error_stats_service.QueryTimeRange.Period.PERIOD_1_HOUR ) assert arg == mock_val def test_list_group_stats_flattened_error(): client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): client.list_group_stats( error_stats_service.ListGroupStatsRequest(), project_name="project_name_value", time_range=error_stats_service.QueryTimeRange( period=error_stats_service.QueryTimeRange.Period.PERIOD_1_HOUR ), ) @pytest.mark.asyncio async def test_list_group_stats_flattened_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_group_stats), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = error_stats_service.ListGroupStatsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( error_stats_service.ListGroupStatsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_group_stats( project_name="project_name_value", time_range=error_stats_service.QueryTimeRange( period=error_stats_service.QueryTimeRange.Period.PERIOD_1_HOUR ), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].project_name mock_val = "project_name_value" assert arg == mock_val arg = args[0].time_range mock_val = error_stats_service.QueryTimeRange( period=error_stats_service.QueryTimeRange.Period.PERIOD_1_HOUR ) assert arg == mock_val @pytest.mark.asyncio async def test_list_group_stats_flattened_error_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): await client.list_group_stats( error_stats_service.ListGroupStatsRequest(), project_name="project_name_value", time_range=error_stats_service.QueryTimeRange( period=error_stats_service.QueryTimeRange.Period.PERIOD_1_HOUR ), ) def test_list_group_stats_pager(transport_name: str = "grpc"): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_group_stats), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( error_stats_service.ListGroupStatsResponse( error_group_stats=[ error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), ], next_page_token="abc", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[], next_page_token="def", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[error_stats_service.ErrorGroupStats(),], next_page_token="ghi", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[ error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), ], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("project_name", ""),)), ) pager = client.list_group_stats(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, error_stats_service.ErrorGroupStats) for i in results) def test_list_group_stats_pages(transport_name: str = "grpc"): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_group_stats), "__call__") as call: # Set the response to a series of pages. 
call.side_effect = ( error_stats_service.ListGroupStatsResponse( error_group_stats=[ error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), ], next_page_token="abc", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[], next_page_token="def", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[error_stats_service.ErrorGroupStats(),], next_page_token="ghi", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[ error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), ], ), RuntimeError, ) pages = list(client.list_group_stats(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_group_stats_async_pager(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_group_stats), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( error_stats_service.ListGroupStatsResponse( error_group_stats=[ error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), ], next_page_token="abc", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[], next_page_token="def", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[error_stats_service.ErrorGroupStats(),], next_page_token="ghi", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[ error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), ], ), RuntimeError, ) async_pager = await client.list_group_stats(request={},) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all( isinstance(i, error_stats_service.ErrorGroupStats) for i in responses ) @pytest.mark.asyncio async def test_list_group_stats_async_pages(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_group_stats), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( error_stats_service.ListGroupStatsResponse( error_group_stats=[ error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), ], next_page_token="abc", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[], next_page_token="def", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[error_stats_service.ErrorGroupStats(),], next_page_token="ghi", ), error_stats_service.ListGroupStatsResponse( error_group_stats=[ error_stats_service.ErrorGroupStats(), error_stats_service.ErrorGroupStats(), ], ), RuntimeError, ) pages = [] async for page_ in (await client.list_group_stats(request={})).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize("request_type", [error_stats_service.ListEventsRequest, dict,]) def test_list_events(request_type, transport: str = "grpc"): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_events), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = error_stats_service.ListEventsResponse( next_page_token="next_page_token_value", ) response = client.list_events(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == error_stats_service.ListEventsRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, pagers.ListEventsPager) assert response.next_page_token == "next_page_token_value" def test_list_events_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_events), "__call__") as call: client.list_events() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == error_stats_service.ListEventsRequest() @pytest.mark.asyncio async def test_list_events_async( transport: str = "grpc_asyncio", request_type=error_stats_service.ListEventsRequest ): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_events), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( error_stats_service.ListEventsResponse( next_page_token="next_page_token_value", ) ) response = await client.list_events(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == error_stats_service.ListEventsRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, pagers.ListEventsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_events_async_from_dict(): await test_list_events_async(request_type=dict) def test_list_events_field_headers(): client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = error_stats_service.ListEventsRequest() request.project_name = "project_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_events), "__call__") as call: call.return_value = error_stats_service.ListEventsResponse() client.list_events(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "project_name=project_name/value",) in kw[ "metadata" ] @pytest.mark.asyncio async def test_list_events_field_headers_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = error_stats_service.ListEventsRequest() request.project_name = "project_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_events), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( error_stats_service.ListEventsResponse() ) await client.list_events(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "project_name=project_name/value",) in kw[ "metadata" ] def test_list_events_flattened(): client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_events), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = error_stats_service.ListEventsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_events( project_name="project_name_value", group_id="group_id_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].project_name mock_val = "project_name_value" assert arg == mock_val arg = args[0].group_id mock_val = "group_id_value" assert arg == mock_val def test_list_events_flattened_error(): client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_events( error_stats_service.ListEventsRequest(), project_name="project_name_value", group_id="group_id_value", ) @pytest.mark.asyncio async def test_list_events_flattened_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_events), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = error_stats_service.ListEventsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( error_stats_service.ListEventsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_events( project_name="project_name_value", group_id="group_id_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].project_name mock_val = "project_name_value" assert arg == mock_val arg = args[0].group_id mock_val = "group_id_value" assert arg == mock_val @pytest.mark.asyncio async def test_list_events_flattened_error_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.list_events( error_stats_service.ListEventsRequest(), project_name="project_name_value", group_id="group_id_value", ) def test_list_events_pager(transport_name: str = "grpc"): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_events), "__call__") as call: # Set the response to a series of pages. 
call.side_effect = ( error_stats_service.ListEventsResponse( error_events=[ common.ErrorEvent(), common.ErrorEvent(), common.ErrorEvent(), ], next_page_token="abc", ), error_stats_service.ListEventsResponse( error_events=[], next_page_token="def", ), error_stats_service.ListEventsResponse( error_events=[common.ErrorEvent(),], next_page_token="ghi", ), error_stats_service.ListEventsResponse( error_events=[common.ErrorEvent(), common.ErrorEvent(),], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("project_name", ""),)), ) pager = client.list_events(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, common.ErrorEvent) for i in results) def test_list_events_pages(transport_name: str = "grpc"): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_events), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( error_stats_service.ListEventsResponse( error_events=[ common.ErrorEvent(), common.ErrorEvent(), common.ErrorEvent(), ], next_page_token="abc", ), error_stats_service.ListEventsResponse( error_events=[], next_page_token="def", ), error_stats_service.ListEventsResponse( error_events=[common.ErrorEvent(),], next_page_token="ghi", ), error_stats_service.ListEventsResponse( error_events=[common.ErrorEvent(), common.ErrorEvent(),], ), RuntimeError, ) pages = list(client.list_events(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_events_async_pager(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.list_events), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( error_stats_service.ListEventsResponse( error_events=[ common.ErrorEvent(), common.ErrorEvent(), common.ErrorEvent(), ], next_page_token="abc", ), error_stats_service.ListEventsResponse( error_events=[], next_page_token="def", ), error_stats_service.ListEventsResponse( error_events=[common.ErrorEvent(),], next_page_token="ghi", ), error_stats_service.ListEventsResponse( error_events=[common.ErrorEvent(), common.ErrorEvent(),], ), RuntimeError, ) async_pager = await client.list_events(request={},) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, common.ErrorEvent) for i in responses) @pytest.mark.asyncio async def test_list_events_async_pages(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_events), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( error_stats_service.ListEventsResponse( error_events=[ common.ErrorEvent(), common.ErrorEvent(), common.ErrorEvent(), ], next_page_token="abc", ), error_stats_service.ListEventsResponse( error_events=[], next_page_token="def", ), error_stats_service.ListEventsResponse( error_events=[common.ErrorEvent(),], next_page_token="ghi", ), error_stats_service.ListEventsResponse( error_events=[common.ErrorEvent(), common.ErrorEvent(),], ), RuntimeError, ) pages = [] async for page_ in (await client.list_events(request={})).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.parametrize( "request_type", [error_stats_service.DeleteEventsRequest, dict,] ) def test_delete_events(request_type, transport: str = "grpc"): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_events), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = error_stats_service.DeleteEventsResponse() response = client.delete_events(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == error_stats_service.DeleteEventsRequest() # Establish that the response is the type that we expect. assert isinstance(response, error_stats_service.DeleteEventsResponse) def test_delete_events_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_events), "__call__") as call: client.delete_events() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == error_stats_service.DeleteEventsRequest() @pytest.mark.asyncio async def test_delete_events_async( transport: str = "grpc_asyncio", request_type=error_stats_service.DeleteEventsRequest, ): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_events), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( error_stats_service.DeleteEventsResponse() ) response = await client.delete_events(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == error_stats_service.DeleteEventsRequest() # Establish that the response is the type that we expect. assert isinstance(response, error_stats_service.DeleteEventsResponse) @pytest.mark.asyncio async def test_delete_events_async_from_dict(): await test_delete_events_async(request_type=dict) def test_delete_events_field_headers(): client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = error_stats_service.DeleteEventsRequest() request.project_name = "project_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_events), "__call__") as call: call.return_value = error_stats_service.DeleteEventsResponse() client.delete_events(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "project_name=project_name/value",) in kw[ "metadata" ] @pytest.mark.asyncio async def test_delete_events_field_headers_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = error_stats_service.DeleteEventsRequest() request.project_name = "project_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_events), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( error_stats_service.DeleteEventsResponse() ) await client.delete_events(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "project_name=project_name/value",) in kw[ "metadata" ] def test_delete_events_flattened(): client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_events), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = error_stats_service.DeleteEventsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_events(project_name="project_name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] arg = args[0].project_name mock_val = "project_name_value" assert arg == mock_val def test_delete_events_flattened_error(): client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.delete_events( error_stats_service.DeleteEventsRequest(), project_name="project_name_value", ) @pytest.mark.asyncio async def test_delete_events_flattened_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_events), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = error_stats_service.DeleteEventsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( error_stats_service.DeleteEventsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.delete_events(project_name="project_name_value",) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] arg = args[0].project_name mock_val = "project_name_value" assert arg == mock_val @pytest.mark.asyncio async def test_delete_events_flattened_error_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.delete_events( error_stats_service.DeleteEventsRequest(), project_name="project_name_value", ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.ErrorStatsServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.ErrorStatsServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = ErrorStatsServiceClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide scopes and a transport instance. transport = transports.ErrorStatsServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = ErrorStatsServiceClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.ErrorStatsServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = ErrorStatsServiceClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. 
transport = transports.ErrorStatsServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.ErrorStatsServiceGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize( "transport_class", [ transports.ErrorStatsServiceGrpcTransport, transports.ErrorStatsServiceGrpcAsyncIOTransport, ], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = ErrorStatsServiceClient(credentials=ga_credentials.AnonymousCredentials(),) assert isinstance(client.transport, transports.ErrorStatsServiceGrpcTransport,) def test_error_stats_service_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.ErrorStatsServiceTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_error_stats_service_base_transport(): # Instantiate the base transport. with mock.patch( "google.cloud.errorreporting_v1beta1.services.error_stats_service.transports.ErrorStatsServiceTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.ErrorStatsServiceTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. 
methods = ( "list_group_stats", "list_events", "delete_events", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) with pytest.raises(NotImplementedError): transport.close() def test_error_stats_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.errorreporting_v1beta1.services.error_stats_service.transports.ErrorStatsServiceTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.ErrorStatsServiceTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) def test_error_stats_service_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.errorreporting_v1beta1.services.error_stats_service.transports.ErrorStatsServiceTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.ErrorStatsServiceTransport() adc.assert_called_once() def test_error_stats_service_auth_adc(): # If no credentials are provided, we should use ADC credentials. 
with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) ErrorStatsServiceClient() adc.assert_called_once_with( scopes=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [ transports.ErrorStatsServiceGrpcTransport, transports.ErrorStatsServiceGrpcAsyncIOTransport, ], ) def test_error_stats_service_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.ErrorStatsServiceGrpcTransport, grpc_helpers), (transports.ErrorStatsServiceGrpcAsyncIOTransport, grpc_helpers_async), ], ) def test_error_stats_service_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. 
with mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) create_channel.assert_called_with( "clouderrorreporting.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=("https://www.googleapis.com/auth/cloud-platform",), scopes=["1", "2"], default_host="clouderrorreporting.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "transport_class", [ transports.ErrorStatsServiceGrpcTransport, transports.ErrorStatsServiceGrpcAsyncIOTransport, ], ) def test_error_stats_service_grpc_transport_client_cert_source_for_mtls( transport_class, ): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds, ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. 
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback, ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) def test_error_stats_service_host_no_port(): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="clouderrorreporting.googleapis.com" ), ) assert client.transport._host == "clouderrorreporting.googleapis.com:443" def test_error_stats_service_host_with_port(): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="clouderrorreporting.googleapis.com:8000" ), ) assert client.transport._host == "clouderrorreporting.googleapis.com:8000" def test_error_stats_service_grpc_transport_channel(): channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.ErrorStatsServiceGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_error_stats_service_grpc_asyncio_transport_channel(): channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. 
transport = transports.ErrorStatsServiceGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ transports.ErrorStatsServiceGrpcTransport, transports.ErrorStatsServiceGrpcAsyncIOTransport, ], ) def test_error_stats_service_transport_channel_mtls_with_client_cert_source( transport_class, ): with mock.patch( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize( "transport_class", [ transports.ErrorStatsServiceGrpcTransport, transports.ErrorStatsServiceGrpcAsyncIOTransport, ], ) def test_error_stats_service_transport_channel_mtls_with_adc(transport_class): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_error_group_path(): project = "squid" group = "clam" expected = "projects/{project}/groups/{group}".format(project=project, group=group,) actual = ErrorStatsServiceClient.error_group_path(project, group) assert expected == actual def test_parse_error_group_path(): expected = { "project": "whelk", "group": "octopus", } path = ErrorStatsServiceClient.error_group_path(**expected) # Check that the path construction is reversible. 
actual = ErrorStatsServiceClient.parse_error_group_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "oyster" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = ErrorStatsServiceClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "nudibranch", } path = ErrorStatsServiceClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = ErrorStatsServiceClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "cuttlefish" expected = "folders/{folder}".format(folder=folder,) actual = ErrorStatsServiceClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "mussel", } path = ErrorStatsServiceClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = ErrorStatsServiceClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "winkle" expected = "organizations/{organization}".format(organization=organization,) actual = ErrorStatsServiceClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nautilus", } path = ErrorStatsServiceClient.common_organization_path(**expected) # Check that the path construction is reversible. 
actual = ErrorStatsServiceClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "scallop" expected = "projects/{project}".format(project=project,) actual = ErrorStatsServiceClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "abalone", } path = ErrorStatsServiceClient.common_project_path(**expected) # Check that the path construction is reversible. actual = ErrorStatsServiceClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "squid" location = "clam" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = ErrorStatsServiceClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "whelk", "location": "octopus", } path = ErrorStatsServiceClient.common_location_path(**expected) # Check that the path construction is reversible. 
actual = ErrorStatsServiceClient.parse_common_location_path(path) assert expected == actual def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.ErrorStatsServiceTransport, "_prep_wrapped_messages" ) as prep: client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.ErrorStatsServiceTransport, "_prep_wrapped_messages" ) as prep: transport_class = ErrorStatsServiceClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @pytest.mark.asyncio async def test_transport_close_async(): client = ErrorStatsServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() def test_transport_close(): transports = { "grpc": "_grpc_channel", } for transport, close_name in transports.items(): client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" ) as close: with client: close.assert_not_called() close.assert_called_once() def test_client_ctx(): transports = [ "grpc", ] for transport in transports: client = ErrorStatsServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. with mock.patch.object(type(client.transport), "close") as close: close.assert_not_called() with client: pass close.assert_called()
38.066056
135
0.676905
4a01327cdaa0554f0ccab7e202c8befbad762cad
5,500
py
Python
Ryu/transinfo_server.py
JoshuaYu-crash/C4EP2-2021
62e2e5a2fb396c598e7e8a265515c342fb78f63e
[ "MIT" ]
null
null
null
Ryu/transinfo_server.py
JoshuaYu-crash/C4EP2-2021
62e2e5a2fb396c598e7e8a265515c342fb78f63e
[ "MIT" ]
null
null
null
Ryu/transinfo_server.py
JoshuaYu-crash/C4EP2-2021
62e2e5a2fb396c598e7e8a265515c342fb78f63e
[ "MIT" ]
null
null
null
from sqlalchemy import Column, String, create_engine, Integer, Boolean from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base import transinfo_pb2_grpc import transinfo_pb2 import logging import grpc import time from concurrent import futures import redis from config import Config r = redis.Redis(host="127.0.0.1", port=6379) r["update_time"] = int(time.time()) session = None def insert(req): # session = get_db_session() new_pkg = Pkg(Ty=req.type, protocol=req.protocol, saddr=req.saddr, sport=req.sport, send_byte=req.send_byte, daddr=req.daddr, dport=req.dport, recv_byte=req.recv_byte, time=int(time.time()), pid=req.pid, com=req.com, host=req.host) session.add(new_pkg) session.commit() # >threshold1: warning(byte); >threshold2: ban def query(saddr, threshold1=Config.doubtThreshold, threshold2=Config.dangerThreshold): # session = get_db_session() now = int(time.time()) pkg = session.query(Pkg).filter(Pkg.time > now - 60, Pkg.saddr == saddr).all() # .all() send_sum = 0 for e in pkg: send_sum += e.send_byte if send_sum > threshold2: ret = 2 elif send_sum > threshold1: ret = 1 else: ret = 0 print(ret) return ret def broadcast_to_clients(): import json # session = get_db_session() banned_IPs = [] IPs = session.query(BanIP).filter(BanIP.banned == True).all() # .all() for ip in IPs: banned_IPs.append(ip.ban_ip) r.publish("Banned IPs", json.dumps(banned_IPs)) def ban(saddr, banned=True): # True => banned, False => warning # session = get_db_session() item = session.query(BanIP).filter(BanIP.ban_ip == saddr).first() return_code = 0 if item == None: new_ban_ip = BanIP(ban_ip=saddr, banned=banned) session.add(new_ban_ip) if banned: r["update_time"] = int(time.time()) session.commit() # print(str(saddr) + " is added to the banned list.") broadcast_to_clients() elif item.banned != banned: item.banned = banned r["update_time"] = int(time.time()) session.commit() broadcast_to_clients() else: print("Banned ip add failed. 
" + str(saddr) + " exists.") def add_danger_ip(saddr): ban(saddr, banned=True) def add_doubt_ip(saddr): ban(saddr, banned=False) def get_db_session(): engine = create_engine( 'mysql+mysqldb://root:password@localhost:3306/package') Base.metadata.create_all(engine) DBSession = sessionmaker(bind=engine) Session = DBSession() return Session def get_ban_list(): # session = get_db_session() ips = session.query(BanIP).filter(BanIP.banned == True).all() ban_list = [] for e in ips: ban_list.append(e.ban_ip) return ban_list class TransInfo: def GetInfo(self, request, context): print(request) insert(req=request) isToBan = query(request.saddr, threshold1=Config.doubtThreshold, threshold2=Config.dangerThreshold) if isToBan == 2: ban(request.saddr, banned=True) elif isToBan == 1: ban(request.saddr, banned=False) ban_list = get_ban_list() print(str(r["update_time"]) + " >= " + str(request.prev_time)) # if int(r["update_time"]) >= request.prev_time and ban_list: # # return transinfo_pb2.SuccessReply(reply_code=2, reply=str(ban_list)) # return transinfo_pb2.SuccessReply(reply_code=2, reply="") # else: return transinfo_pb2.SuccessReply(reply_code=1, reply="") Base = declarative_base() class Pkg(Base): __tablename__ = 'pkg' id = Column(Integer, primary_key=True) Ty = Column(String(8)) # type protocol = Column(String(10)) daddr = Column(String(40)) dport = Column(Integer) saddr = Column(String(40)) sport = Column(Integer) send_byte = Column(Integer) recv_byte = Column(Integer) time = Column(Integer) pid = Column(Integer) com = Column(String(20)) host = Column(String(40)) def __init__(self, Ty, protocol, saddr, sport, send_byte, daddr, dport, recv_byte, time, pid, com, host): # {'type': 'ip4', 'data': {'daddr': '192.168.200.200', 'send_byte': 1400, 'sport': '22', 'recv_byte': 1160, 'time': 1623748639.296404, 'dport': '6989', 'com': '7432', 'saddr': '30.0.1.77', 'pid': 7432}, 'protocol': 'tcp'} self.Ty = Ty self.protocol = protocol self.saddr = saddr self.sport = sport self.send_byte = 
send_byte self.daddr = daddr self.dport = dport self.recv_byte = recv_byte self.time = time self.pid = pid self.com = com self.host = host class BanIP(Base): __tablename__ = "banIP" id = Column(Integer, primary_key=True) ban_ip = Column(String(40)) banned = Column(Boolean) def __init__(self, ban_ip, banned): self.ban_ip = ban_ip self.banned = banned def serve(): server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) transinfo_pb2_grpc.add_TransInfoServicer_to_server(TransInfo(), server) server.add_insecure_port('[::]:11451') server.start() server.wait_for_termination() def run(): global session session = get_db_session() logging.basicConfig() serve() if __name__ == '__main__': run()
28.795812
229
0.631818
4a0132fa86de3baa6fbdcf0b327f45144edf7358
376
py
Python
helper.py
aayushgupta97/advent_of_code_2020
dbf5291f1cf4bd5855bbc54cb560ac3c7a7ac26c
[ "MIT" ]
null
null
null
helper.py
aayushgupta97/advent_of_code_2020
dbf5291f1cf4bd5855bbc54cb560ac3c7a7ac26c
[ "MIT" ]
null
null
null
helper.py
aayushgupta97/advent_of_code_2020
dbf5291f1cf4bd5855bbc54cb560ac3c7a7ac26c
[ "MIT" ]
null
null
null
import functools def printsol(func): @functools.wraps(func) def wrapper(*args, **kwargs): ret = func(*args, **kwargs) print("The Solution to the problem for {} is {}".format(func.__name__, ret)) return wrapper def read_text_file(fname): with open(f"data/{fname}", "r") as f: return [line.replace("\n", "") for line in f.readlines()]
26.857143
84
0.619681
4a01339cefcbb653737df4d5c619076c3c02fca5
1,398
py
Python
ARCH_FILES/src/main.py
Andrew95496/hypergraze
224719eb661a4069923355930ef3b5f0aca44dde
[ "MIT" ]
null
null
null
ARCH_FILES/src/main.py
Andrew95496/hypergraze
224719eb661a4069923355930ef3b5f0aca44dde
[ "MIT" ]
null
null
null
ARCH_FILES/src/main.py
Andrew95496/hypergraze
224719eb661a4069923355930ef3b5f0aca44dde
[ "MIT" ]
null
null
null
# import sys # sys.path.append('/Users/drewskikatana/hypergraze/config') # sys.path.append('/Users/drewskikatana/hypergraze/modules') # import re # import psycopg2 # # ? My modules # from configs import config as cf # from modules import get_text # def main(): # # Regex Code # try: # text = get_text() # except UnboundLocalError: # print('Not Valid URL') # main() # # Regex # USER_INPUT = input('What do you want to find: ') # USER_REGEX = re.compile(USER_INPUT) # METACHAR = USER_REGEX.findall(text) # print(METACHAR) # CONN = None # CUR = None # try: # CONN = psycopg2.connect( # host=cf.hostname, # dbname=cf.database, # user=cf.username, # password=cf.pwd, # port=cf.port_id) # # cursor # CUR = CONN.cursor() # # queries # INSERT_SCRIPT = 'insert into user_info (URL, text) values ( %s, %s);' # INSERT_VALUES = (USER_INPUT, METACHAR) # CUR.execute(INSERT_SCRIPT, INSERT_VALUES) # # commit # CONN.commit() # except Exception as error: # print(error) # finally: # # ! ALWAYS CLOSE CONNECTIONS # if CUR is not None: # CUR.close() # if CONN is not None: # CONN.close() # if __name__ == "__main__": # main()
22.548387
79
0.54578
4a0133d317dcb508959429bc3cf189417ab179ec
1,124
py
Python
hbase_kernel/images.py
f-cg/hbase_kernel
d107719d51c8f72b0c6323dab43b5ec9e9ec80ea
[ "BSD-3-Clause" ]
null
null
null
hbase_kernel/images.py
f-cg/hbase_kernel
d107719d51c8f72b0c6323dab43b5ec9e9ec80ea
[ "BSD-3-Clause" ]
null
null
null
hbase_kernel/images.py
f-cg/hbase_kernel
d107719d51c8f72b0c6323dab43b5ec9e9ec80ea
[ "BSD-3-Clause" ]
null
null
null
import base64 import imghdr import os # from IPython. _TEXT_SAVED_IMAGE = "hbase_kernel: saved image data to:" image_setup_cmd = """ display () { TMPFILE=$(mktemp ${TMPDIR-/tmp}/bash_kernel.XXXXXXXXXX) cat > $TMPFILE echo "%s $TMPFILE" >&2 } """ % _TEXT_SAVED_IMAGE def display_data_for_image(filename): with open(filename, 'rb') as f: image = f.read() os.unlink(filename) image_type = imghdr.what(None, image) if image_type is None: raise ValueError("Not a valid image: %s" % image) image_data = base64.b64encode(image).decode('ascii') content = { 'data': { 'image/' + image_type: image_data }, 'metadata': {} } return content def extract_image_filenames(output): output_lines = [] image_filenames = [] for line in output.split("\n"): if line.startswith(_TEXT_SAVED_IMAGE): filename = line.rstrip().split(": ")[-1] image_filenames.append(filename) else: output_lines.append(line) output = "\n".join(output_lines) return image_filenames, output
22.48
59
0.618327
4a0134e8b0f2730e110dde84cbc42edaa5b05e5a
1,061
py
Python
core/jobs/types/feedback_validation_errors.py
nbaddam/oppia
e58b81e57007f25537ba8ed71b42bfd9ae661799
[ "Apache-2.0" ]
5,422
2015-08-14T01:56:44.000Z
2022-03-31T23:31:56.000Z
core/jobs/types/feedback_validation_errors.py
nbaddam/oppia
e58b81e57007f25537ba8ed71b42bfd9ae661799
[ "Apache-2.0" ]
14,178
2015-08-14T05:21:45.000Z
2022-03-31T23:54:10.000Z
core/jobs/types/feedback_validation_errors.py
nbaddam/oppia
e58b81e57007f25537ba8ed71b42bfd9ae661799
[ "Apache-2.0" ]
3,574
2015-08-14T04:20:06.000Z
2022-03-29T01:52:37.000Z
# coding: utf-8 # # Copyright 2021 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Error classes for feedback model audits.""" from __future__ import annotations from core.jobs.types import base_validation_errors class InvalidEntityTypeError(base_validation_errors.BaseAuditError): """Error class for models that have invalid entity type.""" def __init__(self, model): message = 'entity type %s is invalid.' % model.entity_type super(InvalidEntityTypeError, self).__init__(message, model)
35.366667
74
0.755891
4a013562c6e3988dad025463ff932e03506bad1d
597
py
Python
M4nifest01.py
password520/M4nifest0_CAM_Hacking
b7841afb00cd87ad33afc947fca1203fea03cd16
[ "MIT" ]
7
2021-09-08T10:48:24.000Z
2022-03-29T06:43:56.000Z
M4nifest01.py
M4nifest0-Black-Hat-Hacking/M4nifest0_CAM_Hacking
8196b76650bcea20b8f43cf7b9dd1ffa2d05000a
[ "MIT" ]
null
null
null
M4nifest01.py
M4nifest0-Black-Hat-Hacking/M4nifest0_CAM_Hacking
8196b76650bcea20b8f43cf7b9dd1ffa2d05000a
[ "MIT" ]
4
2021-09-08T10:48:29.000Z
2021-11-15T23:08:51.000Z
print(colored(""" ███╗ ███╗██╗ ██╗███╗ ██╗██╗███████╗███████╗███████╗████████╗ ██████╗ ████╗ ████║██║ ██║████╗ ██║██║██╔════╝██╔════╝██╔════╝╚══██╔══╝██╔═████╗ ██╔████╔██║███████║██╔██╗ ██║██║█████╗ █████╗ ███████╗ ██║ ██║██╔██║ ██║╚██╔╝██║╚════██║██║╚██╗██║██║██╔══╝ ██╔══╝ ╚════██║ ██║ ████╔╝██║ ██║ ╚═╝ ██║ ██║██║ ╚████║██║██║ ███████╗███████║ ██║ ╚██████╔╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝╚═╝ ╚══════╝╚══════╝ ╚═╝ ╚═════╝ """, "blue"))
39.8
76
0.026801
4a01367cc209e568e00838246ff94d2c3091c9b9
33,961
py
Python
homeassistant/components/fritz/common.py
liangleslie/core
cc807b4d597daaaadc92df4a93c6e30da4f570c6
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
homeassistant/components/fritz/common.py
liangleslie/core
cc807b4d597daaaadc92df4a93c6e30da4f570c6
[ "Apache-2.0" ]
24,710
2016-04-13T08:27:26.000Z
2020-03-02T12:59:13.000Z
homeassistant/components/fritz/common.py
liangleslie/core
cc807b4d597daaaadc92df4a93c6e30da4f570c6
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Support for AVM FRITZ!Box classes.""" from __future__ import annotations from collections.abc import Callable, ValuesView from dataclasses import dataclass, field from datetime import datetime, timedelta from functools import partial import logging from types import MappingProxyType from typing import Any, TypedDict, cast from fritzconnection import FritzConnection from fritzconnection.core.exceptions import ( FritzActionError, FritzConnectionException, FritzSecurityError, FritzServiceError, ) from fritzconnection.lib.fritzhosts import FritzHosts from fritzconnection.lib.fritzstatus import FritzStatus from fritzconnection.lib.fritzwlan import DEFAULT_PASSWORD_LENGTH, FritzGuestWLAN from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN from homeassistant.components.device_tracker.const import ( CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME, ) from homeassistant.components.switch import DOMAIN as DEVICE_SWITCH_DOMAIN from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant, ServiceCall, callback from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import ( device_registry as dr, entity_registry as er, update_coordinator, ) from homeassistant.helpers.dispatcher import dispatcher_send from homeassistant.helpers.entity import DeviceInfo from homeassistant.util import dt as dt_util from .const import ( CONF_OLD_DISCOVERY, DEFAULT_CONF_OLD_DISCOVERY, DEFAULT_DEVICE_NAME, DEFAULT_HOST, DEFAULT_PORT, DEFAULT_USERNAME, DOMAIN, FRITZ_EXCEPTIONS, SERVICE_CLEANUP, SERVICE_REBOOT, SERVICE_RECONNECT, SERVICE_SET_GUEST_WIFI_PW, MeshRoles, ) _LOGGER = logging.getLogger(__name__) def _is_tracked(mac: str, current_devices: ValuesView) -> bool: """Check if device is already tracked.""" for tracked in current_devices: if mac in tracked: return True return False def device_filter_out_from_trackers( mac: str, device: FritzDevice, current_devices: ValuesView, ) -> bool: """Check if device 
should be filtered out from trackers.""" reason: str | None = None if device.ip_address == "": reason = "Missing IP" elif _is_tracked(mac, current_devices): reason = "Already tracked" if reason: _LOGGER.debug( "Skip adding device %s [%s], reason: %s", device.hostname, mac, reason ) return bool(reason) def _cleanup_entity_filter(device: er.RegistryEntry) -> bool: """Filter only relevant entities.""" return device.domain == DEVICE_TRACKER_DOMAIN or ( device.domain == DEVICE_SWITCH_DOMAIN and "_internet_access" in device.entity_id ) def _ha_is_stopping(activity: str) -> None: """Inform that HA is stopping.""" _LOGGER.info("Cannot execute %s: HomeAssistant is shutting down", activity) class ClassSetupMissing(Exception): """Raised when a Class func is called before setup.""" def __init__(self) -> None: """Init custom exception.""" super().__init__("Function called before Class setup") @dataclass class Device: """FRITZ!Box device class.""" connected: bool connected_to: str connection_type: str ip_address: str name: str ssid: str | None wan_access: bool | None = None class Interface(TypedDict): """Interface details.""" device: str mac: str op_mode: str ssid: str | None type: str class HostInfo(TypedDict): """FRITZ!Box host info class.""" mac: str name: str ip: str status: bool class FritzBoxTools(update_coordinator.DataUpdateCoordinator): """FritzBoxTools class.""" def __init__( self, hass: HomeAssistant, password: str, username: str = DEFAULT_USERNAME, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT, ) -> None: """Initialize FritzboxTools class.""" super().__init__( hass=hass, logger=_LOGGER, name=f"{DOMAIN}-{host}-coordinator", update_interval=timedelta(seconds=30), ) self._devices: dict[str, FritzDevice] = {} self._options: MappingProxyType[str, Any] | None = None self._unique_id: str | None = None self.connection: FritzConnection = None self.fritz_guest_wifi: FritzGuestWLAN = None self.fritz_hosts: FritzHosts = None self.fritz_status: FritzStatus = None self.hass = 
hass self.host = host self.mesh_role = MeshRoles.NONE self.device_conn_type: str | None = None self.device_is_router: bool = False self.password = password self.port = port self.username = username self._model: str | None = None self._current_firmware: str | None = None self._latest_firmware: str | None = None self._update_available: bool = False self._release_url: str | None = None async def async_setup( self, options: MappingProxyType[str, Any] | None = None ) -> None: """Wrap up FritzboxTools class setup.""" self._options = options await self.hass.async_add_executor_job(self.setup) def setup(self) -> None: """Set up FritzboxTools class.""" self.connection = FritzConnection( address=self.host, port=self.port, user=self.username, password=self.password, timeout=60.0, pool_maxsize=30, ) if not self.connection: _LOGGER.error("Unable to establish a connection with %s", self.host) return _LOGGER.debug( "detected services on %s %s", self.host, list(self.connection.services.keys()), ) self.fritz_hosts = FritzHosts(fc=self.connection) self.fritz_guest_wifi = FritzGuestWLAN(fc=self.connection) self.fritz_status = FritzStatus(fc=self.connection) info = self.connection.call_action("DeviceInfo:1", "GetInfo") _LOGGER.debug( "gathered device info of %s %s", self.host, { **info, "NewDeviceLog": "***omitted***", "NewSerialNumber": "***omitted***", }, ) if not self._unique_id: self._unique_id = info["NewSerialNumber"] self._model = info.get("NewModelName") self._current_firmware = info.get("NewSoftwareVersion") ( self._update_available, self._latest_firmware, self._release_url, ) = self._update_device_info() if "Layer3Forwarding1" in self.connection.services: if connection_type := self.connection.call_action( "Layer3Forwarding1", "GetDefaultConnectionService" ).get("NewDefaultConnectionService"): # Return NewDefaultConnectionService sample: "1.WANPPPConnection.1" self.device_conn_type = connection_type[2:][:-2] self.device_is_router = self.connection.call_action( 
self.device_conn_type, "GetInfo" ).get("NewEnable") @callback async def _async_update_data(self) -> None: """Update FritzboxTools data.""" try: await self.async_scan_devices() except FRITZ_EXCEPTIONS as ex: raise update_coordinator.UpdateFailed(ex) from ex @property def unique_id(self) -> str: """Return unique id.""" if not self._unique_id: raise ClassSetupMissing() return self._unique_id @property def model(self) -> str: """Return device model.""" if not self._model: raise ClassSetupMissing() return self._model @property def current_firmware(self) -> str: """Return current SW version.""" if not self._current_firmware: raise ClassSetupMissing() return self._current_firmware @property def latest_firmware(self) -> str | None: """Return latest SW version.""" return self._latest_firmware @property def update_available(self) -> bool: """Return if new SW version is available.""" return self._update_available @property def release_url(self) -> str | None: """Return the info URL for latest firmware.""" return self._release_url @property def mac(self) -> str: """Return device Mac address.""" if not self._unique_id: raise ClassSetupMissing() return dr.format_mac(self._unique_id) @property def devices(self) -> dict[str, FritzDevice]: """Return devices.""" return self._devices @property def signal_device_new(self) -> str: """Event specific per FRITZ!Box entry to signal new device.""" return f"{DOMAIN}-device-new-{self._unique_id}" @property def signal_device_update(self) -> str: """Event specific per FRITZ!Box entry to signal updates in devices.""" return f"{DOMAIN}-device-update-{self._unique_id}" def _update_hosts_info(self) -> list[HostInfo]: """Retrieve latest hosts information from the FRITZ!Box.""" try: return self.fritz_hosts.get_hosts_info() # type: ignore [no-any-return] except Exception as ex: # pylint: disable=[broad-except] if not self.hass.is_stopping: raise HomeAssistantError("Error refreshing hosts info") from ex return [] def _update_device_info(self) -> 
tuple[bool, str | None, str | None]: """Retrieve latest device information from the FRITZ!Box.""" info = self.connection.call_action("UserInterface1", "GetInfo") version = info.get("NewX_AVM-DE_Version") release_url = info.get("NewX_AVM-DE_InfoURL") return bool(version), version, release_url def _get_wan_access(self, ip_address: str) -> bool | None: """Get WAN access rule for given IP address.""" try: return not self.connection.call_action( "X_AVM-DE_HostFilter:1", "GetWANAccessByIP", NewIPv4Address=ip_address, ).get("NewDisallow") except FRITZ_EXCEPTIONS as ex: _LOGGER.debug( "could not get WAN access rule for client device with IP '%s', error: %s", ip_address, ex, ) return None async def async_scan_devices(self, now: datetime | None = None) -> None: """Wrap up FritzboxTools class scan.""" await self.hass.async_add_executor_job(self.scan_devices, now) def manage_device_info( self, dev_info: Device, dev_mac: str, consider_home: bool ) -> bool: """Update device lists.""" _LOGGER.debug("Client dev_info: %s", dev_info) if dev_mac in self._devices: self._devices[dev_mac].update(dev_info, consider_home) return False device = FritzDevice(dev_mac, dev_info.name) device.update(dev_info, consider_home) self._devices[dev_mac] = device return True def send_signal_device_update(self, new_device: bool) -> None: """Signal device data updated.""" dispatcher_send(self.hass, self.signal_device_update) if new_device: dispatcher_send(self.hass, self.signal_device_new) def scan_devices(self, now: datetime | None = None) -> None: """Scan for new devices and return a list of found device ids.""" if self.hass.is_stopping: _ha_is_stopping("scan devices") return _LOGGER.debug("Checking host info for FRITZ!Box device %s", self.host) ( self._update_available, self._latest_firmware, self._release_url, ) = self._update_device_info() _LOGGER.debug("Checking devices for FRITZ!Box device %s", self.host) _default_consider_home = DEFAULT_CONSIDER_HOME.total_seconds() if self._options: consider_home 
= self._options.get( CONF_CONSIDER_HOME, _default_consider_home ) else: consider_home = _default_consider_home new_device = False hosts = {} for host in self._update_hosts_info(): if not host.get("mac"): continue hosts[host["mac"]] = Device( name=host["name"], connected=host["status"], connected_to="", connection_type="", ip_address=host["ip"], ssid=None, wan_access=None, ) if ( "Hosts1" not in self.connection.services or "X_AVM-DE_GetMeshListPath" not in self.connection.services["Hosts1"].actions ) or ( self._options and self._options.get(CONF_OLD_DISCOVERY, DEFAULT_CONF_OLD_DISCOVERY) ): _LOGGER.debug( "Using old hosts discovery method. (Mesh not supported or user option)" ) self.mesh_role = MeshRoles.NONE for mac, info in hosts.items(): if info.ip_address: info.wan_access = self._get_wan_access(info.ip_address) if self.manage_device_info(info, mac, consider_home): new_device = True self.send_signal_device_update(new_device) return try: if not (topology := self.fritz_hosts.get_mesh_topology()): raise Exception("Mesh supported but empty topology reported") except FritzActionError: self.mesh_role = MeshRoles.SLAVE # Avoid duplicating device trackers return mesh_intf = {} # first get all meshed devices for node in topology.get("nodes", []): if not node["is_meshed"]: continue for interf in node["node_interfaces"]: int_mac = interf["mac_address"] mesh_intf[interf["uid"]] = Interface( device=node["device_name"], mac=int_mac, op_mode=interf.get("op_mode", ""), ssid=interf.get("ssid", ""), type=interf["type"], ) if dr.format_mac(int_mac) == self.mac: self.mesh_role = MeshRoles(node["mesh_role"]) # second get all client devices for node in topology.get("nodes", []): if node["is_meshed"]: continue for interf in node["node_interfaces"]: dev_mac = interf["mac_address"] if dev_mac not in hosts: continue dev_info: Device = hosts[dev_mac] if dev_info.ip_address: dev_info.wan_access = self._get_wan_access(dev_info.ip_address) for link in interf["node_links"]: intf = 
mesh_intf.get(link["node_interface_1_uid"]) if intf is not None: if intf["op_mode"] == "AP_GUEST": dev_info.wan_access = None dev_info.connected_to = intf["device"] dev_info.connection_type = intf["type"] dev_info.ssid = intf.get("ssid") if self.manage_device_info(dev_info, dev_mac, consider_home): new_device = True self.send_signal_device_update(new_device) async def async_trigger_firmware_update(self) -> bool: """Trigger firmware update.""" results = await self.hass.async_add_executor_job( self.connection.call_action, "UserInterface:1", "X_AVM-DE_DoUpdate" ) return cast(bool, results["NewX_AVM-DE_UpdateState"]) async def async_trigger_reboot(self) -> None: """Trigger device reboot.""" await self.hass.async_add_executor_job(self.connection.reboot) async def async_trigger_reconnect(self) -> None: """Trigger device reconnect.""" await self.hass.async_add_executor_job(self.connection.reconnect) async def async_trigger_set_guest_password( self, password: str | None, length: int ) -> None: """Trigger service to set a new guest wifi password.""" await self.hass.async_add_executor_job( self.fritz_guest_wifi.set_password, password, length ) async def async_trigger_cleanup( self, config_entry: ConfigEntry | None = None ) -> None: """Trigger device trackers cleanup.""" device_hosts_list = await self.hass.async_add_executor_job( self.fritz_hosts.get_hosts_info ) entity_reg: er.EntityRegistry = er.async_get(self.hass) if config_entry is None: if self.config_entry is None: return config_entry = self.config_entry ha_entity_reg_list: list[er.RegistryEntry] = er.async_entries_for_config_entry( entity_reg, config_entry.entry_id ) entities_removed: bool = False device_hosts_macs = set() device_hosts_names = set() for device in device_hosts_list: device_hosts_macs.add(device["mac"]) device_hosts_names.add(device["name"]) for entry in ha_entity_reg_list: if entry.original_name is None: continue entry_name = entry.name or entry.original_name entry_host = entry_name.split(" ")[0] 
entry_mac = entry.unique_id.split("_")[0] if not _cleanup_entity_filter(entry) or ( entry_mac in device_hosts_macs and entry_host in device_hosts_names ): _LOGGER.debug( "Skipping entity %s [mac=%s, host=%s]", entry_name, entry_mac, entry_host, ) continue _LOGGER.info("Removing entity: %s", entry_name) entity_reg.async_remove(entry.entity_id) entities_removed = True if entities_removed: self._async_remove_empty_devices(entity_reg, config_entry) @callback def _async_remove_empty_devices( self, entity_reg: er.EntityRegistry, config_entry: ConfigEntry ) -> None: """Remove devices with no entities.""" device_reg = dr.async_get(self.hass) device_list = dr.async_entries_for_config_entry( device_reg, config_entry.entry_id ) for device_entry in device_list: if not er.async_entries_for_device( entity_reg, device_entry.id, include_disabled_entities=True, ): _LOGGER.info("Removing device: %s", device_entry.name) device_reg.async_remove_device(device_entry.id) async def service_fritzbox( self, service_call: ServiceCall, config_entry: ConfigEntry ) -> None: """Define FRITZ!Box services.""" _LOGGER.debug("FRITZ!Box service: %s", service_call.service) if not self.connection: raise HomeAssistantError("Unable to establish a connection") try: if service_call.service == SERVICE_REBOOT: _LOGGER.warning( 'Service "fritz.reboot" is deprecated, please use the corresponding button entity instead' ) await self.async_trigger_reboot() return if service_call.service == SERVICE_RECONNECT: _LOGGER.warning( 'Service "fritz.reconnect" is deprecated, please use the corresponding button entity instead' ) await self.async_trigger_reconnect() return if service_call.service == SERVICE_CLEANUP: _LOGGER.warning( 'Service "fritz.cleanup" is deprecated, please use the corresponding button entity instead' ) await self.async_trigger_cleanup(config_entry) return if service_call.service == SERVICE_SET_GUEST_WIFI_PW: await self.async_trigger_set_guest_password( service_call.data.get("password"), 
service_call.data.get("length", DEFAULT_PASSWORD_LENGTH), ) return except (FritzServiceError, FritzActionError) as ex: raise HomeAssistantError("Service or parameter unknown") from ex except FritzConnectionException as ex: raise HomeAssistantError("Service not supported") from ex class AvmWrapper(FritzBoxTools): """Setup AVM wrapper for API calls.""" def _service_call_action( self, service_name: str, service_suffix: str, action_name: str, **kwargs: Any, ) -> dict: """Return service details.""" if self.hass.is_stopping: _ha_is_stopping(f"{service_name}/{action_name}") return {} if f"{service_name}{service_suffix}" not in self.connection.services: return {} try: result: dict = self.connection.call_action( f"{service_name}:{service_suffix}", action_name, **kwargs, ) return result except FritzSecurityError: _LOGGER.error( "Authorization Error: Please check the provided credentials and verify that you can log into the web interface", exc_info=True, ) except FRITZ_EXCEPTIONS: _LOGGER.error( "Service/Action Error: cannot execute service %s with action %s", service_name, action_name, exc_info=True, ) except FritzConnectionException: _LOGGER.error( "Connection Error: Please check the device is properly configured for remote login", exc_info=True, ) return {} async def async_get_upnp_configuration(self) -> dict[str, Any]: """Call X_AVM-DE_UPnP service.""" return await self.hass.async_add_executor_job(self.get_upnp_configuration) async def async_get_wan_link_properties(self) -> dict[str, Any]: """Call WANCommonInterfaceConfig service.""" return await self.hass.async_add_executor_job( partial(self.get_wan_link_properties) ) async def async_get_connection_info(self) -> ConnectionInfo: """Return ConnectionInfo data.""" link_properties = await self.async_get_wan_link_properties() connection_info = ConnectionInfo( connection=link_properties.get("NewWANAccessType", "").lower(), mesh_role=self.mesh_role, wan_enabled=self.device_is_router, ) _LOGGER.debug( "ConnectionInfo for 
FritzBox %s: %s", self.host, connection_info, ) return connection_info async def async_get_port_mapping(self, con_type: str, index: int) -> dict[str, Any]: """Call GetGenericPortMappingEntry action.""" return await self.hass.async_add_executor_job( partial(self.get_port_mapping, con_type, index) ) async def async_get_wlan_configuration(self, index: int) -> dict[str, Any]: """Call WLANConfiguration service.""" return await self.hass.async_add_executor_job( partial(self.get_wlan_configuration, index) ) async def async_get_ontel_deflections(self) -> dict[str, Any]: """Call GetDeflections action from X_AVM-DE_OnTel service.""" return await self.hass.async_add_executor_job( partial(self.get_ontel_deflections) ) async def async_set_wlan_configuration( self, index: int, turn_on: bool ) -> dict[str, Any]: """Call SetEnable action from WLANConfiguration service.""" return await self.hass.async_add_executor_job( partial(self.set_wlan_configuration, index, turn_on) ) async def async_set_deflection_enable( self, index: int, turn_on: bool ) -> dict[str, Any]: """Call SetDeflectionEnable service.""" return await self.hass.async_add_executor_job( partial(self.set_deflection_enable, index, turn_on) ) async def async_add_port_mapping( self, con_type: str, port_mapping: Any ) -> dict[str, Any]: """Call AddPortMapping service.""" return await self.hass.async_add_executor_job( partial( self.add_port_mapping, con_type, port_mapping, ) ) async def async_set_allow_wan_access( self, ip_address: str, turn_on: bool ) -> dict[str, Any]: """Call X_AVM-DE_HostFilter service.""" return await self.hass.async_add_executor_job( partial(self.set_allow_wan_access, ip_address, turn_on) ) def get_upnp_configuration(self) -> dict[str, Any]: """Call X_AVM-DE_UPnP service.""" return self._service_call_action("X_AVM-DE_UPnP", "1", "GetInfo") def get_ontel_num_deflections(self) -> dict[str, Any]: """Call GetNumberOfDeflections action from X_AVM-DE_OnTel service.""" return self._service_call_action( 
"X_AVM-DE_OnTel", "1", "GetNumberOfDeflections" ) def get_ontel_deflections(self) -> dict[str, Any]: """Call GetDeflections action from X_AVM-DE_OnTel service.""" return self._service_call_action("X_AVM-DE_OnTel", "1", "GetDeflections") def get_default_connection(self) -> dict[str, Any]: """Call Layer3Forwarding service.""" return self._service_call_action( "Layer3Forwarding", "1", "GetDefaultConnectionService" ) def get_num_port_mapping(self, con_type: str) -> dict[str, Any]: """Call GetPortMappingNumberOfEntries action.""" return self._service_call_action(con_type, "1", "GetPortMappingNumberOfEntries") def get_port_mapping(self, con_type: str, index: int) -> dict[str, Any]: """Call GetGenericPortMappingEntry action.""" return self._service_call_action( con_type, "1", "GetGenericPortMappingEntry", NewPortMappingIndex=index ) def get_wlan_configuration(self, index: int) -> dict[str, Any]: """Call WLANConfiguration service.""" return self._service_call_action("WLANConfiguration", str(index), "GetInfo") def get_wan_link_properties(self) -> dict[str, Any]: """Call WANCommonInterfaceConfig service.""" return self._service_call_action( "WANCommonInterfaceConfig", "1", "GetCommonLinkProperties" ) def set_wlan_configuration(self, index: int, turn_on: bool) -> dict[str, Any]: """Call SetEnable action from WLANConfiguration service.""" return self._service_call_action( "WLANConfiguration", str(index), "SetEnable", NewEnable="1" if turn_on else "0", ) def set_deflection_enable(self, index: int, turn_on: bool) -> dict[str, Any]: """Call SetDeflectionEnable service.""" return self._service_call_action( "X_AVM-DE_OnTel", "1", "SetDeflectionEnable", NewDeflectionId=index, NewEnable="1" if turn_on else "0", ) def add_port_mapping(self, con_type: str, port_mapping: Any) -> dict[str, Any]: """Call AddPortMapping service.""" return self._service_call_action( con_type, "1", "AddPortMapping", **port_mapping ) def set_allow_wan_access(self, ip_address: str, turn_on: bool) -> dict[str, 
Any]: """Call X_AVM-DE_HostFilter service.""" return self._service_call_action( "X_AVM-DE_HostFilter", "1", "DisallowWANAccessByIP", NewIPv4Address=ip_address, NewDisallow="0" if turn_on else "1", ) @dataclass class FritzData: """Storage class for platform global data.""" tracked: dict = field(default_factory=dict) profile_switches: dict = field(default_factory=dict) class FritzDeviceBase(update_coordinator.CoordinatorEntity[AvmWrapper]): """Entity base class for a device connected to a FRITZ!Box device.""" def __init__(self, avm_wrapper: AvmWrapper, device: FritzDevice) -> None: """Initialize a FRITZ!Box device.""" super().__init__(avm_wrapper) self._avm_wrapper = avm_wrapper self._mac: str = device.mac_address self._name: str = device.hostname or DEFAULT_DEVICE_NAME @property def name(self) -> str: """Return device name.""" return self._name @property def ip_address(self) -> str | None: """Return the primary ip address of the device.""" if self._mac: return self._avm_wrapper.devices[self._mac].ip_address return None @property def mac_address(self) -> str: """Return the mac address of the device.""" return self._mac @property def hostname(self) -> str | None: """Return hostname of the device.""" if self._mac: return self._avm_wrapper.devices[self._mac].hostname return None @property def should_poll(self) -> bool: """No polling needed.""" return False async def async_process_update(self) -> None: """Update device.""" raise NotImplementedError() async def async_on_demand_update(self) -> None: """Update state.""" await self.async_process_update() self.async_write_ha_state() class FritzDevice: """Representation of a device connected to the FRITZ!Box.""" def __init__(self, mac: str, name: str) -> None: """Initialize device info.""" self._connected = False self._connected_to: str | None = None self._connection_type: str | None = None self._ip_address: str | None = None self._last_activity: datetime | None = None self._mac = mac self._name = name self._ssid: str | None = 
None self._wan_access: bool | None = False def update(self, dev_info: Device, consider_home: float) -> None: """Update device info.""" utc_point_in_time = dt_util.utcnow() if self._last_activity: consider_home_evaluated = ( utc_point_in_time - self._last_activity ).total_seconds() < consider_home else: consider_home_evaluated = dev_info.connected if not self._name: self._name = dev_info.name or self._mac.replace(":", "_") self._connected = dev_info.connected or consider_home_evaluated if dev_info.connected: self._last_activity = utc_point_in_time self._connected_to = dev_info.connected_to self._connection_type = dev_info.connection_type self._ip_address = dev_info.ip_address self._ssid = dev_info.ssid self._wan_access = dev_info.wan_access @property def connected_to(self) -> str | None: """Return connected status.""" return self._connected_to @property def connection_type(self) -> str | None: """Return connected status.""" return self._connection_type @property def is_connected(self) -> bool: """Return connected status.""" return self._connected @property def mac_address(self) -> str: """Get MAC address.""" return self._mac @property def hostname(self) -> str: """Get Name.""" return self._name @property def ip_address(self) -> str | None: """Get IP address.""" return self._ip_address @property def last_activity(self) -> datetime | None: """Return device last activity.""" return self._last_activity @property def ssid(self) -> str | None: """Return device connected SSID.""" return self._ssid @property def wan_access(self) -> bool | None: """Return device wan access.""" return self._wan_access class SwitchInfo(TypedDict): """FRITZ!Box switch info class.""" description: str friendly_name: str icon: str type: str callback_update: Callable callback_switch: Callable class FritzBoxBaseEntity: """Fritz host entity base class.""" def __init__(self, avm_wrapper: AvmWrapper, device_name: str) -> None: """Init device info class.""" self._avm_wrapper = avm_wrapper 
self._device_name = device_name @property def mac_address(self) -> str: """Return the mac address of the main device.""" return self._avm_wrapper.mac @property def device_info(self) -> DeviceInfo: """Return the device information.""" return DeviceInfo( configuration_url=f"http://{self._avm_wrapper.host}", connections={(dr.CONNECTION_NETWORK_MAC, self.mac_address)}, identifiers={(DOMAIN, self._avm_wrapper.unique_id)}, manufacturer="AVM", model=self._avm_wrapper.model, name=self._device_name, sw_version=self._avm_wrapper.current_firmware, ) @dataclass class ConnectionInfo: """Fritz sensor connection information class.""" connection: str mesh_role: MeshRoles wan_enabled: bool
33.06816
128
0.612556
4a01370c11dbc0d92d4323ffbc146fab61cfb317
3,888
py
Python
preprocess.py
ahforoughi/face-identification
d148c08e744f0224f29991bbb6c416d048d7c11d
[ "MIT" ]
null
null
null
preprocess.py
ahforoughi/face-identification
d148c08e744f0224f29991bbb6c416d048d7c11d
[ "MIT" ]
null
null
null
preprocess.py
ahforoughi/face-identification
d148c08e744f0224f29991bbb6c416d048d7c11d
[ "MIT" ]
null
null
null
from PIL import Image from matplotlib import image import numpy as np from numpy.linalg import norm import numpy as np import cv2 import math from autocrop import Cropper cropper = Cropper() def detect_face(pixels, mtcnn_detector, retina_detector): # check which model is used check_mtcnn = 1 # detect faces in the image using mtcnn results = mtcnn_detector.detect_faces(pixels) # print(results) #if result in MTCNN do not work well or with lower accuarcy we will use Retina for face detection if (not results) or results[0]['confidence'] < 0.9: check_mtcnn = 0 print("====== mtcnn not working") results = retina_detector.predict(pixels) # result_img = detector.draw(pixels,faces) # cv2.imshow("result", result_img) # cv2.waitKey() # cv2.destroyAllWindows() # This is for visualizing the result of detection uncomment if necessory # bounding_box = results[0]['box'] # keypoints = results[0]['keypoints'] # cv2.rectangle(pixels, # (bounding_box[0], bounding_box[1]), # (bounding_box[0]+bounding_box[2], # bounding_box[1] + bounding_box[3]), # (0,155,255), # 2) # cv2.circle(pixels, (keypoints['left_eye']), 2, (255, 0,0), 2) # cv2.circle(pixels, (keypoints['right_eye']), 2, (255, 0,0), 2) # cv2.circle(pixels, (keypoints['nose']), 2, (255,0,0), 2) # cv2.circle(pixels, (keypoints['mouth_left']), 2, (255, 0,0), 2) # cv2.circle(pixels, (keypoints['mouth_right']), 2, (255, 0,0), 2) # #cv2.imwrite("ivan_drawn.jpg", pixels) # #cv2.namedWindow("pixels") # cv2.imshow("image2", pixels) # cv2.waitKey(0) # cv2.destroyAllWindows() return results, check_mtcnn def EuclideanDistance(source_representation, test_representation): euclidean_distance = source_representation - test_representation euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance)) euclidean_distance = np.sqrt(euclidean_distance) return euclidean_distance def alignment_procedure(img, left_eye, right_eye): #this function aligns given face in img based on left and right eye coordinates left_eye_x, left_eye_y = 
left_eye right_eye_x, right_eye_y = right_eye #----------------------- #find rotation direction if left_eye_y > right_eye_y: point_3rd = (right_eye_x, left_eye_y) direction = -1 #rotate same direction to clock else: point_3rd = (left_eye_x, right_eye_y) direction = 1 #rotate inverse direction of clock #----------------------- #find length of triangle edges a = EuclideanDistance(np.array(left_eye), np.array(point_3rd)) b = EuclideanDistance(np.array(right_eye), np.array(point_3rd)) c = EuclideanDistance(np.array(right_eye), np.array(left_eye)) #----------------------- #apply cosine rule if b != 0 and c != 0: #this multiplication causes division by zero in cos_a calculation cos_a = (b*b + c*c - a*a)/(2*b*c) angle = np.arccos(cos_a) #angle in radian angle = (angle * 180) / math.pi #radian to degree #----------------------- #rotate base image if direction == -1: angle = 90 - angle img = Image.fromarray(img) img = np.array(img.rotate(direction * angle)) #----------------------- return img #return img anyway def face_alignment(img, results): detection = results[0] keypoints = detection["keypoints"] left_eye = keypoints["left_eye"] right_eye = keypoints["right_eye"] img = alignment_procedure(img, left_eye, right_eye) cropped_array = cropper.crop(img) if cropped_array is not None: return cropped_array #cropped_array = cv2.cvtColor(cropped_array, cv2.COLOR_BGR2RGB) return img
30.375
102
0.634774
4a013773ddb7456ed7048624d1a4cf9e49e809be
376
py
Python
examples/03_interval.py
lucasb-eyer/cherrypy-spam-protector
eea2ebc8ae7da068429d252bceb8e307e4821894
[ "Unlicense" ]
1
2017-11-08T16:39:55.000Z
2017-11-08T16:39:55.000Z
examples/03_interval.py
lucasb-eyer/cherrypy-spam-protector
eea2ebc8ae7da068429d252bceb8e307e4821894
[ "Unlicense" ]
null
null
null
examples/03_interval.py
lucasb-eyer/cherrypy-spam-protector
eea2ebc8ae7da068429d252bceb8e307e4821894
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python import cherrypy from spamprotector import IPProtector cherrypy.tools.protect_low_frequency = IPProtector(interval_dt=15.0, interval_reqs=5, mindt_seconds=None) class MyView(object): @cherrypy.expose @cherrypy.tools.protect_low_frequency() def index(self): return "You may hammer me in short bursts." cherrypy.quickstart(MyView())
25.066667
105
0.763298
4a0137f75f2a8b8d01692085dc17be617ac3c983
13,795
py
Python
tests/pmap_test.py
proteneer/jax
52d73e43ef5cb9e4fe4cca715a35f190fee28c89
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
tests/pmap_test.py
proteneer/jax
52d73e43ef5cb9e4fe4cca715a35f190fee28c89
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
tests/pmap_test.py
proteneer/jax
52d73e43ef5cb9e4fe4cca715a35f190fee28c89
[ "ECL-2.0", "Apache-2.0" ]
1
2020-03-29T04:19:27.000Z
2020-03-29T04:19:27.000Z
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from functools import partial from unittest import SkipTest import numpy as onp from absl.testing import absltest from absl.testing import parameterized import jax.numpy as np from jax import test_util as jtu from jax import core from jax import lax from jax.api import pmap, jit, vmap, jvp, grad, make_jaxpr, linearize, device_put from jax.lib import xla_bridge from jax.util import prod from jax.interpreters import pxla from jax.config import config config.parse_flags_with_absl() class PmapTest(jtu.JaxTestCase): def _getMeshShape(self, device_mesh_shape): device_count = xla_bridge.device_count() if any(size == -1 for size in device_mesh_shape): try: return onp.arange(device_count).reshape(device_mesh_shape).shape except ValueError: msg = "device mesh shape {} not compatible with device count {}" raise SkipTest(msg.format(device_mesh_shape, device_count)) else: if device_count % prod(device_mesh_shape): msg = "device mesh size {} does not divide available device count {}" raise SkipTest(msg.format(prod(device_mesh_shape), device_count)) else: return device_mesh_shape def testBasic(self): f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i') shape = (xla_bridge.device_count(), 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) expected = x - onp.sum(x, 0) ans = f(x) 
self.assertAllClose(ans, expected, check_dtypes=False) def testNestedBasic(self): f = lambda x: lax.psum(lax.psum(x, 'i'), 'j') f = pmap(pmap(f, 'i'), 'j') def sum_and_broadcast(x, axis): return onp.repeat(onp.sum(x, axis, keepdims=True), x.shape[axis], axis) shape = (xla_bridge.device_count(), 1, 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) ans = f(x) expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1) self.assertAllClose(ans, expected, check_dtypes=False) @parameterized.named_parameters( {"testcase_name": "_mesh={}".format(device_mesh_shape), "device_mesh_shape": device_mesh_shape} for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)]) def testNestedShardingAndStacking(self, device_mesh_shape): mesh_shape = self._getMeshShape(device_mesh_shape) f = lambda x: x f = pmap(pmap(f, 'i'), 'j') shape = mesh_shape + (4,) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) ans = f(x) expected = x self.assertEqual(ans.shape, expected.shape) self.assertAllClose(ans, expected, check_dtypes=False) def testJvpAndPartialEval(self): @partial(pmap, axis_name='i') def f(x): return np.sin(x) def splitjvp(x): _, jvp = linearize(f, x) return jvp(np.ones_like(x)) shape = (xla_bridge.device_count(), 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) expected = onp.cos(x) ans = splitjvp(x) self.assertAllClose(ans, expected, check_dtypes=False) make_jaxpr(splitjvp)(x) # doesn't crash def testGradBasic(self): @partial(pmap, axis_name='i') def f(x): return np.sin(x) shape = (xla_bridge.device_count(), 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) ans = grad(lambda x: np.sum(np.sin(x)))(x) expected = grad(lambda x: np.sum(f(x)))(x) self.assertAllClose(ans, expected, check_dtypes=False) def testGradOfJvp(self): @partial(pmap, axis_name='i') def f(x): return np.sin(x) def splitjvp(x): _, jvp = linearize(f, x) return jvp(np.ones_like(x)) fun = lambda x: np.sum(jvp(np.sin, (x,), (np.ones_like(x),))[1]) shape = 
(xla_bridge.device_count(), 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) ans = grad(lambda x: np.sum(splitjvp(x)))(x) expected = grad(fun)(x) self.assertAllClose(ans, expected, check_dtypes=True) def testTwoArgsGrad(self): def f(x, y): return lax.psum(5. * np.cos(x) * np.sin(y), 'i') f = pmap(f, 'i') def g(x, y): tot = np.sum(5. * np.cos(x) * np.sin(y)) return tot * np.ones_like(x) # broadcast to map like pjit does shape = (xla_bridge.device_count(), 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) y = 4 + x ans = grad(lambda x, y: np.sum(g(x, y)))(x, y) expected = grad(lambda x, y: np.sum(g(x, y)))(x, y) self.assertAllClose(ans, expected, check_dtypes=False) @parameterized.named_parameters( {"testcase_name": "_mesh={}".format(device_mesh_shape), "device_mesh_shape": device_mesh_shape} for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)]) def testNestedWithClosure(self, device_mesh_shape): mesh_shape = self._getMeshShape(device_mesh_shape) @partial(pmap, axis_name='i') def test_fun(x): y = np.sum(np.sin(x)) @partial(pmap, axis_name='j') def g(z): return 3. * np.exp(np.sin(x).sum() * np.cos(y) * np.tan(z)) return grad(lambda w: np.sum(g(w)))(x) @vmap def baseline_fun(x): y = np.sum(np.sin(x)) @vmap def g(z): return 3. 
* np.exp(np.sin(x).sum() * np.cos(y) * np.tan(z)) return grad(lambda w: np.sum(g(w)))(x) shape = mesh_shape + (4,) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) ans = grad(lambda x: np.sum(test_fun(x)))(x) expected = grad(lambda x: np.sum(baseline_fun(x)))(x) self.assertAllClose(ans, expected, check_dtypes=True) def testShardedDeviceArrays(self): f = lambda x: 2 * x f = pmap(f, axis_name='i') shape = (xla_bridge.device_count(), 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) # test that we can pass in and out ShardedDeviceArrays y = f(x) self.assertIsInstance(y, np.ndarray) self.assertIsInstance(y, pxla.ShardedDeviceArray) self.assertAllClose(y, 2 * x, check_dtypes=False) z = f(y) self.assertIsInstance(z, pxla.ShardedDeviceArray) self.assertAllClose(z, 2 * 2 * x, check_dtypes=False) # test that we can pass in a regular DeviceArray y = f(device_put(x)) self.assertIsInstance(y, pxla.ShardedDeviceArray) self.assertAllClose(y, 2 * x, check_dtypes=False) # test that we can pass a ShardedDeviceArray to a regular jit computation z = y + y self.assertAllClose(z, 2 * 2 * x, check_dtypes=False) # test that we can handle device movement on dispatch y.device_buffers = y.device_buffers[::-1] z = f(y) self.assertAllClose(z, 2 * 2 * x[::-1], check_dtypes=False) # test that the repr doesn't crash repr(z) def testPsumMultiple(self): f = lambda x: lax.psum(x, ('i', 'j')) f = pmap(pmap(f, 'i'), 'j') def sum_and_broadcast(x, axis): return onp.repeat(onp.sum(x, axis, keepdims=True), x.shape[axis], axis) device_count = xla_bridge.device_count() num_pairs, ragged = divmod(device_count, 2) if num_pairs > 1 and not ragged: shape = (num_pairs, 2, 4) else: shape = (device_count, 1, 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) ans = f(x) expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1) self.assertAllClose(ans, expected, check_dtypes=False) def testReplicaGroups(self): groups = pxla.replica_groups(8, [4, 2], (0,)) 
self.assertEqual(groups, ((0, 2, 4, 6), (1, 3, 5, 7))) groups = pxla.replica_groups(8, [4, 2], (1,)) self.assertEqual(groups, ((0, 1), (2, 3), (4, 5), (6, 7))) groups = pxla.replica_groups(8, [4, 2], (0, 1)) self.assertEqual(groups, ((0, 1, 2, 3, 4, 5, 6, 7,),)) groups = pxla.replica_groups(8, [4, 2], (1, 0)) self.assertEqual(len(groups), 1) self.assertEqual((tuple(sorted(groups[0])),), ((0, 1, 2, 3, 4, 5, 6, 7,),)) # order doesn't matter def testShardedDeviceTuple(self): f = lambda x: core.pack((x, x)) f = pmap(f) shape = (xla_bridge.device_count(), 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) # test that we can pass in and out ShardedDeviceTuples (and unpack them) y = f(x) self.assertIsInstance(y, pxla.ShardedDeviceTuple) self.assertIsInstance(y, core.JaxTuple) self.assertAllClose(y, (x, x), check_dtypes=False) z = f(y) self.assertIsInstance(z, pxla.ShardedDeviceTuple) self.assertAllClose(z, (y, y), check_dtypes=True) # test that we can pass a ShardedDeviceTuple to a regular jit computation w = jit(lambda x: list(x)[0])(y) self.assertAllClose(w, x, check_dtypes=False) @jtu.skip_on_devices("cpu", "gpu") def testCollectivePermute(self): device_count = xla_bridge.device_count() rotation = [(i, (i + 1) % device_count) for i in range(device_count)] f = lambda x: lax.ppermute(x, perm=rotation, axis_name='i') f = pmap(f, 'i') x = np.arange(4 * device_count).reshape((device_count, 4)) ans = f(x) expected = onp.roll(x, shift=1, axis=0) self.assertAllClose(ans, expected, check_dtypes=False) @jtu.skip_on_devices("cpu", "gpu") def testRule30(self): # This is a test of collective_permute implementing a simple halo exchange # to run a rule 30 simulation: https://en.wikipedia.org/wiki/Rule_30 # Halo exchange should be useful in spatially-sharded convolutions and in # other simulations. 
device_count = xla_bridge.device_count() def send_right(x, axis_name): left_perm = [(i, (i + 1) % device_count) for i in range(device_count)] return lax.ppermute(x, perm=left_perm, axis_name=axis_name) def send_left(x, axis_name): left_perm = [((i + 1) % device_count, i) for i in range(device_count)] return lax.ppermute(x, perm=left_perm, axis_name=axis_name) def update_board(board): left = board[:-2] right = board[2:] center = board[1:-1] return lax.bitwise_xor(left, lax.bitwise_or(center, right)) @partial(pmap, axis_name='i') def step(board_slice): left, right = board_slice[:1], board_slice[-1:] right, left = send_left(left, 'i'), send_right(right, 'i') enlarged_board_slice = np.concatenate([left, board_slice, right]) return update_board(enlarged_board_slice) board = onp.zeros(40, dtype=bool) board[board.shape[0] // 2] = True reshaped_board = board.reshape((device_count, -1)) boards = [] def print_board(board): boards.append(''.join('*' if x else ' ' for x in board.ravel())) print_board(reshaped_board) for _ in range(20): reshaped_board = step(reshaped_board) print_board(reshaped_board) ans = '\n'.join(boards) expected = '\n'.join(( ' * ', ' *** ', ' ** * ', ' ** **** ', ' ** * * ', ' ** **** *** ', ' ** * * * ', ' ** **** ****** ', ' ** * *** * ', ' ** **** ** * *** ', ' ** * * **** ** * ', ' ** **** ** * * **** ', ' ** * *** ** ** * * ', ' ** **** ** *** *** ** *** ', ' ** * * *** * *** * * ', ' ** **** ** * * ***** ******* ', ' ** * *** **** * *** * ', ' ** **** ** *** ** ** * *** ', ' ** * * *** * ** *** **** ** * ', ' ** **** ** * ****** * * *** ****', ' * * *** **** **** *** ** * ', )) print(ans) self.assertEqual(ans, expected) @jtu.skip_on_devices("cpu", "gpu") def testReduceMax(self): f = pmap(lambda x: x - lax.pmax(x, 'i'), axis_name='i') shape = (xla_bridge.device_count(), 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) expected = x - onp.max(x, 0) ans = f(x) self.assertAllClose(ans, expected, check_dtypes=False) 
@jtu.skip_on_devices("cpu", "gpu") def testReduceMin(self): f = pmap(lambda x: x - lax.pmin(x, 'i'), axis_name='i') shape = (xla_bridge.device_count(), 4) x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape) expected = x - onp.min(x, 0) ans = f(x) self.assertAllClose(ans, expected, check_dtypes=False) def testDeviceCountError(self): device_count = xla_bridge.device_count() f = pmap(lambda x: x) x = np.arange(device_count + 1) self.assertRaisesRegexp( ValueError, ".*requires.*replicas", lambda: f(x)) f = pmap(lambda x: x) x = onp.ones((device_count + 1, 10)) self.assertRaisesRegexp( ValueError, ".*requires.*replicas", lambda: f(x)) f = pmap(lambda x: pmap(lambda x: x)(x)) x = onp.ones((device_count, 2, 10)) self.assertRaisesRegexp( ValueError, ".*requires.*replicas", lambda: f(x)) if __name__ == '__main__': absltest.main()
33.321256
81
0.599855
4a013a3c1fc4246f01d43679d4eefc7a023de50f
23,062
py
Python
userbot/plugins/archive.py
RiderFA/Dark_Userbot
480df539bfeae994d59649a54d2478ed24b445bb
[ "MIT" ]
null
null
null
userbot/plugins/archive.py
RiderFA/Dark_Userbot
480df539bfeae994d59649a54d2478ed24b445bb
[ "MIT" ]
null
null
null
userbot/plugins/archive.py
RiderFA/Dark_Userbot
480df539bfeae994d59649a54d2478ed24b445bb
[ "MIT" ]
null
null
null
""" usage: reply with file : .rar , .7z create archived file unzip usage: reply with zipped file .unzipper Coded by @furki """ import asyncio import os import shutil import tarfile import time import zipfile from datetime import datetime import patoolib from hachoir.metadata import extractMetadata from hachoir.parser import createParser from telethon.tl.types import DocumentAttributeVideo from userbot import CMD_HELP from userbot.Config import Config from mafiabot.utils import admin_cmd, progress from userbot.cmdhelp import CmdHelp thumb_image_path = Config.TMP_DOWNLOAD_DIRECTORY + "/thumb_image.jpg" extracted = Config.TMP_DOWNLOAD_DIRECTORY + "extracted/" if not os.path.isdir(extracted): os.makedirs(extracted) @borg.on(admin_cmd(pattern="compress")) async def _(event): if event.fwd_from: return if not event.is_reply: await event.edit("Reply to a file to compress it.") return mone = await event.edit("Processing ...") if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: reply_message = await event.get_reply_message() try: c_time = time.time() downloaded_file_name = await borg.download_media( reply_message, Config.TMP_DOWNLOAD_DIRECTORY, progress_callback=lambda d, t: asyncio.get_event_loop().create_task( progress(d, t, mone, c_time, "trying to download") ), ) directory_name = downloaded_file_name await event.edit(downloaded_file_name) except Exception as e: # pylint:disable=C0103,W0703 await mone.edit(str(e)) zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED).write( directory_name ) await borg.send_file( event.chat_id, directory_name + ".zip", caption="Zipped By mafiaBot", force_document=True, allow_cache=False, reply_to=event.message.id, ) await event.edit("DONE!!!") await asyncio.sleep(5) await event.delete() def zipdir(path, ziph): # ziph is zipfile handle for root, dirs, files in os.walk(path): for file in files: ziph.write(os.path.join(root, file)) os.remove(os.path.join(root, 
file)) @borg.on(admin_cmd(pattern=("rar ?(.*)"))) async def _(event): if event.fwd_from: return input_str = event.pattern_match.group(1) mone = await event.edit("Processing ...") if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: reply_message = await event.get_reply_message() try: c_time = time.time() downloaded_file_name = await borg.download_media( reply_message, Config.TMP_DOWNLOAD_DIRECTORY, progress_callback=lambda d, t: asyncio.get_event_loop().create_task( progress(d, t, mone, c_time, "trying to download") ), ) directory_name = downloaded_file_name await event.edit("creating rar archive, please wait..") # patoolib.create_archive(directory_name + '.7z',directory_name) patoolib.create_archive( directory_name + ".rar", (directory_name, Config.TMP_DOWNLOAD_DIRECTORY) ) # patoolib.create_archive("/content/21.yy Avrupa (1).pdf.zip",("/content/21.yy Avrupa (1).pdf","/content/")) await borg.send_file( event.chat_id, directory_name + ".rar", caption="rarred By mafiaBot", force_document=True, allow_cache=False, reply_to=event.message.id, ) try: os.remove(directory_name + ".rar") os.remove(directory_name) except: pass await event.edit("Task Completed") await asyncio.sleep(3) await event.delete() except Exception as e: # pylint:disable=C0103,W0703 await mone.edit(str(e)) elif input_str: directory_name = input_str await event.edit( "Local file compressed to `{}`".format(directory_name + ".rar") ) @borg.on(admin_cmd(pattern=("7z ?(.*)"))) async def _(event): if event.fwd_from: return input_str = event.pattern_match.group(1) mone = await event.edit("Processing ...") if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: reply_message = await event.get_reply_message() try: c_time = time.time() downloaded_file_name = await borg.download_media( reply_message, Config.TMP_DOWNLOAD_DIRECTORY, progress_callback=lambda d, t: 
asyncio.get_event_loop().create_task( progress(d, t, mone, c_time, "trying to download") ), ) directory_name = downloaded_file_name await event.edit("creating 7z archive, please wait..") # patoolib.create_archive(directory_name + '.7z',directory_name) patoolib.create_archive( directory_name + ".7z", (directory_name, Config.TMP_DOWNLOAD_DIRECTORY) ) # patoolib.create_archive("/content/21.yy Avrupa (1).pdf.zip",("/content/21.yy Avrupa (1).pdf","/content/")) await borg.send_file( event.chat_id, directory_name + ".7z", caption="7z archived By mafiaBot", force_document=True, allow_cache=False, reply_to=event.message.id, ) try: os.remove(directory_name + ".7z") os.remove(directory_name) except: pass await event.edit("Task Completed") await asyncio.sleep(3) await event.delete() except Exception as e: # pylint:disable=C0103,W0703 await mone.edit(str(e)) elif input_str: directory_name = input_str await event.edit("Local file compressed to `{}`".format(directory_name + ".7z")) @borg.on(admin_cmd(pattern=("tar ?(.*)"))) async def _(event): if event.fwd_from: return input_str = event.pattern_match.group(1) mone = await event.edit("Processing ...") if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: reply_message = await event.get_reply_message() try: c_time = time.time() downloaded_file_name = await borg.download_media( reply_message, Config.TMP_DOWNLOAD_DIRECTORY, progress_callback=lambda d, t: asyncio.get_event_loop().create_task( progress(d, t, mone, c_time, "trying to download") ), ) directory_name = downloaded_file_name await event.edit("Finish downloading to my local") to_upload_file = directory_name output = await create_archive(to_upload_file) is_zip = False if is_zip: check_if_file = await create_archive(to_upload_file) if check_if_file is not None: to_upload_file = check_if_file await borg.send_file( event.chat_id, output, caption="TAR By mafiaBot", force_document=True, allow_cache=False, 
reply_to=event.message.id, ) try: os.remove(output) os.remove(output) except: pass await event.edit("Task Completed") await asyncio.sleep(3) await event.delete() except Exception as e: # pylint:disable=C0103,W0703 await mone.edit(str(e)) elif input_str: directory_name = input_str await event.edit("Local file compressed to `{}`".format(output)) async def create_archive(input_directory): return_name = None if os.path.exists(input_directory): base_dir_name = os.path.basename(input_directory) compressed_file_name = f"{base_dir_name}.tar.gz" # suffix_extention_length = 1 + 3 + 1 + 2 # if len(base_dir_name) > (64 - suffix_extention_length): # compressed_file_name = base_dir_name[0:(64 - suffix_extention_length)] compressed_file_name += ".tar.gz" file_genertor_command = [ "tar", "-zcvf", compressed_file_name, f"{input_directory}", ] process = await asyncio.create_subprocess_exec( *file_genertor_command, # stdout must a pipe to be accessible as process.stdout stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, ) # Wait for the subprocess to finish stdout, stderr = await process.communicate() stderr.decode().strip() stdout.decode().strip() if os.path.exists(compressed_file_name): try: shutil.rmtree(input_directory) except: pass return_name = compressed_file_name return return_name @borg.on(admin_cmd(pattern="unzip")) async def _(event): if event.fwd_from: return mone = await event.edit("Processing ...") if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: start = datetime.now() reply_message = await event.get_reply_message() try: c_time = time.time() downloaded_file_name = await borg.download_media( reply_message, Config.TMP_DOWNLOAD_DIRECTORY, progress_callback=lambda d, t: asyncio.get_event_loop().create_task( progress(d, t, mone, c_time, "trying to download") ), ) except Exception as e: # pylint:disable=C0103,W0703 await mone.edit(str(e)) else: end = datetime.now() ms = (end - start).seconds 
await mone.edit( "Stored the zip to `{}` in {} seconds.".format(downloaded_file_name, ms) ) with zipfile.ZipFile(downloaded_file_name, "r") as zip_ref: zip_ref.extractall(extracted) filename = sorted(get_lst_of_files(extracted, [])) # filename = filename + "/" await event.edit("Unzipping now") # r=root, d=directories, f = files for single_file in filename: if os.path.exists(single_file): # https://stackoverflow.com/a/678242/4723940 caption_rts = os.path.basename(single_file) force_document = True supports_streaming = False document_attributes = [] if single_file.endswith((".mp4", ".mp3", ".flac", ".webm")): metadata = extractMetadata(createParser(single_file)) duration = 0 width = 0 height = 0 if metadata.has("duration"): duration = metadata.get("duration").seconds if os.path.exists(thumb_image_path): metadata = extractMetadata(createParser(thumb_image_path)) if metadata.has("width"): width = metadata.get("width") if metadata.has("height"): height = metadata.get("height") document_attributes = [ DocumentAttributeVideo( duration=duration, w=width, h=height, round_message=False, supports_streaming=True, ) ] try: await borg.send_file( event.chat_id, single_file, caption=f"UnZipped `{caption_rts}`", force_document=force_document, supports_streaming=supports_streaming, allow_cache=False, reply_to=event.message.id, attributes=document_attributes, progress_callback=lambda d, t: asyncio.get_event_loop().create_task( progress(d, t, event, c_time, "trying to upload") ), ) await event.edit("DONE!!!") await asyncio.sleep(5) await event.delete() except Exception as e: await borg.send_message( event.chat_id, "{} caused `{}`".format(caption_rts, str(e)), reply_to=event.message.id, ) # some media were having some issues continue os.remove(single_file) os.remove(downloaded_file_name) @borg.on(admin_cmd(pattern="unrar")) async def _(event): if event.fwd_from: return mone = await event.edit("Processing ...") if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): 
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) if event.reply_to_msg_id: start = datetime.now() reply_message = await event.get_reply_message() try: c_time = time.time() downloaded_file_name = await borg.download_media( reply_message, Config.TMP_DOWNLOAD_DIRECTORY, progress_callback=lambda d, t: asyncio.get_event_loop().create_task( progress(d, t, mone, c_time, "trying to download") ), ) except Exception as e: # pylint:disable=C0103,W0703 await mone.edit(str(e)) else: end = datetime.now() ms = (end - start).seconds await mone.edit( "Stored the rar to `{}` in {} seconds.".format(downloaded_file_name, ms) ) patoolib.extract_archive(downloaded_file_name, outdir=extracted) filename = sorted(get_lst_of_files(extracted, [])) # filename = filename + "/" await event.edit("Unraring now") # r=root, d=directories, f = files for single_file in filename: if os.path.exists(single_file): # https://stackoverflow.com/a/678242/4723940 caption_rts = os.path.basename(single_file) force_document = True supports_streaming = False document_attributes = [] if single_file.endswith((".mp4", ".mp3", ".flac", ".webm")): metadata = extractMetadata(createParser(single_file)) duration = 0 width = 0 height = 0 if metadata.has("duration"): duration = metadata.get("duration").seconds if os.path.exists(thumb_image_path): metadata = extractMetadata(createParser(thumb_image_path)) if metadata.has("width"): width = metadata.get("width") if metadata.has("height"): height = metadata.get("height") document_attributes = [ DocumentAttributeVideo( duration=duration, w=width, h=height, round_message=False, supports_streaming=True, ) ] try: await borg.send_file( event.chat_id, single_file, caption=f"UnRarred `{caption_rts}`", force_document=force_document, supports_streaming=supports_streaming, allow_cache=False, reply_to=event.message.id, attributes=document_attributes, progress_callback=lambda d, t: asyncio.get_event_loop().create_task( progress(d, t, event, c_time, "trying to upload") ), ) await 
event.edit("DONE!!!") await asyncio.sleep(5) await event.delete() except Exception as e: await borg.send_message( event.chat_id, "{} caused `{}`".format(caption_rts, str(e)), reply_to=event.message.id, ) # some media were having some issues continue os.remove(single_file) os.remove(downloaded_file_name) @borg.on(admin_cmd(pattern="untar")) async def _(event): if event.fwd_from: return mone = await event.edit("Processing ...") if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY): os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY) extracted = Config.TMP_DOWNLOAD_DIRECTORY + "extracted/" thumb_image_path = Config.TMP_DOWNLOAD_DIRECTORY + "/thumb_image.jpg" if not os.path.isdir(extracted): os.makedirs(extracted) if event.reply_to_msg_id: start = datetime.now() reply_message = await event.get_reply_message() try: c_time = time.time() downloaded_file_name = await borg.download_media( reply_message, Config.TMP_DOWNLOAD_DIRECTORY, progress_callback=lambda d, t: asyncio.get_event_loop().create_task( progress(d, t, mone, c_time, "trying to download") ), ) except Exception as e: # pylint:disable=C0103,W0703 await mone.edit(str(e)) else: end = datetime.now() ms = (end - start).seconds await mone.edit( "Stored the tar to `{}` in {} seconds.".format(downloaded_file_name, ms) ) with tarfile.TarFile.open(downloaded_file_name, "r") as tar_file: tar_file.extractall(path=extracted) # tf = tarfile.open(downloaded_file_name) # tf.extractall(path=extracted) # tf.close() # with zipfile.ZipFile(downloaded_file_name, 'r') as zip_ref: # zip_ref.extractall(extracted) filename = sorted(get_lst_of_files(extracted, [])) # filename = filename + "/" await event.edit("Untarring now") # r=root, d=directories, f = files for single_file in filename: if os.path.exists(single_file): # https://stackoverflow.com/a/678242/4723940 caption_rts = os.path.basename(single_file) force_document = False supports_streaming = True document_attributes = [] if single_file.endswith((".mp4", ".mp3", ".flac", ".webm")): metadata 
= extractMetadata(createParser(single_file)) duration = 0 width = 0 height = 0 if metadata.has("duration"): duration = metadata.get("duration").seconds if os.path.exists(thumb_image_path): metadata = extractMetadata(createParser(thumb_image_path)) if metadata.has("width"): width = metadata.get("width") if metadata.has("height"): height = metadata.get("height") document_attributes = [ DocumentAttributeVideo( duration=duration, w=width, h=height, round_message=False, supports_streaming=True, ) ] try: await borg.send_file( event.chat_id, single_file, caption=f"Untared `{caption_rts}`", force_document=force_document, supports_streaming=supports_streaming, allow_cache=False, reply_to=event.message.id, attributes=document_attributes, progress_callback=lambda d, t: asyncio.get_event_loop().create_task( progress(d, t, event, c_time, "trying to upload") ), ) await event.edit("DONE!!!") await asyncio.sleep(5) await event.delete() except Exception as e: await borg.send_message( event.chat_id, "{} caused `{}`".format(caption_rts, str(e)), reply_to=event.message.id, ) # some media were having some issues continue os.remove(single_file) os.remove(downloaded_file_name) def get_lst_of_files(input_directory, output_lst): filesinfolder = os.listdir(input_directory) for file_name in filesinfolder: current_file_name = os.path.join(input_directory, file_name) if os.path.isdir(current_file_name): return get_lst_of_files(current_file_name, output_lst) output_lst.append(current_file_name) return output_lst CmdHelp("archive").add_command( 'zip', 'Reply to file/media', 'It will zip the file/media' ).add_command( 'rar', 'Reply to file/media', 'It will rar the file/media' ).add_command( '7z', 'Reply to file/media', 'It will 7z the file/media' ).add_command( 'tar', 'Reply to file/media', 'It will tar the file/media' ).add_command( 'unzip', 'Reply to zip file', 'It will unzip the zip file' ).add_command( 'unrar', 'Reply to rar file', 'It will unrar the rar file' ).add_command( 'untar', 'Reply 
to tar file', 'It will untar the tar file' ).add_command( 'compress', 'Reply to file/media', 'It will compress the replied media/file' ).add()
39.899654
120
0.5405
4a013bce9c88e0a77468cf5718e7e2aa5eff5467
6,516
py
Python
build/scripts/fetch_from_sandbox.py
schmidek/catboost
cb598acb045f13307c46fb5c32b49bbd78cfd122
[ "Apache-2.0" ]
null
null
null
build/scripts/fetch_from_sandbox.py
schmidek/catboost
cb598acb045f13307c46fb5c32b49bbd78cfd122
[ "Apache-2.0" ]
null
null
null
build/scripts/fetch_from_sandbox.py
schmidek/catboost
cb598acb045f13307c46fb5c32b49bbd78cfd122
[ "Apache-2.0" ]
null
null
null
import urllib2 import random import sys import time import os import logging import subprocess import json import itertools import optparse import fetch_from ORIGIN_SUFFIX = '?origin=fetch-from-sandbox' MDS_PREFIX = 'http://storage-int.mds.yandex.net/get-sandbox/' def parse_args(): parser = optparse.OptionParser(option_list=fetch_from.common_options()) parser.add_option('--resource-id', dest='resource_id') parser.add_option('--custom-fetcher', dest='custom_fetcher') return parser.parse_args() SANDBOX_PROXY_URL = "https://proxy.sandbox.yandex-team.ru/{}?origin=fetch-from-sandbox" class ResourceFetchingError(Exception): pass class ResourceInfoError(Exception): pass class UnsupportedProtocolException(Exception): pass class PutRequest(urllib2.Request): def get_method(self, *args, **kwargs): return 'PUT' def download_by_skynet(resource_info, file_name): def _sky_path(): return "/usr/local/bin/sky" def is_skynet_avaliable(): if not os.path.exists(_sky_path()): return False try: subprocess.check_output([_sky_path(), "--version"]) return True except subprocess.CalledProcessError: return False except OSError: return False def sky_get(skynet_id, target_dir, timeout=None): cmd_args = [_sky_path(), 'get', "-N", "Backbone", "--user", "--wait", "--dir", target_dir, skynet_id] if timeout is not None: cmd_args += ["--timeout", str(timeout)] logging.debug('Call skynet with args: %s', cmd_args) stdout = subprocess.check_output(cmd_args).strip() logging.debug('Skynet call with args %s is finished, result is %s', cmd_args, stdout) return stdout if not is_skynet_avaliable(): raise UnsupportedProtocolException("Skynet is not available") skynet_id = resource_info.get("skynet_id") if not skynet_id: raise ValueError("Resource does not have skynet_id") temp_dir = os.path.abspath(fetch_from.uniq_string_generator()) os.mkdir(temp_dir) sky_get(skynet_id, temp_dir) return os.path.join(temp_dir, file_name) def _urlopen(url, data=None): n = 10 for i in xrange(n): try: return urllib2.urlopen(url, 
timeout=30, data=data).read() except urllib2.HTTPError as e: logging.error(e) if e.code not in (500, 503, 504): raise except Exception as e: logging.error(e) if i + 1 == n: raise e time.sleep(i) def _query(url): return json.loads(_urlopen(url)) def _query_put(url, data): return _urlopen(PutRequest(url), data) def get_resource_info(resource_id): return _query('https://sandbox.yandex-team.ru/api/v1.0/resource/' + str(resource_id)) def update_access_time(resource_id): return _query_put('https://sandbox.yandex-team.ru/api/v1.0/resource/' + str(resource_id), {}) def get_resource_http_links(resource_id): return [r['url'] + ORIGIN_SUFFIX for r in _query('https://sandbox.yandex-team.ru/api/v1.0/resource/{}/data/http'.format(resource_id))] def fetch_via_script(script, resource_id): return subprocess.check_output([script, str(resource_id)]).rstrip() def fetch(resource_id, custom_fetcher): try: resource_info = get_resource_info(resource_id) except Exception as e: raise ResourceInfoError(str(e)) logging.info('Resource %s info %s', str(resource_id), json.dumps(resource_info)) try: update_access_time(resource_id) except Exception as e: sys.stderr.write("Failed to update access time for {} resource: {}\n".format(resource_id, e)) resource_file_name = os.path.basename(resource_info["file_name"]) expected_md5 = resource_info.get('md5') proxy_link = resource_info['http']['proxy'] + ORIGIN_SUFFIX mds_id = resource_info.get('attributes', {}).get('mds') mds_link = MDS_PREFIX + mds_id if mds_id else None def get_storage_links(): storage_links = get_resource_http_links(resource_id) random.shuffle(storage_links) return storage_links def iter_tries(): yield lambda: download_by_skynet(resource_info, resource_file_name) if custom_fetcher: yield lambda: fetch_via_script(custom_fetcher, resource_id) # Don't try too hard here: we will get back to proxy later on yield lambda: fetch_from.fetch_url(proxy_link, False, resource_file_name, expected_md5, tries=2) for x in get_storage_links(): # Don't 
spend too much time connecting single host yield lambda: fetch_from.fetch_url(x, False, resource_file_name, expected_md5, tries=1) if mds_link is not None: # Don't try too hard here: we will get back to MDS later on yield lambda: fetch_from.fetch_url(mds_link, True, resource_file_name, expected_md5, tries=2) yield lambda: fetch_from.fetch_url(proxy_link, False, resource_file_name, expected_md5) if mds_link is not None: yield lambda: fetch_from.fetch_url(mds_link, True, resource_file_name, expected_md5) if resource_info.get('attributes', {}).get('ttl') != 'inf': sys.stderr.write('WARNING: resource {} ttl is not "inf".\n'.format(resource_id)) exc_info = None for i, action in enumerate(itertools.islice(iter_tries(), 0, 10)): try: fetched_file = action() break except Exception as e: logging.exception(e) exc_info = exc_info or sys.exc_info() time.sleep(i) else: raise exc_info[0], exc_info[1], exc_info[2] return fetched_file, resource_info['file_name'] def main(opts, outputs): custom_fetcher = os.environ.get('YA_CUSTOM_FETCHER') fetched_file, file_name = fetch(opts.resource_id, custom_fetcher) fetch_from.process(fetched_file, file_name, opts, outputs, not custom_fetcher) if __name__ == '__main__': log_file_name = os.path.basename(__file__) + '.log' abs_log_path = os.path.abspath(log_file_name) logging.basicConfig(filename=log_file_name, level=logging.DEBUG) opts, args = parse_args() try: main(opts, args) except Exception as e: logging.exception(e) print >>sys.stderr, open(abs_log_path).read() sys.stderr.flush() sys.exit(fetch_from.INFRASTRUCTURE_ERROR if fetch_from.is_temporary(e) else 1)
30.448598
138
0.672652
4a013c2b2f035128d2474281a1d6c0d82469246d
3,852
py
Python
var/spack/repos/builtin/packages/intel-oneapi-mpi/package.py
mt-empty/spack
4573261de5b32bb22289752b9023aa767b50b700
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
2
2021-03-05T10:54:32.000Z
2021-03-05T14:14:52.000Z
var/spack/repos/builtin/packages/intel-oneapi-mpi/package.py
mt-empty/spack
4573261de5b32bb22289752b9023aa767b50b700
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
32
2020-12-15T17:29:20.000Z
2022-03-21T15:08:31.000Z
var/spack/repos/builtin/packages/intel-oneapi-mpi/package.py
mt-empty/spack
4573261de5b32bb22289752b9023aa767b50b700
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
2
2021-07-19T20:31:27.000Z
2021-07-19T21:14:14.000Z
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import platform import subprocess from spack import * class IntelOneapiMpi(IntelOneApiLibraryPackage): """Intel oneAPI MPI.""" maintainers = ['rscohn2', 'danvev'] homepage = 'https://software.intel.com/content/www/us/en/develop/tools/oneapi/components/mpi-library.html' if platform.system() == 'Linux': version('2021.3.0', url='https://registrationcenter-download.intel.com/akdlm/irc_nas/17947/l_mpi_oneapi_p_2021.3.0.294_offline.sh', sha256='04c48f864ee4c723b1b4ca62f2bea8c04d5d7e3de19171fd62b17868bc79bc36', expand=False) version('2021.2.0', url='https://registrationcenter-download.intel.com/akdlm/irc_nas/17729/l_mpi_oneapi_p_2021.2.0.215_offline.sh', sha256='d0d4cdd11edaff2e7285e38f537defccff38e37a3067c02f4af43a3629ad4aa3', expand=False) version('2021.1.1', url='https://registrationcenter-download.intel.com/akdlm/irc_nas/17397/l_mpi_oneapi_p_2021.1.1.76_offline.sh', sha256='8b7693a156c6fc6269637bef586a8fd3ea6610cac2aae4e7f48c1fbb601625fe', expand=False) provides('mpi@:3') depends_on('patchelf', type='build') @property def component_dir(self): return 'mpi' def setup_dependent_package(self, module, dep_spec): dir = join_path(self.component_path, 'bin') self.spec.mpicc = join_path(dir, 'mpicc') self.spec.mpicxx = join_path(dir, 'mpicxx') self.spec.mpif77 = join_path(dir, 'mpif77') self.spec.mpifc = join_path(dir, 'mpifc') def setup_dependent_build_environment(self, env, dependent_spec): env.set('MPICH_CC', spack_cc) env.set('MPICH_CXX', spack_cxx) env.set('MPICH_F77', spack_f77) env.set('MPICH_F90', spack_fc) env.set('MPICH_FC', spack_fc) # Set compiler wrappers for dependent build stage dir = join_path(self.component_path, 'bin') env.set('MPICC', join_path(dir, 'mpicc')) env.set('MPICXX', join_path(dir, 'mpicxx')) env.set('MPIF77', join_path(dir, 'mpif77')) env.set('MPIF90', 
join_path(dir, 'mpif90')) env.set('MPIFC', join_path(dir, 'mpifc')) @property def libs(self): libs = [] for dir in [join_path('lib', 'release_mt'), 'lib', join_path('libfabric', 'lib')]: lib_path = join_path(self.component_path, dir) ldir = find_libraries('*', root=lib_path, shared=True, recursive=False) libs += ldir return libs def install(self, spec, prefix): super(IntelOneapiMpi, self).install(spec, prefix) # need to patch libmpi.so so it can always find libfabric libfabric_rpath = join_path(self.component_path, 'libfabric', 'lib') for lib_version in ['debug', 'release', 'release_mt', 'debug_mt']: file = join_path(self.component_path, 'lib', lib_version, 'libmpi.so') subprocess.call(['patchelf', '--set-rpath', libfabric_rpath, file]) # fix I_MPI_SUBSTITUTE_INSTALLDIR and # __EXEC_PREFIX_TO_BE_FILLED_AT_INSTALL_TIME__ scripts = ["mpif77", "mpif90", "mpigcc", "mpigxx", "mpiicc", "mpiicpc", "mpiifort"] for script in scripts: file = join_path(self.component_path, 'bin', script) filter_file('I_MPI_SUBSTITUTE_INSTALLDIR', self.component_path, file, backup=False) filter_file('__EXEC_PREFIX_TO_BE_FILLED_AT_INSTALL_TIME__', self.component_path, file, backup=False)
40.978723
127
0.638629
4a013d15be658043b8473ea44f1aad0183bf5d4c
246
py
Python
Leetcode/0754. Reach a Number/0754.py
Next-Gen-UI/Code-Dynamics
a9b9d5e3f27e870b3e030c75a1060d88292de01c
[ "MIT" ]
null
null
null
Leetcode/0754. Reach a Number/0754.py
Next-Gen-UI/Code-Dynamics
a9b9d5e3f27e870b3e030c75a1060d88292de01c
[ "MIT" ]
null
null
null
Leetcode/0754. Reach a Number/0754.py
Next-Gen-UI/Code-Dynamics
a9b9d5e3f27e870b3e030c75a1060d88292de01c
[ "MIT" ]
null
null
null
class Solution: def reachNumber(self, target: int) -> int: ans = 0 pos = 0 target = abs(target) while pos < target: ans += 1 pos += ans while (pos - target) & 1: ans += 1 pos += ans return ans
15.375
44
0.504065
4a013e24748f76dbed2d84b2d87828d5e8558353
9,695
py
Python
conformer/preprocess/ksponspeech.py
dudgns0908/KoASR
6617acd94362c6d70614bf0c69a8088843ad3f4f
[ "Apache-2.0" ]
null
null
null
conformer/preprocess/ksponspeech.py
dudgns0908/KoASR
6617acd94362c6d70614bf0c69a8088843ad3f4f
[ "Apache-2.0" ]
null
null
null
conformer/preprocess/ksponspeech.py
dudgns0908/KoASR
6617acd94362c6d70614bf0c69a8088843ad3f4f
[ "Apache-2.0" ]
null
null
null
import os import re import unicodedata from typing import Union import pandas as pd from tqdm import tqdm from conformer.preprocess.types import KsponSpeechVocabType, SpeechModeType PERCENT_FILES = { '087797': '퍼센트', '215401': '퍼센트', '284574': '퍼센트', '397184': '퍼센트', '501006': '프로', '502173': '프로', '542363': '프로', '581483': '퍼센트' } def bracket_filter(sentence, mode='phonetic'): new_sentence = str() if mode == 'phonetic': flag = False for ch in sentence: if ch == '(' and flag is False: flag = True continue if ch == '(' and flag is True: flag = False continue if ch != ')' and flag is False: new_sentence += ch elif mode == 'spelling': flag = True for ch in sentence: if ch == '(': continue if ch == ')': if flag is True: flag = False continue else: flag = True continue if ch != ')' and flag is True: new_sentence += ch else: raise ValueError("Unsupported mode : {0}".format(mode)) return new_sentence def special_filter(sentence, mode='phonetic', replace=None): SENTENCE_MARK = ['?', '!', '.'] NOISE = ['o', 'n', 'u', 'b', 'l'] EXCEPT = ['/', '+', '*', '-', '@', '$', '^', '&', '[', ']', '=', ':', ';', ','] new_sentence = str() for idx, ch in enumerate(sentence): if ch not in SENTENCE_MARK: if idx + 1 < len(sentence) and ch in NOISE and sentence[idx + 1] == '/': continue if ch == '#': new_sentence += '샾' elif ch == '%': if mode == 'phonetic': new_sentence += replace elif mode == 'spelling': new_sentence += '%' elif ch not in EXCEPT: new_sentence += ch pattern = re.compile(r'\s\s+') new_sentence = re.sub(pattern, ' ', new_sentence.strip()) return new_sentence def sentence_filter(sentence, mode, replace): return special_filter(bracket_filter(sentence, mode), mode, replace) class KsponSpeech: train_trn = ('train.trn',) eval_trn = ("eval_clean.trn", "eval_other.trn") def preprocess( self, script_file_dir: str, mode: str = SpeechModeType.PHOENTIC, vocab_type: str = KsponSpeechVocabType.CHARACTER, manifest_file_path: str = './manifest.csv', vocab_path: str = './vocab.csv', ): 
train_audio_paths, train_transcripts = self.preprocess_sentence(script_file_dir, self.train_trn, mode) eval_audio_paths, eval_transcripts = self.preprocess_sentence(script_file_dir, self.eval_trn, mode) audio_paths = train_audio_paths + eval_audio_paths transcripts = train_transcripts + eval_transcripts self.save_manifest( audio_paths, transcripts, manifest_file_path, vocab_path, vocab_type=vocab_type ) def preprocess_sentence( self, script_file_dir: str, script_file_name: Union[str, tuple, list], mode: str = SpeechModeType.PHOENTIC ): script_names = [script_file_name] if isinstance(script_file_name, str) else script_file_name audio_paths = [] transcripts = [] for script_name in script_names: with open(os.path.join(script_file_dir, script_name), 'r') as f: for line in tqdm(f.readlines()): audio_path, raw_transcript = line.split(" :: ") audio_paths.append(audio_path) file_name = os.path.basename(audio_path) replace = PERCENT_FILES.get(file_name[12:18], None) transcript = sentence_filter(raw_transcript, mode=mode, replace=replace) transcripts.append(transcript) return audio_paths, transcripts def save_manifest( self, audio_paths: list, transcripts: list, manifest_file_path: str, vocab_path: str, vocab_type: str = KsponSpeechVocabType.CHARACTER ): vocabs = [] if vocab_type == KsponSpeechVocabType.GRAPHEME: vocabs = self.generate_grapheme(transcripts, vocab_path) elif vocab_type == KsponSpeechVocabType.CHARACTER: vocabs = self.generate_character(transcripts, vocab_path) vocab2id, id2vocab = self.get_label(vocab_path) with open(manifest_file_path, "w") as f: for audio_path, transcript, vocab in zip(audio_paths, transcripts, vocabs): vocab_id_transcript = self.sentence_to_target(vocab, vocab2id) f.write(f'{audio_path}\t{transcript}\t{vocab_id_transcript}\n') def generate_vocab( self, sentences: list, vocab_path: str, vocab_type: str = KsponSpeechVocabType.GRAPHEME ) -> list: vocabs = list() vocab_freq = list() result = list() for sentence in tqdm(sentences): 
sentence_vocab = sentence if vocab_type == KsponSpeechVocabType.GRAPHEME: sentence_vocab = unicodedata.normalize('NFKD', sentence).replace(' ', '|').upper() result.append(sentence_vocab) for vocab in sentence_vocab: if vocab not in vocabs: vocabs.append(vocab) vocab_freq.append(1) else: vocab_freq[vocabs.index(vocab)] += 1 vocab_dict = { 'id': [0, 1, 2], 'vocab': ['<pad>', '<sos>', '<eos>'], 'freq': [0, 0, 0] } for idx, (freq, grapheme) in enumerate(sorted(zip(vocab_freq, vocabs), reverse=True), start=3): vocab_dict['id'].append(idx) vocab_dict['vocab'].append(grapheme) vocab_dict['freq'].append(freq) if vocab_type == KsponSpeechVocabType.CHARACTER: vocab_dict['id'] = vocab_dict['id'][:2000] vocab_dict['vocab'] = vocab_dict['vocab'][:2000] vocab_dict['freq'] = vocab_dict['freq'][:2000] vocab_df = pd.DataFrame(vocab_dict) vocab_df.to_csv(vocab_path, encoding="utf-8", index=False) return result # def generate_grapheme(self, sentences: list, vocab_path: str) -> list: # vocabs = list() # vocab_freq = list() # graphemes = [] # for sentence in tqdm(sentences): # sentence_grapheme = unicodedata.normalize('NFKD', sentence).replace(' ', '|').upper() # graphemes.append(sentence_grapheme) # for grapheme in sentence_grapheme: # if grapheme not in vocabs: # vocabs.append(grapheme) # vocab_freq.append(1) # else: # vocab_freq[vocabs.index(grapheme)] += 1 # # vocab_dict = { # 'id': [0, 1, 2], # 'vocab': ['<pad>', '<sos>', '<eos>'], # 'freq': [0, 0, 0] # } # # # vocab_freq, vocab_list = zip(*sorted(zip(vocab_freq, vocabs), reverse=True)) # for idx, (freq, grapheme) in enumerate(sorted(zip(vocab_freq, vocabs), reverse=True), start=3): # vocab_dict['id'].append(idx) # vocab_dict['vocab'].append(grapheme) # vocab_dict['freq'].append(freq) # # vocab_df = pd.DataFrame(vocab_dict) # vocab_df.to_csv(vocab_path, encoding="utf-8", index=False) # return graphemes # # def generate_character(self, sentences: list, vocab_path: str) -> list: # vocabs = list() # vocab_freq = list() # # for 
sentence in sentences: # for ch in sentence: # if ch not in vocabs: # vocabs.append(ch) # vocab_freq.append(1) # else: # vocab_freq[vocabs.index(ch)] += 1 # # # sort together Using zip # label_freq, label_list = zip(*sorted(zip(vocab_freq, vocabs), reverse=True)) # label = { # 'id': [0, 1, 2, 3], # 'vocab': ['<pad>', '<sos>', '<eos>', '<blank>'], # 'freq': [0, 0, 0, 0] # } # # for idx, (ch, freq) in enumerate(zip(label_list, label_freq)): # label['id'].append(idx + 4) # label['vocab'].append(ch) # label['freq'].append(freq) # # label['id'] = label['id'][:2000] # label['vocab'] = label['vocab'][:2000] # label['freq'] = label['freq'][:2000] # # label_df = pd.DataFrame(label) # label_df.to_csv(vocab_path, encoding="utf-8", index=False) # return sentences def generate_subword(self, sentences: list, vocab_path: str) -> list: vocabs = list() def get_label(self, vocab_path: str): vocab_data_frame = pd.read_csv(vocab_path, encoding="utf-8") id_list = vocab_data_frame["id"] vocab_list = vocab_data_frame["vocab"] vocab2id = dict() id2grpm = dict() for _id, grpm in zip(id_list, vocab_list): vocab2id[grpm] = _id id2grpm[_id] = grpm return vocab2id, id2grpm def sentence_to_target(self, transcript, vocab2id): target = str() for vocab in transcript: target += (str(vocab2id[vocab]) + ' ') return target[:-1]
33.663194
110
0.537597
4a013ed8539d78cfc9d24a1c982d5dd3a9e76559
2,236
py
Python
mrp/examples/11_elasticsearch/a02es_indexer.py
colesbury/fairo
9e50a3aa7369c68c80e84d80abd5fcdee8a9277a
[ "MIT" ]
null
null
null
mrp/examples/11_elasticsearch/a02es_indexer.py
colesbury/fairo
9e50a3aa7369c68c80e84d80abd5fcdee8a9277a
[ "MIT" ]
null
null
null
mrp/examples/11_elasticsearch/a02es_indexer.py
colesbury/fairo
9e50a3aa7369c68c80e84d80abd5fcdee8a9277a
[ "MIT" ]
null
null
null
"""Demo AlephZero logs -> ElasticSearch.""" import a0 import elasticsearch import json import signal class A02ES_Indexer: def __init__(self): # Connect to the local elasticsearch engine. self._es = elasticsearch.Elasticsearch( "http://localhost:9200", request_timeout=10 ) # Connect to the alephzero logger. self._a0 = a0.Subscriber("log/announce", self.on_log_announce) def on_log_announce(self, pkt): # AlephZero logger made an announcement. # We only care if that announcement is that a log file has been completed. info = json.loads(pkt.payload) if info["action"] != "closed": return # Grab path data from the announcement. abspath = info["write_abspath"] relpath = info["write_relpath"] original_path = info["read_relpath"] # Per-packet callback. def read_handle(tlk, fpkt): # Add standard fields. data = { "id": fpkt.id, "abspath": abspath, "relpath": relpath, "original_path": original_path, "offset": tlk.frame().off, "payload_size": len(fpkt.payload_view) } # Add all headers. # TODO(lshamis): Add directive headers like "_index.payload". for k, v in fpkt.headers: data.setdefault(k, []).append(v) try: # Index the data into ES. # TODO(lshamis): Can we batch the operation across multiple packets? self._es.index(index="myindex", document=data) except Exception as err: # TODO(lshamis): Maybe retry. print(f"skipping pkt: {err}") # Iterate through each packet of the closed log file. fileopts = a0.File.Options.DEFAULT fileopts.open_options.arena_mode = a0.Arena.Mode.READONLY r = a0.ReaderSyncZeroCopy( a0.File(info["write_abspath"], fileopts), a0.INIT_OLDEST ) while r.can_read(): r.read(read_handle) def main(): indexer = A02ES_Indexer() signal.pause() if __name__ == "__main__": main()
31.055556
84
0.571109
4a01403583d133c46b12c754de90e3874951dabb
6,897
py
Python
neutron/tests/api/test_extension_driver_port_security.py
wwriverrat/neutron
1d0aadb6fcf40048e899df6688bda98744c2a999
[ "Apache-2.0" ]
1
2016-03-25T21:13:13.000Z
2016-03-25T21:13:13.000Z
neutron/tests/api/test_extension_driver_port_security.py
wwriverrat/neutron
1d0aadb6fcf40048e899df6688bda98744c2a999
[ "Apache-2.0" ]
1
2021-03-21T11:39:22.000Z
2021-03-21T11:39:22.000Z
neutron/tests/api/test_extension_driver_port_security.py
wwriverrat/neutron
1d0aadb6fcf40048e899df6688bda98744c2a999
[ "Apache-2.0" ]
1
2021-03-21T11:37:18.000Z
2021-03-21T11:37:18.000Z
# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from tempest import test from tempest_lib import exceptions as lib_exc from neutron.tests.api import base from neutron.tests.api import base_security_groups as base_security FAKE_IP = '10.0.0.1' FAKE_MAC = '00:25:64:e8:19:dd' @ddt.ddt class PortSecTest(base_security.BaseSecGroupTest, base.BaseNetworkTest): @test.attr(type='smoke') @test.idempotent_id('7c338ddf-e64e-4118-bd33-e49a1f2f1495') @test.requires_ext(extension='port-security', service='network') def test_port_sec_default_value(self): # Default port-sec value is True, and the attr of the port will inherit # from the port-sec of the network when it not be specified in API network = self.create_network() self.assertTrue(network['port_security_enabled']) self.create_subnet(network) port = self.create_port(network) self.assertTrue(port['port_security_enabled']) @test.attr(type='smoke') @test.idempotent_id('e60eafd2-31de-4c38-8106-55447d033b57') @test.requires_ext(extension='port-security', service='network') @ddt.unpack @ddt.data({'port_sec_net': False, 'port_sec_port': True, 'expected': True}, {'port_sec_net': True, 'port_sec_port': False, 'expected': False}) def test_port_sec_specific_value(self, port_sec_net, port_sec_port, expected): network = self.create_network(port_security_enabled=port_sec_net) self.create_subnet(network) port = self.create_port(network, port_security_enabled=port_sec_port) 
self.assertEqual(network['port_security_enabled'], port_sec_net) self.assertEqual(port['port_security_enabled'], expected) @test.attr(type=['smoke']) @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') @test.requires_ext(extension='port-security', service='network') def test_create_port_sec_with_security_group(self): network = self.create_network(port_security_enabled=True) self.create_subnet(network) port = self.create_port(network, security_groups=[]) self.assertTrue(port['port_security_enabled']) self.client.delete_port(port['id']) port = self.create_port(network, security_groups=[], port_security_enabled=False) self.assertFalse(port['port_security_enabled']) self.assertEmpty(port['security_groups']) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') @test.requires_ext(extension='port-security', service='network') def test_port_sec_update_port_failed(self): network = self.create_network() self.create_subnet(network) sec_group_body, sec_group_name = self._create_security_group() port = self.create_port(network) # Exception when set port-sec to False with sec-group defined self.assertRaises(lib_exc.Conflict, self.update_port, port, port_security_enabled=False) port = self.update_port(port, security_groups=[], port_security_enabled=False) self.assertEmpty(port['security_groups']) self.assertFalse(port['port_security_enabled']) port = self.update_port( port, security_groups=[sec_group_body['security_group']['id']], port_security_enabled=True) self.assertNotEmpty(port['security_groups']) self.assertTrue(port['port_security_enabled']) # Remove security group from port before deletion on resource_cleanup self.update_port(port, security_groups=[]) @test.attr(type=['smoke']) @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') @test.requires_ext(extension='port-security', service='network') def test_port_sec_update_pass(self): network = self.create_network() self.create_subnet(network) sec_group, _ = 
self._create_security_group() sec_group_id = sec_group['security_group']['id'] port = self.create_port(network, security_groups=[sec_group_id], port_security_enabled=True) self.assertNotEmpty(port['security_groups']) self.assertTrue(port['port_security_enabled']) port = self.update_port(port, security_groups=[]) self.assertEmpty(port['security_groups']) self.assertTrue(port['port_security_enabled']) port = self.update_port(port, security_groups=[sec_group_id]) self.assertNotEmpty(port['security_groups']) port = self.update_port(port, security_groups=[], port_security_enabled=False) self.assertEmpty(port['security_groups']) self.assertFalse(port['port_security_enabled']) @test.attr(type=['smoke']) @test.idempotent_id('2df6114b-b8c3-48a1-96e8-47f08159d35c') @test.requires_ext(extension='port-security', service='network') def test_delete_with_port_sec(self): network = self.create_network(port_security_enabled=True) port = self.create_port(network=network, port_security_enabled=True) self.client.delete_port(port['id']) self.assertTrue(self.client.is_resource_deleted('port', port['id'])) self.client.delete_network(network['id']) self.assertTrue( self.client.is_resource_deleted('network', network['id'])) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('ed93e453-3f8d-495e-8e7e-b0e268c2ebd9') @test.requires_ext(extension='port-security', service='network') @test.requires_ext(extension='allowed-address-pairs', service='network') def test_allow_address_pairs(self): network = self.create_network() self.create_subnet(network) port = self.create_port(network=network, port_security_enabled=False) allowed_address_pairs = [{'ip_address': FAKE_IP, 'mac_address': FAKE_MAC}] # Exception when set address-pairs with port-sec is False self.assertRaises(lib_exc.Conflict, self.update_port, port, allowed_address_pairs=allowed_address_pairs)
44.496774
79
0.679861
4a01403e46de9f38e45ae57bc08d4a42b2a85bbf
4,278
py
Python
model/mnist_dataset.py
MapleLeafKiller/tensorflow-triplet-loss
ac349e344a647c5aeed7248926a76a0fe64f4cb7
[ "MIT" ]
null
null
null
model/mnist_dataset.py
MapleLeafKiller/tensorflow-triplet-loss
ac349e344a647c5aeed7248926a76a0fe64f4cb7
[ "MIT" ]
null
null
null
model/mnist_dataset.py
MapleLeafKiller/tensorflow-triplet-loss
ac349e344a647c5aeed7248926a76a0fe64f4cb7
[ "MIT" ]
null
null
null
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """tf.data.Dataset interface to the MNIST dataset.""" import gzip import os import shutil import numpy as np from six.moves import urllib import tensorflow as tf def read32(bytestream): """Read 4 bytes from bytestream as an unsigned 32-bit integer.""" dt = np.dtype(np.uint32).newbyteorder('>') return np.frombuffer(bytestream.read(4), dtype=dt)[0] def check_image_file_header(filename): """Validate that filename corresponds to images for the MNIST dataset.""" with tf.gfile.Open(filename, 'rb') as f: magic = read32(f) read32(f) # num_images, unused rows = read32(f) cols = read32(f) if magic != 2051: raise ValueError('Invalid magic number %d in MNIST file %s' % (magic, f.name)) if rows != 28 or cols != 28: raise ValueError( 'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' % (f.name, rows, cols)) def check_labels_file_header(filename): """Validate that filename corresponds to labels for the MNIST dataset.""" with tf.gfile.Open(filename, 'rb') as f: magic = read32(f) read32(f) # num_items, unused if magic != 2049: raise ValueError('Invalid magic number %d in MNIST file %s' % (magic, f.name)) def download(directory, filename): """Download (and unzip) a file from the MNIST dataset if not already done.""" filepath = os.path.join(directory, filename) if tf.gfile.Exists(filepath): return filepath if not tf.gfile.Exists(directory): tf.gfile.MakeDirs(directory) # CVDF mirror 
of http://yann.lecun.com/exdb/mnist/ url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz' zipped_filepath = filepath + '.gz' print('Downloading %s to %s' % (url, zipped_filepath)) urllib.request.urlretrieve(url, zipped_filepath) with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) os.remove(zipped_filepath) return filepath def dataset(directory, images_file, labels_file): """Download and parse MNIST dataset.""" images_file = download(directory, images_file) labels_file = download(directory, labels_file) check_image_file_header(images_file) check_labels_file_header(labels_file) def decode_image(image): # Normalize from [0, 255] to [0.0, 1.0] image = tf.decode_raw(image, tf.uint8) image = tf.cast(image, tf.float32) image = tf.reshape(image, [784]) return image / 255.0 def decode_label(label): label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8] label = tf.reshape(label, []) # label is a scalar return tf.to_int32(label) images = tf.data.FixedLengthRecordDataset(images_file, 28 * 28, header_bytes=16) images = images.map(decode_image) labels = tf.data.FixedLengthRecordDataset(labels_file, 1, header_bytes=8).map(decode_label) # print("images.output_types=", images.output_types) # float32 # print("images.output_shapes=", images.output_shapes) # (784,) # print("labels.output_types=", labels.output_types) # int32 # print("labels.output_shapes=", labels.output_shapes) # () return tf.data.Dataset.zip((images, labels)) def train(directory): """tf.data.Dataset object for MNIST training data.""" return dataset(directory, 'train-images-idx3-ubyte', 'train-labels-idx1-ubyte') def test(directory): """tf.data.Dataset object for MNIST test data.""" return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
37.858407
95
0.675082
4a01422460217cbfd0917c6efd9dca8b1ca89062
250
py
Python
snapchat_problems/problem_1.py
saadaoui-salah/Daily-Coding-Problem
48378fd74e948513a512302003d1f6bed869115b
[ "MIT" ]
null
null
null
snapchat_problems/problem_1.py
saadaoui-salah/Daily-Coding-Problem
48378fd74e948513a512302003d1f6bed869115b
[ "MIT" ]
null
null
null
snapchat_problems/problem_1.py
saadaoui-salah/Daily-Coding-Problem
48378fd74e948513a512302003d1f6bed869115b
[ "MIT" ]
null
null
null
"""This problem was asked by Snapchat. Given an array of time intervals (start, end) for classroom lectures (possibly overlapping), find the minimum number of rooms required. For example, given [(30, 75), (0, 50), (60, 150)], you should return 2. """
50
75
0.724
4a014229367e4539de28c131ba01603c8169bb33
70
py
Python
gumo/dev_server/infrastrucutre/__init__.py
gumo-py/gumo-dev-server
a8c5c92dcda022f7e640ecc525b4f0ebb0ed4cdd
[ "MIT" ]
null
null
null
gumo/dev_server/infrastrucutre/__init__.py
gumo-py/gumo-dev-server
a8c5c92dcda022f7e640ecc525b4f0ebb0ed4cdd
[ "MIT" ]
29
2019-07-27T09:38:59.000Z
2021-06-25T15:24:17.000Z
gumo/dev_server/infrastrucutre/__init__.py
gumo-py/gumo-dev-server
a8c5c92dcda022f7e640ecc525b4f0ebb0ed4cdd
[ "MIT" ]
1
2019-08-23T00:49:39.000Z
2019-08-23T00:49:39.000Z
from gumo.core.infrastructure import MockAppEngineEnvironment # noqa
35
69
0.857143
4a014283c8650112323007992fe702702707ad66
5,967
py
Python
venv/Lib/site-packages/setuptools/command/sdist.py
gilbertekalea/booking.com_crawler
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
[ "MIT" ]
1,318
2019-07-11T10:34:39.000Z
2022-03-29T15:05:19.000Z
venv/Lib/site-packages/setuptools/command/sdist.py
gilbertekalea/booking.com_crawler
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
[ "MIT" ]
387
2019-09-05T16:33:09.000Z
2022-03-31T10:43:39.000Z
venv/Lib/site-packages/setuptools/command/sdist.py
gilbertekalea/booking.com_crawler
71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae
[ "MIT" ]
66
2019-11-11T15:33:12.000Z
2022-03-01T07:55:55.000Z
from distutils import log import distutils.command.sdist as orig import os import sys import io import contextlib from .py36compat import sdist_add_defaults import pkg_resources _default_revctrl = list def walk_revctrl(dirname=''): """Find all files under revision control""" for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): for item in ep.load()(dirname): yield item class sdist(sdist_add_defaults, orig.sdist): """Smart sdist that finds anything supported by revision control""" user_options = [ ('formats=', None, "formats for source distribution (comma-separated list)"), ('keep-temp', 'k', "keep the distribution tree around after creating " + "archive file(s)"), ('dist-dir=', 'd', "directory to put the source distribution archive(s) in " "[default: dist]"), ] negative_opt = {} README_EXTENSIONS = ['', '.rst', '.txt', '.md'] READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS) def run(self): self.run_command('egg_info') ei_cmd = self.get_finalized_command('egg_info') self.filelist = ei_cmd.filelist self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt')) self.check_readme() # Run sub commands for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) self.make_distribution() dist_files = getattr(self.distribution, 'dist_files', []) for file in self.archive_files: data = ('sdist', '', file) if data not in dist_files: dist_files.append(data) def initialize_options(self): orig.sdist.initialize_options(self) self._default_to_gztar() def _default_to_gztar(self): # only needed on Python prior to 3.6. 
if sys.version_info >= (3, 6, 0, 'beta', 1): return self.formats = ['gztar'] def make_distribution(self): """ Workaround for #516 """ with self._remove_os_link(): orig.sdist.make_distribution(self) @staticmethod @contextlib.contextmanager def _remove_os_link(): """ In a context, remove and restore os.link if it exists """ class NoValue: pass orig_val = getattr(os, 'link', NoValue) try: del os.link except Exception: pass try: yield finally: if orig_val is not NoValue: setattr(os, 'link', orig_val) def _add_defaults_optional(self): super()._add_defaults_optional() if os.path.isfile('pyproject.toml'): self.filelist.append('pyproject.toml') def _add_defaults_python(self): """getting python files""" if self.distribution.has_pure_modules(): build_py = self.get_finalized_command('build_py') self.filelist.extend(build_py.get_source_files()) self._add_data_files(self._safe_data_files(build_py)) def _safe_data_files(self, build_py): """ Extracting data_files from build_py is known to cause infinite recursion errors when `include_package_data` is enabled, so suppress it in that case. """ if self.distribution.include_package_data: return () return build_py.data_files def _add_data_files(self, data_files): """ Add data files as found in build_py.data_files. 
""" self.filelist.extend( os.path.join(src_dir, name) for _, src_dir, _, filenames in data_files for name in filenames ) def _add_defaults_data_files(self): try: super()._add_defaults_data_files() except TypeError: log.warn("data_files contains unexpected objects") def check_readme(self): for f in self.READMES: if os.path.exists(f): return else: self.warn( "standard file not found: should have one of " + ', '.join(self.READMES) ) def make_release_tree(self, base_dir, files): orig.sdist.make_release_tree(self, base_dir, files) # Save any egg_info command line options used to create this sdist dest = os.path.join(base_dir, 'setup.cfg') if hasattr(os, 'link') and os.path.exists(dest): # unlink and re-copy, since it might be hard-linked, and # we don't want to change the source version os.unlink(dest) self.copy_file('setup.cfg', dest) self.get_finalized_command('egg_info').save_version_info(dest) def _manifest_is_not_generated(self): # check for special comment used in 2.7.1 and higher if not os.path.isfile(self.manifest): return False with io.open(self.manifest, 'rb') as fp: first_line = fp.readline() return (first_line != '# file GENERATED by distutils, do NOT edit\n'.encode()) def read_manifest(self): """Read the manifest file (named by 'self.manifest') and use it to fill in 'self.filelist', the list of files to include in the source distribution. """ log.info("reading manifest file '%s'", self.manifest) manifest = open(self.manifest, 'rb') for line in manifest: # The manifest must contain UTF-8. See #303. try: line = line.decode('UTF-8') except UnicodeDecodeError: log.warn("%r not UTF-8 decodable -- skipping" % line) continue # ignore comments and blank lines line = line.strip() if line.startswith('#') or not line: continue self.filelist.append(line) manifest.close()
31.405263
75
0.593431
4a01435de1378c683fac4e5d77b6d9e031cf8ea8
538
py
Python
dpad_manager.py
puzzlestory/royal-flush-telegram-bot
b06b84665b3efcce77144083217ab66b1be20643
[ "MIT" ]
3
2021-07-07T16:56:14.000Z
2021-08-15T19:07:15.000Z
dpad_manager.py
puzzlestory/t-royal-flush-telegram-bot
b06b84665b3efcce77144083217ab66b1be20643
[ "MIT" ]
null
null
null
dpad_manager.py
puzzlestory/t-royal-flush-telegram-bot
b06b84665b3efcce77144083217ab66b1be20643
[ "MIT" ]
1
2020-10-19T17:47:58.000Z
2020-10-19T17:47:58.000Z
from json import loads from urllib import request, parse from env import DP_URL def write_dp(sub_page, content): full_url = ''.join([DP_URL, sub_page]) data = parse.urlencode({'text' : content}).encode('utf-8') req = request.Request(full_url, data) with request.urlopen(req) as response: return response.read() def read_dp(sub_page): full_url = [DP_URL, sub_page, '.body.json?lastUpdate=0'] with request.urlopen(''.join(full_url)) as response: resp = response.read() return loads(resp.decode())['body']
26.9
60
0.697026
4a0144779b218e7f4e2baac4f37ac5aac55b93c9
1,193
py
Python
yapylib/tools/generate_words.py
twocucao/YaPyLib
8fe56f35b9f45d3c7f688ab5842c3a1e50688e01
[ "MIT" ]
2
2017-05-21T01:58:37.000Z
2018-02-23T15:35:14.000Z
yapylib/tools/generate_words.py
twocucao/YaPyLib
8fe56f35b9f45d3c7f688ab5842c3a1e50688e01
[ "MIT" ]
1
2021-06-08T19:12:08.000Z
2021-06-08T19:12:08.000Z
yapylib/tools/generate_words.py
twocucao/YaPyLib
8fe56f35b9f45d3c7f688ab5842c3a1e50688e01
[ "MIT" ]
null
null
null
""" 1. 从字幕中抽取所有单词,并且进行还原 2. """ import os import re from collections import Counter def get_all_words(filename): """ :param filename: :return: """ print(filename) all_words = set() try: all_words = re.findall("[a-zA-Z]+", open(filename).read(), flags=re.ASCII) except Exception: all_words = re.findall("[a-zA-Z]+", open(filename, encoding='gbk').read(), flags=re.ASCII) finally: filtered_words = set() for word in all_words: if len(word) <= 2: continue word = word.lower() filtered_words.add(word) return filtered_words def get_all_file(filename): all_folders = [os.path.join(filename, folder) for folder in os.listdir(filename)] print(all_folders) all_files = [] for folder in all_folders: all_files.extend([os.path.join(folder, file) for file in os.listdir(folder)]) all_words = set() for file in all_files: words = get_all_words(file) all_words.update(words) a = list(all_words) a.sort() with open("all_words.txt", "wt") as f: for i in a: f.write(i) f.write("\n")
24.346939
98
0.588433
4a0144b33ef8766f696d88d0cb803c91ac3c89f2
14,849
py
Python
python/unitytrainers/bc/trainer.py
jacksonJabba/ActiveRagdollAssaultCourse
2969a341ccf2036ef74c900653c2578125a6e6bc
[ "Apache-2.0" ]
1
2018-07-21T20:44:36.000Z
2018-07-21T20:44:36.000Z
python/unitytrainers/bc/trainer.py
Sohojoe/ActiveRagdollDeliberatePractice
20c50e5bd4f15a27f31ee82393f8f43eec3bd70b
[ "Apache-2.0" ]
null
null
null
python/unitytrainers/bc/trainer.py
Sohojoe/ActiveRagdollDeliberatePractice
20c50e5bd4f15a27f31ee82393f8f43eec3bd70b
[ "Apache-2.0" ]
null
null
null
# # Unity ML-Agents Toolkit # ## ML-Agent Learning (Imitation) # Contains an implementation of Behavioral Cloning Algorithm import logging import os import numpy as np import tensorflow as tf from unityagents import AllBrainInfo from unitytrainers.bc.models import BehavioralCloningModel from unitytrainers.buffer import Buffer from unitytrainers.trainer import UnityTrainerException, Trainer logger = logging.getLogger("unityagents") class BehavioralCloningTrainer(Trainer): """The ImitationTrainer is an implementation of the imitation learning.""" def __init__(self, sess, env, brain_name, trainer_parameters, training, seed): """ Responsible for collecting experiences and training PPO model. :param sess: Tensorflow session. :param env: The UnityEnvironment. :param trainer_parameters: The parameters for the trainer (dictionary). :param training: Whether the trainer is set for training. """ self.param_keys = ['brain_to_imitate', 'batch_size', 'time_horizon', 'graph_scope', 'summary_freq', 'max_steps', 'batches_per_epoch', 'use_recurrent', 'hidden_units', 'num_layers', 'sequence_length', 'memory_size'] for k in self.param_keys: if k not in trainer_parameters: raise UnityTrainerException("The hyperparameter {0} could not be found for the Imitation trainer of " "brain {1}.".format(k, brain_name)) super(BehavioralCloningTrainer, self).__init__(sess, env, brain_name, trainer_parameters, training) self.variable_scope = trainer_parameters['graph_scope'] self.brain_to_imitate = trainer_parameters['brain_to_imitate'] self.batches_per_epoch = trainer_parameters['batches_per_epoch'] self.use_recurrent = trainer_parameters['use_recurrent'] self.sequence_length = 1 self.m_size = None if self.use_recurrent: self.m_size = trainer_parameters["memory_size"] self.sequence_length = trainer_parameters["sequence_length"] self.n_sequences = max(int(trainer_parameters['batch_size'] / self.sequence_length), 1) self.cumulative_rewards = {} self.episode_steps = {} self.stats = {'losses': [], 
'episode_length': [], 'cumulative_reward': []} self.training_buffer = Buffer() self.is_continuous_action = (env.brains[brain_name].vector_action_space_type == "continuous") self.use_visual_observations = (env.brains[brain_name].number_visual_observations > 0) self.use_vector_observations = (env.brains[brain_name].vector_observation_space_size > 0) self.summary_path = trainer_parameters['summary_path'] if not os.path.exists(self.summary_path): os.makedirs(self.summary_path) self.summary_writer = tf.summary.FileWriter(self.summary_path) with tf.variable_scope(self.variable_scope): tf.set_random_seed(seed) self.model = BehavioralCloningModel( h_size=int(trainer_parameters['hidden_units']), lr=float(trainer_parameters['learning_rate']), n_layers=int(trainer_parameters['num_layers']), m_size=self.m_size, normalize=False, use_recurrent=trainer_parameters['use_recurrent'], brain=self.brain) self.inference_run_list = [self.model.sample_action] if self.use_recurrent: self.inference_run_list += [self.model.memory_out] def __str__(self): return '''Hyperparameters for the Imitation Trainer of brain {0}: \n{1}'''.format( self.brain_name, '\n'.join(['\t{0}:\t{1}'.format(x, self.trainer_parameters[x]) for x in self.param_keys])) @property def parameters(self): """ Returns the trainer parameters of the trainer. """ return self.trainer_parameters @property def graph_scope(self): """ Returns the graph scope of the trainer. """ return self.variable_scope @property def get_max_steps(self): """ Returns the maximum number of steps. Is used to know when the trainer should be stopped. 
:return: The maximum number of steps of the trainer """ return float(self.trainer_parameters['max_steps']) @property def get_step(self): """ Returns the number of steps the trainer has performed :return: the step count of the trainer """ return self.sess.run(self.model.global_step) @property def get_last_reward(self): """ Returns the last reward the trainer has had :return: the new last reward """ if len(self.stats['cumulative_reward']) > 0: return np.mean(self.stats['cumulative_reward']) else: return 0 def increment_step_and_update_last_reward(self): """ Increment the step count of the trainer and Updates the last reward """ self.sess.run(self.model.increment_step) return def take_action(self, all_brain_info: AllBrainInfo): """ Decides actions given state/observation information, and takes them in environment. :param all_brain_info: AllBrainInfo from environment. :return: a tuple containing action, memories, values and an object to be passed to add experiences """ if len(all_brain_info[self.brain_name].agents) == 0: return [], [], [], None agent_brain = all_brain_info[self.brain_name] feed_dict = {self.model.dropout_rate: 1.0, self.model.sequence_length: 1} if self.use_visual_observations: for i, _ in enumerate(agent_brain.visual_observations): feed_dict[self.model.visual_in[i]] = agent_brain.visual_observations[i] if self.use_vector_observations: feed_dict[self.model.vector_in] = agent_brain.vector_observations if self.use_recurrent: if agent_brain.memories.shape[1] == 0: agent_brain.memories = np.zeros((len(agent_brain.agents), self.m_size)) feed_dict[self.model.memory_in] = agent_brain.memories agent_action, memories = self.sess.run(self.inference_run_list, feed_dict) return agent_action, memories, None, None else: agent_action = self.sess.run(self.inference_run_list, feed_dict) return agent_action, None, None, None def add_experiences(self, curr_info: AllBrainInfo, next_info: AllBrainInfo, take_action_outputs): """ Adds experiences to each agent's experience 
history. :param curr_info: Current AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo). :param next_info: Next AllBrainInfo (Dictionary of all current brains and corresponding BrainInfo). :param take_action_outputs: The outputs of the take action method. """ # Used to collect teacher experience into training buffer info_teacher = curr_info[self.brain_to_imitate] next_info_teacher = next_info[self.brain_to_imitate] for agent_id in info_teacher.agents: self.training_buffer[agent_id].last_brain_info = info_teacher for agent_id in next_info_teacher.agents: stored_info_teacher = self.training_buffer[agent_id].last_brain_info if stored_info_teacher is None: continue else: idx = stored_info_teacher.agents.index(agent_id) next_idx = next_info_teacher.agents.index(agent_id) if stored_info_teacher.text_observations[idx] != "": info_teacher_record, info_teacher_reset = \ stored_info_teacher.text_observations[idx].lower().split(",") next_info_teacher_record, next_info_teacher_reset = next_info_teacher.text_observations[idx].\ lower().split(",") if next_info_teacher_reset == "true": self.training_buffer.reset_update_buffer() else: info_teacher_record, next_info_teacher_record = "true", "true" if info_teacher_record == "true" and next_info_teacher_record == "true": if not stored_info_teacher.local_done[idx]: if self.use_visual_observations: for i, _ in enumerate(stored_info_teacher.visual_observations): self.training_buffer[agent_id]['visual_observations%d' % i]\ .append(stored_info_teacher.visual_observations[i][idx]) if self.use_vector_observations: self.training_buffer[agent_id]['vector_observations']\ .append(stored_info_teacher.vector_observations[idx]) if self.use_recurrent: if stored_info_teacher.memories.shape[1] == 0: stored_info_teacher.memories = np.zeros((len(stored_info_teacher.agents), self.m_size)) self.training_buffer[agent_id]['memory'].append(stored_info_teacher.memories[idx]) 
self.training_buffer[agent_id]['actions'].append(next_info_teacher. previous_vector_actions[next_idx]) info_student = curr_info[self.brain_name] next_info_student = next_info[self.brain_name] for agent_id in info_student.agents: self.training_buffer[agent_id].last_brain_info = info_student # Used to collect information about student performance. for agent_id in next_info_student.agents: stored_info_student = self.training_buffer[agent_id].last_brain_info if stored_info_student is None: continue else: next_idx = next_info_student.agents.index(agent_id) if agent_id not in self.cumulative_rewards: self.cumulative_rewards[agent_id] = 0 self.cumulative_rewards[agent_id] += next_info_student.rewards[next_idx] if not next_info_student.local_done[next_idx]: if agent_id not in self.episode_steps: self.episode_steps[agent_id] = 0 self.episode_steps[agent_id] += 1 def process_experiences(self, current_info: AllBrainInfo, next_info: AllBrainInfo): """ Checks agent histories for processing condition, and processes them as necessary. Processing involves calculating value and advantage targets for model updating step. 
:param current_info: Current AllBrainInfo :param next_info: Next AllBrainInfo """ info_teacher = next_info[self.brain_to_imitate] for l in range(len(info_teacher.agents)): if ((info_teacher.local_done[l] or len(self.training_buffer[info_teacher.agents[l]]['actions']) > self.trainer_parameters[ 'time_horizon']) and len(self.training_buffer[info_teacher.agents[l]]['actions']) > 0): agent_id = info_teacher.agents[l] self.training_buffer.append_update_buffer(agent_id, batch_size=None, training_length=self.sequence_length) self.training_buffer[agent_id].reset_agent() info_student = next_info[self.brain_name] for l in range(len(info_student.agents)): if info_student.local_done[l]: agent_id = info_student.agents[l] self.stats['cumulative_reward'].append( self.cumulative_rewards.get(agent_id, 0)) self.stats['episode_length'].append( self.episode_steps.get(agent_id, 0)) self.cumulative_rewards[agent_id] = 0 self.episode_steps[agent_id] = 0 def end_episode(self): """ A signal that the Episode has ended. The buffer must be reset. Get only called when the academy resets. """ self.training_buffer.reset_all() for agent_id in self.cumulative_rewards: self.cumulative_rewards[agent_id] = 0 for agent_id in self.episode_steps: self.episode_steps[agent_id] = 0 def is_ready_update(self): """ Returns whether or not the trainer has enough elements to run update model :return: A boolean corresponding to whether or not update_model() can be run """ return len(self.training_buffer.update_buffer['actions']) > self.n_sequences def update_model(self): """ Uses training_buffer to update model. 
""" self.training_buffer.update_buffer.shuffle() batch_losses = [] for j in range( min(len(self.training_buffer.update_buffer['actions']) // self.n_sequences, self.batches_per_epoch)): _buffer = self.training_buffer.update_buffer start = j * self.n_sequences end = (j + 1) * self.n_sequences feed_dict = {self.model.dropout_rate: 0.5, self.model.batch_size: self.n_sequences, self.model.sequence_length: self.sequence_length} if self.is_continuous_action: feed_dict[self.model.true_action] = np.array(_buffer['actions'][start:end]).\ reshape([-1, self.brain.vector_action_space_size]) else: feed_dict[self.model.true_action] = np.array(_buffer['actions'][start:end]).reshape([-1]) if self.use_vector_observations: feed_dict[self.model.vector_in] = np.array(_buffer['vector_observations'][start:end])\ .reshape([-1, self.brain.vector_observation_space_size * self.brain.num_stacked_vector_observations]) if self.use_visual_observations: for i, _ in enumerate(self.model.visual_in): _obs = np.array(_buffer['visual_observations%d' % i][start:end]) feed_dict[self.model.visual_in[i]] = _obs if self.use_recurrent: feed_dict[self.model.memory_in] = np.zeros([self.n_sequences, self.m_size]) loss, _ = self.sess.run([self.model.loss, self.model.update], feed_dict=feed_dict) batch_losses.append(loss) if len(batch_losses) > 0: self.stats['losses'].append(np.mean(batch_losses)) else: self.stats['losses'].append(0)
48.526144
121
0.630278
4a0144cc51b02001045db15e2663f12aa11133ed
631
py
Python
Hello.py
kristy2018/PRACTICA1.1
b5519facd235d3bd45afe42fddf47a8bf2cd636a
[ "MIT" ]
null
null
null
Hello.py
kristy2018/PRACTICA1.1
b5519facd235d3bd45afe42fddf47a8bf2cd636a
[ "MIT" ]
null
null
null
Hello.py
kristy2018/PRACTICA1.1
b5519facd235d3bd45afe42fddf47a8bf2cd636a
[ "MIT" ]
null
null
null
from pade.misc.utility import display_message from pade.misc.common import set_ams, start_loop from pade.core.agent import Agent from pade.acl.aid import AID class AgenteHelloWorld(Agent): def __init__(self, aid): super(AgenteHelloWorld, self).__init__(aid=aid, debug=False) display_message(self.aid.localname, 'Hello World!') if __name__ == '__main__': set_ams('localhost', 8000, debug=False) agents = list() agente_hello = AgenteHelloWorld(AID(name='agente_hello')) agente_hello.ams = {'name': 'localhost', 'port': 8000} agents.append(agente_hello) start_loop(agents, gui=True)
27.434783
68
0.719493
4a0144ffcf29cf22ba0363503849cc72c0e95d9e
12,039
py
Python
code/python/TimeSeriesAPIforDigitalPortals/v2/fds/sdk/TimeSeriesAPIforDigitalPortals/model/vendor_chart_iq_time_series_eod_list_meta.py
factset/enterprise-sdk
3fd4d1360756c515c9737a0c9a992c7451d7de7e
[ "Apache-2.0" ]
6
2022-02-07T16:34:18.000Z
2022-03-30T08:04:57.000Z
code/python/TimeSeriesAPIforDigitalPortals/v2/fds/sdk/TimeSeriesAPIforDigitalPortals/model/vendor_chart_iq_time_series_eod_list_meta.py
factset/enterprise-sdk
3fd4d1360756c515c9737a0c9a992c7451d7de7e
[ "Apache-2.0" ]
2
2022-02-07T05:25:57.000Z
2022-03-07T14:18:04.000Z
code/python/TimeSeriesAPIforDigitalPortals/v2/fds/sdk/TimeSeriesAPIforDigitalPortals/model/vendor_chart_iq_time_series_eod_list_meta.py
factset/enterprise-sdk
3fd4d1360756c515c9737a0c9a992c7451d7de7e
[ "Apache-2.0" ]
null
null
null
""" Prime Developer Trial No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: v1 Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from fds.sdk.TimeSeriesAPIforDigitalPortals.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from fds.sdk.TimeSeriesAPIforDigitalPortals.exceptions import ApiAttributeError def lazy_import(): from fds.sdk.TimeSeriesAPIforDigitalPortals.model.attributes_member import AttributesMember from fds.sdk.TimeSeriesAPIforDigitalPortals.model.vendor_chart_iq_time_series_eod_list_meta_pagination import VendorChartIQTimeSeriesEodListMetaPagination globals()['AttributesMember'] = AttributesMember globals()['VendorChartIQTimeSeriesEodListMetaPagination'] = VendorChartIQTimeSeriesEodListMetaPagination class VendorChartIQTimeSeriesEodListMeta(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). 
The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'attributes': (AttributesMember,), # noqa: E501 'pagination': (VendorChartIQTimeSeriesEodListMetaPagination,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'attributes': 'attributes', # noqa: E501 'pagination': 'pagination', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 """VendorChartIQTimeSeriesEodListMeta - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. 
snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) attributes (AttributesMember): [optional] # noqa: E501 pagination (VendorChartIQTimeSeriesEodListMetaPagination): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """VendorChartIQTimeSeriesEodListMeta - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. 
For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) attributes (AttributesMember): [optional] # noqa: E501 pagination (VendorChartIQTimeSeriesEodListMetaPagination): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
44.921642
158
0.58975
4a01455a7173574cc93985126b672a13357269be
58,838
py
Python
sdk/python/pulumi_aws/opsworks/static_web_layer.py
chivandikwa/pulumi-aws
19c08bf9dcb90544450ffa4eec7bf6751058fde2
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/opsworks/static_web_layer.py
chivandikwa/pulumi-aws
19c08bf9dcb90544450ffa4eec7bf6751058fde2
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_aws/opsworks/static_web_layer.py
chivandikwa/pulumi-aws
19c08bf9dcb90544450ffa4eec7bf6751058fde2
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._inputs import * __all__ = ['StaticWebLayerArgs', 'StaticWebLayer'] @pulumi.input_type class StaticWebLayerArgs: def __init__(__self__, *, stack_id: pulumi.Input[str], auto_assign_elastic_ips: Optional[pulumi.Input[bool]] = None, auto_assign_public_ips: Optional[pulumi.Input[bool]] = None, auto_healing: Optional[pulumi.Input[bool]] = None, cloudwatch_configuration: Optional[pulumi.Input['StaticWebLayerCloudwatchConfigurationArgs']] = None, custom_configure_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_deploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_instance_profile_arn: Optional[pulumi.Input[str]] = None, custom_json: Optional[pulumi.Input[str]] = None, custom_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_setup_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_shutdown_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_undeploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, drain_elb_on_shutdown: Optional[pulumi.Input[bool]] = None, ebs_volumes: Optional[pulumi.Input[Sequence[pulumi.Input['StaticWebLayerEbsVolumeArgs']]]] = None, elastic_load_balancer: Optional[pulumi.Input[str]] = None, install_updates_on_boot: Optional[pulumi.Input[bool]] = None, instance_shutdown_timeout: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, system_packages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, use_ebs_optimized_instances: 
Optional[pulumi.Input[bool]] = None): """ The set of arguments for constructing a StaticWebLayer resource. :param pulumi.Input[str] stack_id: The id of the stack the layer will belong to. :param pulumi.Input[bool] auto_assign_elastic_ips: Whether to automatically assign an elastic IP address to the layer's instances. :param pulumi.Input[bool] auto_assign_public_ips: For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. :param pulumi.Input[bool] auto_healing: Whether to enable auto-healing for the layer. :param pulumi.Input[str] custom_instance_profile_arn: The ARN of an IAM profile that will be used for the layer's instances. :param pulumi.Input[Sequence[pulumi.Input[str]]] custom_security_group_ids: Ids for a set of security groups to apply to the layer's instances. :param pulumi.Input[bool] drain_elb_on_shutdown: Whether to enable Elastic Load Balancing connection draining. :param pulumi.Input[Sequence[pulumi.Input['StaticWebLayerEbsVolumeArgs']]] ebs_volumes: `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances. :param pulumi.Input[str] elastic_load_balancer: Name of an Elastic Load Balancer to attach to this layer :param pulumi.Input[bool] install_updates_on_boot: Whether to install OS and package updates on each instance when it boots. :param pulumi.Input[int] instance_shutdown_timeout: The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. :param pulumi.Input[str] name: A human-readable name for the layer. :param pulumi.Input[Sequence[pulumi.Input[str]]] system_packages: Names of a set of system packages to install on the layer's instances. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. 
:param pulumi.Input[bool] use_ebs_optimized_instances: Whether to use EBS-optimized instances. """ pulumi.set(__self__, "stack_id", stack_id) if auto_assign_elastic_ips is not None: pulumi.set(__self__, "auto_assign_elastic_ips", auto_assign_elastic_ips) if auto_assign_public_ips is not None: pulumi.set(__self__, "auto_assign_public_ips", auto_assign_public_ips) if auto_healing is not None: pulumi.set(__self__, "auto_healing", auto_healing) if cloudwatch_configuration is not None: pulumi.set(__self__, "cloudwatch_configuration", cloudwatch_configuration) if custom_configure_recipes is not None: pulumi.set(__self__, "custom_configure_recipes", custom_configure_recipes) if custom_deploy_recipes is not None: pulumi.set(__self__, "custom_deploy_recipes", custom_deploy_recipes) if custom_instance_profile_arn is not None: pulumi.set(__self__, "custom_instance_profile_arn", custom_instance_profile_arn) if custom_json is not None: pulumi.set(__self__, "custom_json", custom_json) if custom_security_group_ids is not None: pulumi.set(__self__, "custom_security_group_ids", custom_security_group_ids) if custom_setup_recipes is not None: pulumi.set(__self__, "custom_setup_recipes", custom_setup_recipes) if custom_shutdown_recipes is not None: pulumi.set(__self__, "custom_shutdown_recipes", custom_shutdown_recipes) if custom_undeploy_recipes is not None: pulumi.set(__self__, "custom_undeploy_recipes", custom_undeploy_recipes) if drain_elb_on_shutdown is not None: pulumi.set(__self__, "drain_elb_on_shutdown", drain_elb_on_shutdown) if ebs_volumes is not None: pulumi.set(__self__, "ebs_volumes", ebs_volumes) if elastic_load_balancer is not None: pulumi.set(__self__, "elastic_load_balancer", elastic_load_balancer) if install_updates_on_boot is not None: pulumi.set(__self__, "install_updates_on_boot", install_updates_on_boot) if instance_shutdown_timeout is not None: pulumi.set(__self__, "instance_shutdown_timeout", instance_shutdown_timeout) if name is not None: pulumi.set(__self__, 
"name", name) if system_packages is not None: pulumi.set(__self__, "system_packages", system_packages) if tags is not None: pulumi.set(__self__, "tags", tags) if use_ebs_optimized_instances is not None: pulumi.set(__self__, "use_ebs_optimized_instances", use_ebs_optimized_instances) @property @pulumi.getter(name="stackId") def stack_id(self) -> pulumi.Input[str]: """ The id of the stack the layer will belong to. """ return pulumi.get(self, "stack_id") @stack_id.setter def stack_id(self, value: pulumi.Input[str]): pulumi.set(self, "stack_id", value) @property @pulumi.getter(name="autoAssignElasticIps") def auto_assign_elastic_ips(self) -> Optional[pulumi.Input[bool]]: """ Whether to automatically assign an elastic IP address to the layer's instances. """ return pulumi.get(self, "auto_assign_elastic_ips") @auto_assign_elastic_ips.setter def auto_assign_elastic_ips(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_assign_elastic_ips", value) @property @pulumi.getter(name="autoAssignPublicIps") def auto_assign_public_ips(self) -> Optional[pulumi.Input[bool]]: """ For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. """ return pulumi.get(self, "auto_assign_public_ips") @auto_assign_public_ips.setter def auto_assign_public_ips(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_assign_public_ips", value) @property @pulumi.getter(name="autoHealing") def auto_healing(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable auto-healing for the layer. 
""" return pulumi.get(self, "auto_healing") @auto_healing.setter def auto_healing(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_healing", value) @property @pulumi.getter(name="cloudwatchConfiguration") def cloudwatch_configuration(self) -> Optional[pulumi.Input['StaticWebLayerCloudwatchConfigurationArgs']]: return pulumi.get(self, "cloudwatch_configuration") @cloudwatch_configuration.setter def cloudwatch_configuration(self, value: Optional[pulumi.Input['StaticWebLayerCloudwatchConfigurationArgs']]): pulumi.set(self, "cloudwatch_configuration", value) @property @pulumi.getter(name="customConfigureRecipes") def custom_configure_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_configure_recipes") @custom_configure_recipes.setter def custom_configure_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_configure_recipes", value) @property @pulumi.getter(name="customDeployRecipes") def custom_deploy_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_deploy_recipes") @custom_deploy_recipes.setter def custom_deploy_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_deploy_recipes", value) @property @pulumi.getter(name="customInstanceProfileArn") def custom_instance_profile_arn(self) -> Optional[pulumi.Input[str]]: """ The ARN of an IAM profile that will be used for the layer's instances. 
""" return pulumi.get(self, "custom_instance_profile_arn") @custom_instance_profile_arn.setter def custom_instance_profile_arn(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "custom_instance_profile_arn", value) @property @pulumi.getter(name="customJson") def custom_json(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "custom_json") @custom_json.setter def custom_json(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "custom_json", value) @property @pulumi.getter(name="customSecurityGroupIds") def custom_security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Ids for a set of security groups to apply to the layer's instances. """ return pulumi.get(self, "custom_security_group_ids") @custom_security_group_ids.setter def custom_security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_security_group_ids", value) @property @pulumi.getter(name="customSetupRecipes") def custom_setup_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_setup_recipes") @custom_setup_recipes.setter def custom_setup_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_setup_recipes", value) @property @pulumi.getter(name="customShutdownRecipes") def custom_shutdown_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_shutdown_recipes") @custom_shutdown_recipes.setter def custom_shutdown_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_shutdown_recipes", value) @property @pulumi.getter(name="customUndeployRecipes") def custom_undeploy_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_undeploy_recipes") @custom_undeploy_recipes.setter def custom_undeploy_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): 
pulumi.set(self, "custom_undeploy_recipes", value) @property @pulumi.getter(name="drainElbOnShutdown") def drain_elb_on_shutdown(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable Elastic Load Balancing connection draining. """ return pulumi.get(self, "drain_elb_on_shutdown") @drain_elb_on_shutdown.setter def drain_elb_on_shutdown(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "drain_elb_on_shutdown", value) @property @pulumi.getter(name="ebsVolumes") def ebs_volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StaticWebLayerEbsVolumeArgs']]]]: """ `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances. """ return pulumi.get(self, "ebs_volumes") @ebs_volumes.setter def ebs_volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StaticWebLayerEbsVolumeArgs']]]]): pulumi.set(self, "ebs_volumes", value) @property @pulumi.getter(name="elasticLoadBalancer") def elastic_load_balancer(self) -> Optional[pulumi.Input[str]]: """ Name of an Elastic Load Balancer to attach to this layer """ return pulumi.get(self, "elastic_load_balancer") @elastic_load_balancer.setter def elastic_load_balancer(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "elastic_load_balancer", value) @property @pulumi.getter(name="installUpdatesOnBoot") def install_updates_on_boot(self) -> Optional[pulumi.Input[bool]]: """ Whether to install OS and package updates on each instance when it boots. """ return pulumi.get(self, "install_updates_on_boot") @install_updates_on_boot.setter def install_updates_on_boot(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "install_updates_on_boot", value) @property @pulumi.getter(name="instanceShutdownTimeout") def instance_shutdown_timeout(self) -> Optional[pulumi.Input[int]]: """ The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
""" return pulumi.get(self, "instance_shutdown_timeout") @instance_shutdown_timeout.setter def instance_shutdown_timeout(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "instance_shutdown_timeout", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ A human-readable name for the layer. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="systemPackages") def system_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Names of a set of system packages to install on the layer's instances. """ return pulumi.get(self, "system_packages") @system_packages.setter def system_packages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "system_packages", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="useEbsOptimizedInstances") def use_ebs_optimized_instances(self) -> Optional[pulumi.Input[bool]]: """ Whether to use EBS-optimized instances. 
""" return pulumi.get(self, "use_ebs_optimized_instances") @use_ebs_optimized_instances.setter def use_ebs_optimized_instances(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "use_ebs_optimized_instances", value) @pulumi.input_type class _StaticWebLayerState: def __init__(__self__, *, arn: Optional[pulumi.Input[str]] = None, auto_assign_elastic_ips: Optional[pulumi.Input[bool]] = None, auto_assign_public_ips: Optional[pulumi.Input[bool]] = None, auto_healing: Optional[pulumi.Input[bool]] = None, cloudwatch_configuration: Optional[pulumi.Input['StaticWebLayerCloudwatchConfigurationArgs']] = None, custom_configure_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_deploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_instance_profile_arn: Optional[pulumi.Input[str]] = None, custom_json: Optional[pulumi.Input[str]] = None, custom_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_setup_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_shutdown_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_undeploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, drain_elb_on_shutdown: Optional[pulumi.Input[bool]] = None, ebs_volumes: Optional[pulumi.Input[Sequence[pulumi.Input['StaticWebLayerEbsVolumeArgs']]]] = None, elastic_load_balancer: Optional[pulumi.Input[str]] = None, install_updates_on_boot: Optional[pulumi.Input[bool]] = None, instance_shutdown_timeout: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, stack_id: Optional[pulumi.Input[str]] = None, system_packages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, use_ebs_optimized_instances: Optional[pulumi.Input[bool]] = None): """ Input properties used for looking up 
and filtering StaticWebLayer resources. :param pulumi.Input[str] arn: The Amazon Resource Name(ARN) of the layer. :param pulumi.Input[bool] auto_assign_elastic_ips: Whether to automatically assign an elastic IP address to the layer's instances. :param pulumi.Input[bool] auto_assign_public_ips: For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. :param pulumi.Input[bool] auto_healing: Whether to enable auto-healing for the layer. :param pulumi.Input[str] custom_instance_profile_arn: The ARN of an IAM profile that will be used for the layer's instances. :param pulumi.Input[Sequence[pulumi.Input[str]]] custom_security_group_ids: Ids for a set of security groups to apply to the layer's instances. :param pulumi.Input[bool] drain_elb_on_shutdown: Whether to enable Elastic Load Balancing connection draining. :param pulumi.Input[Sequence[pulumi.Input['StaticWebLayerEbsVolumeArgs']]] ebs_volumes: `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances. :param pulumi.Input[str] elastic_load_balancer: Name of an Elastic Load Balancer to attach to this layer :param pulumi.Input[bool] install_updates_on_boot: Whether to install OS and package updates on each instance when it boots. :param pulumi.Input[int] instance_shutdown_timeout: The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. :param pulumi.Input[str] name: A human-readable name for the layer. :param pulumi.Input[str] stack_id: The id of the stack the layer will belong to. :param pulumi.Input[Sequence[pulumi.Input[str]]] system_packages: Names of a set of system packages to install on the layer's instances. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. 
.If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider . :param pulumi.Input[bool] use_ebs_optimized_instances: Whether to use EBS-optimized instances. """ if arn is not None: pulumi.set(__self__, "arn", arn) if auto_assign_elastic_ips is not None: pulumi.set(__self__, "auto_assign_elastic_ips", auto_assign_elastic_ips) if auto_assign_public_ips is not None: pulumi.set(__self__, "auto_assign_public_ips", auto_assign_public_ips) if auto_healing is not None: pulumi.set(__self__, "auto_healing", auto_healing) if cloudwatch_configuration is not None: pulumi.set(__self__, "cloudwatch_configuration", cloudwatch_configuration) if custom_configure_recipes is not None: pulumi.set(__self__, "custom_configure_recipes", custom_configure_recipes) if custom_deploy_recipes is not None: pulumi.set(__self__, "custom_deploy_recipes", custom_deploy_recipes) if custom_instance_profile_arn is not None: pulumi.set(__self__, "custom_instance_profile_arn", custom_instance_profile_arn) if custom_json is not None: pulumi.set(__self__, "custom_json", custom_json) if custom_security_group_ids is not None: pulumi.set(__self__, "custom_security_group_ids", custom_security_group_ids) if custom_setup_recipes is not None: pulumi.set(__self__, "custom_setup_recipes", custom_setup_recipes) if custom_shutdown_recipes is not None: pulumi.set(__self__, "custom_shutdown_recipes", custom_shutdown_recipes) if custom_undeploy_recipes is not None: pulumi.set(__self__, "custom_undeploy_recipes", custom_undeploy_recipes) if drain_elb_on_shutdown is not None: pulumi.set(__self__, "drain_elb_on_shutdown", drain_elb_on_shutdown) if ebs_volumes is not None: pulumi.set(__self__, "ebs_volumes", ebs_volumes) if elastic_load_balancer is not None: pulumi.set(__self__, 
"elastic_load_balancer", elastic_load_balancer) if install_updates_on_boot is not None: pulumi.set(__self__, "install_updates_on_boot", install_updates_on_boot) if instance_shutdown_timeout is not None: pulumi.set(__self__, "instance_shutdown_timeout", instance_shutdown_timeout) if name is not None: pulumi.set(__self__, "name", name) if stack_id is not None: pulumi.set(__self__, "stack_id", stack_id) if system_packages is not None: pulumi.set(__self__, "system_packages", system_packages) if tags is not None: pulumi.set(__self__, "tags", tags) if tags_all is not None: pulumi.set(__self__, "tags_all", tags_all) if use_ebs_optimized_instances is not None: pulumi.set(__self__, "use_ebs_optimized_instances", use_ebs_optimized_instances) @property @pulumi.getter def arn(self) -> Optional[pulumi.Input[str]]: """ The Amazon Resource Name(ARN) of the layer. """ return pulumi.get(self, "arn") @arn.setter def arn(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "arn", value) @property @pulumi.getter(name="autoAssignElasticIps") def auto_assign_elastic_ips(self) -> Optional[pulumi.Input[bool]]: """ Whether to automatically assign an elastic IP address to the layer's instances. """ return pulumi.get(self, "auto_assign_elastic_ips") @auto_assign_elastic_ips.setter def auto_assign_elastic_ips(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_assign_elastic_ips", value) @property @pulumi.getter(name="autoAssignPublicIps") def auto_assign_public_ips(self) -> Optional[pulumi.Input[bool]]: """ For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. 
""" return pulumi.get(self, "auto_assign_public_ips") @auto_assign_public_ips.setter def auto_assign_public_ips(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_assign_public_ips", value) @property @pulumi.getter(name="autoHealing") def auto_healing(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable auto-healing for the layer. """ return pulumi.get(self, "auto_healing") @auto_healing.setter def auto_healing(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "auto_healing", value) @property @pulumi.getter(name="cloudwatchConfiguration") def cloudwatch_configuration(self) -> Optional[pulumi.Input['StaticWebLayerCloudwatchConfigurationArgs']]: return pulumi.get(self, "cloudwatch_configuration") @cloudwatch_configuration.setter def cloudwatch_configuration(self, value: Optional[pulumi.Input['StaticWebLayerCloudwatchConfigurationArgs']]): pulumi.set(self, "cloudwatch_configuration", value) @property @pulumi.getter(name="customConfigureRecipes") def custom_configure_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_configure_recipes") @custom_configure_recipes.setter def custom_configure_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_configure_recipes", value) @property @pulumi.getter(name="customDeployRecipes") def custom_deploy_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_deploy_recipes") @custom_deploy_recipes.setter def custom_deploy_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_deploy_recipes", value) @property @pulumi.getter(name="customInstanceProfileArn") def custom_instance_profile_arn(self) -> Optional[pulumi.Input[str]]: """ The ARN of an IAM profile that will be used for the layer's instances. 
""" return pulumi.get(self, "custom_instance_profile_arn") @custom_instance_profile_arn.setter def custom_instance_profile_arn(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "custom_instance_profile_arn", value) @property @pulumi.getter(name="customJson") def custom_json(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "custom_json") @custom_json.setter def custom_json(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "custom_json", value) @property @pulumi.getter(name="customSecurityGroupIds") def custom_security_group_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Ids for a set of security groups to apply to the layer's instances. """ return pulumi.get(self, "custom_security_group_ids") @custom_security_group_ids.setter def custom_security_group_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_security_group_ids", value) @property @pulumi.getter(name="customSetupRecipes") def custom_setup_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_setup_recipes") @custom_setup_recipes.setter def custom_setup_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_setup_recipes", value) @property @pulumi.getter(name="customShutdownRecipes") def custom_shutdown_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_shutdown_recipes") @custom_shutdown_recipes.setter def custom_shutdown_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "custom_shutdown_recipes", value) @property @pulumi.getter(name="customUndeployRecipes") def custom_undeploy_recipes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: return pulumi.get(self, "custom_undeploy_recipes") @custom_undeploy_recipes.setter def custom_undeploy_recipes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): 
pulumi.set(self, "custom_undeploy_recipes", value) @property @pulumi.getter(name="drainElbOnShutdown") def drain_elb_on_shutdown(self) -> Optional[pulumi.Input[bool]]: """ Whether to enable Elastic Load Balancing connection draining. """ return pulumi.get(self, "drain_elb_on_shutdown") @drain_elb_on_shutdown.setter def drain_elb_on_shutdown(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "drain_elb_on_shutdown", value) @property @pulumi.getter(name="ebsVolumes") def ebs_volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StaticWebLayerEbsVolumeArgs']]]]: """ `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances. """ return pulumi.get(self, "ebs_volumes") @ebs_volumes.setter def ebs_volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StaticWebLayerEbsVolumeArgs']]]]): pulumi.set(self, "ebs_volumes", value) @property @pulumi.getter(name="elasticLoadBalancer") def elastic_load_balancer(self) -> Optional[pulumi.Input[str]]: """ Name of an Elastic Load Balancer to attach to this layer """ return pulumi.get(self, "elastic_load_balancer") @elastic_load_balancer.setter def elastic_load_balancer(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "elastic_load_balancer", value) @property @pulumi.getter(name="installUpdatesOnBoot") def install_updates_on_boot(self) -> Optional[pulumi.Input[bool]]: """ Whether to install OS and package updates on each instance when it boots. """ return pulumi.get(self, "install_updates_on_boot") @install_updates_on_boot.setter def install_updates_on_boot(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "install_updates_on_boot", value) @property @pulumi.getter(name="instanceShutdownTimeout") def instance_shutdown_timeout(self) -> Optional[pulumi.Input[int]]: """ The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
""" return pulumi.get(self, "instance_shutdown_timeout") @instance_shutdown_timeout.setter def instance_shutdown_timeout(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "instance_shutdown_timeout", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ A human-readable name for the layer. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="stackId") def stack_id(self) -> Optional[pulumi.Input[str]]: """ The id of the stack the layer will belong to. """ return pulumi.get(self, "stack_id") @stack_id.setter def stack_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "stack_id", value) @property @pulumi.getter(name="systemPackages") def system_packages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Names of a set of system packages to install on the layer's instances. """ return pulumi.get(self, "system_packages") @system_packages.setter def system_packages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "system_packages", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="tagsAll") def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A map of tags assigned to the resource, including those inherited from the provider . 
""" return pulumi.get(self, "tags_all") @tags_all.setter def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags_all", value) @property @pulumi.getter(name="useEbsOptimizedInstances") def use_ebs_optimized_instances(self) -> Optional[pulumi.Input[bool]]: """ Whether to use EBS-optimized instances. """ return pulumi.get(self, "use_ebs_optimized_instances") @use_ebs_optimized_instances.setter def use_ebs_optimized_instances(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "use_ebs_optimized_instances", value) class StaticWebLayer(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, auto_assign_elastic_ips: Optional[pulumi.Input[bool]] = None, auto_assign_public_ips: Optional[pulumi.Input[bool]] = None, auto_healing: Optional[pulumi.Input[bool]] = None, cloudwatch_configuration: Optional[pulumi.Input[pulumi.InputType['StaticWebLayerCloudwatchConfigurationArgs']]] = None, custom_configure_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_deploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_instance_profile_arn: Optional[pulumi.Input[str]] = None, custom_json: Optional[pulumi.Input[str]] = None, custom_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_setup_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_shutdown_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_undeploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, drain_elb_on_shutdown: Optional[pulumi.Input[bool]] = None, ebs_volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StaticWebLayerEbsVolumeArgs']]]]] = None, elastic_load_balancer: Optional[pulumi.Input[str]] = None, install_updates_on_boot: Optional[pulumi.Input[bool]] = None, instance_shutdown_timeout: Optional[pulumi.Input[int]] = None, 
name: Optional[pulumi.Input[str]] = None, stack_id: Optional[pulumi.Input[str]] = None, system_packages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, use_ebs_optimized_instances: Optional[pulumi.Input[bool]] = None, __props__=None): """ Provides an OpsWorks static web server layer resource. ## Example Usage ```python import pulumi import pulumi_aws as aws web = aws.opsworks.StaticWebLayer("web", stack_id=aws_opsworks_stack["main"]["id"]) ``` ## Import OpsWorks static web server Layers can be imported using the `id`, e.g., ```sh $ pulumi import aws:opsworks/staticWebLayer:StaticWebLayer bar 00000000-0000-0000-0000-000000000000 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[bool] auto_assign_elastic_ips: Whether to automatically assign an elastic IP address to the layer's instances. :param pulumi.Input[bool] auto_assign_public_ips: For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. :param pulumi.Input[bool] auto_healing: Whether to enable auto-healing for the layer. :param pulumi.Input[str] custom_instance_profile_arn: The ARN of an IAM profile that will be used for the layer's instances. :param pulumi.Input[Sequence[pulumi.Input[str]]] custom_security_group_ids: Ids for a set of security groups to apply to the layer's instances. :param pulumi.Input[bool] drain_elb_on_shutdown: Whether to enable Elastic Load Balancing connection draining. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StaticWebLayerEbsVolumeArgs']]]] ebs_volumes: `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances. 
:param pulumi.Input[str] elastic_load_balancer: Name of an Elastic Load Balancer to attach to this layer :param pulumi.Input[bool] install_updates_on_boot: Whether to install OS and package updates on each instance when it boots. :param pulumi.Input[int] instance_shutdown_timeout: The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. :param pulumi.Input[str] name: A human-readable name for the layer. :param pulumi.Input[str] stack_id: The id of the stack the layer will belong to. :param pulumi.Input[Sequence[pulumi.Input[str]]] system_packages: Names of a set of system packages to install on the layer's instances. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. :param pulumi.Input[bool] use_ebs_optimized_instances: Whether to use EBS-optimized instances. """ ... @overload def __init__(__self__, resource_name: str, args: StaticWebLayerArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Provides an OpsWorks static web server layer resource. ## Example Usage ```python import pulumi import pulumi_aws as aws web = aws.opsworks.StaticWebLayer("web", stack_id=aws_opsworks_stack["main"]["id"]) ``` ## Import OpsWorks static web server Layers can be imported using the `id`, e.g., ```sh $ pulumi import aws:opsworks/staticWebLayer:StaticWebLayer bar 00000000-0000-0000-0000-000000000000 ``` :param str resource_name: The name of the resource. :param StaticWebLayerArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(StaticWebLayerArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, auto_assign_elastic_ips: Optional[pulumi.Input[bool]] = None, auto_assign_public_ips: Optional[pulumi.Input[bool]] = None, auto_healing: Optional[pulumi.Input[bool]] = None, cloudwatch_configuration: Optional[pulumi.Input[pulumi.InputType['StaticWebLayerCloudwatchConfigurationArgs']]] = None, custom_configure_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_deploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_instance_profile_arn: Optional[pulumi.Input[str]] = None, custom_json: Optional[pulumi.Input[str]] = None, custom_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_setup_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_shutdown_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_undeploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, drain_elb_on_shutdown: Optional[pulumi.Input[bool]] = None, ebs_volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StaticWebLayerEbsVolumeArgs']]]]] = None, elastic_load_balancer: Optional[pulumi.Input[str]] = None, install_updates_on_boot: Optional[pulumi.Input[bool]] = None, instance_shutdown_timeout: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, stack_id: Optional[pulumi.Input[str]] = None, system_packages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, use_ebs_optimized_instances: 
Optional[pulumi.Input[bool]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = StaticWebLayerArgs.__new__(StaticWebLayerArgs) __props__.__dict__["auto_assign_elastic_ips"] = auto_assign_elastic_ips __props__.__dict__["auto_assign_public_ips"] = auto_assign_public_ips __props__.__dict__["auto_healing"] = auto_healing __props__.__dict__["cloudwatch_configuration"] = cloudwatch_configuration __props__.__dict__["custom_configure_recipes"] = custom_configure_recipes __props__.__dict__["custom_deploy_recipes"] = custom_deploy_recipes __props__.__dict__["custom_instance_profile_arn"] = custom_instance_profile_arn __props__.__dict__["custom_json"] = custom_json __props__.__dict__["custom_security_group_ids"] = custom_security_group_ids __props__.__dict__["custom_setup_recipes"] = custom_setup_recipes __props__.__dict__["custom_shutdown_recipes"] = custom_shutdown_recipes __props__.__dict__["custom_undeploy_recipes"] = custom_undeploy_recipes __props__.__dict__["drain_elb_on_shutdown"] = drain_elb_on_shutdown __props__.__dict__["ebs_volumes"] = ebs_volumes __props__.__dict__["elastic_load_balancer"] = elastic_load_balancer __props__.__dict__["install_updates_on_boot"] = install_updates_on_boot __props__.__dict__["instance_shutdown_timeout"] = instance_shutdown_timeout __props__.__dict__["name"] = name if stack_id is None and not opts.urn: raise TypeError("Missing required property 'stack_id'") __props__.__dict__["stack_id"] = stack_id __props__.__dict__["system_packages"] = system_packages __props__.__dict__["tags"] = tags __props__.__dict__["use_ebs_optimized_instances"] = 
use_ebs_optimized_instances __props__.__dict__["arn"] = None __props__.__dict__["tags_all"] = None super(StaticWebLayer, __self__).__init__( 'aws:opsworks/staticWebLayer:StaticWebLayer', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, arn: Optional[pulumi.Input[str]] = None, auto_assign_elastic_ips: Optional[pulumi.Input[bool]] = None, auto_assign_public_ips: Optional[pulumi.Input[bool]] = None, auto_healing: Optional[pulumi.Input[bool]] = None, cloudwatch_configuration: Optional[pulumi.Input[pulumi.InputType['StaticWebLayerCloudwatchConfigurationArgs']]] = None, custom_configure_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_deploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_instance_profile_arn: Optional[pulumi.Input[str]] = None, custom_json: Optional[pulumi.Input[str]] = None, custom_security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_setup_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_shutdown_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, custom_undeploy_recipes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, drain_elb_on_shutdown: Optional[pulumi.Input[bool]] = None, ebs_volumes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StaticWebLayerEbsVolumeArgs']]]]] = None, elastic_load_balancer: Optional[pulumi.Input[str]] = None, install_updates_on_boot: Optional[pulumi.Input[bool]] = None, instance_shutdown_timeout: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, stack_id: Optional[pulumi.Input[str]] = None, system_packages: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, use_ebs_optimized_instances: 
Optional[pulumi.Input[bool]] = None) -> 'StaticWebLayer': """ Get an existing StaticWebLayer resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] arn: The Amazon Resource Name(ARN) of the layer. :param pulumi.Input[bool] auto_assign_elastic_ips: Whether to automatically assign an elastic IP address to the layer's instances. :param pulumi.Input[bool] auto_assign_public_ips: For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. :param pulumi.Input[bool] auto_healing: Whether to enable auto-healing for the layer. :param pulumi.Input[str] custom_instance_profile_arn: The ARN of an IAM profile that will be used for the layer's instances. :param pulumi.Input[Sequence[pulumi.Input[str]]] custom_security_group_ids: Ids for a set of security groups to apply to the layer's instances. :param pulumi.Input[bool] drain_elb_on_shutdown: Whether to enable Elastic Load Balancing connection draining. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StaticWebLayerEbsVolumeArgs']]]] ebs_volumes: `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances. :param pulumi.Input[str] elastic_load_balancer: Name of an Elastic Load Balancer to attach to this layer :param pulumi.Input[bool] install_updates_on_boot: Whether to install OS and package updates on each instance when it boots. :param pulumi.Input[int] instance_shutdown_timeout: The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. :param pulumi.Input[str] name: A human-readable name for the layer. :param pulumi.Input[str] stack_id: The id of the stack the layer will belong to. 
:param pulumi.Input[Sequence[pulumi.Input[str]]] system_packages: Names of a set of system packages to install on the layer's instances. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider . :param pulumi.Input[bool] use_ebs_optimized_instances: Whether to use EBS-optimized instances. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _StaticWebLayerState.__new__(_StaticWebLayerState) __props__.__dict__["arn"] = arn __props__.__dict__["auto_assign_elastic_ips"] = auto_assign_elastic_ips __props__.__dict__["auto_assign_public_ips"] = auto_assign_public_ips __props__.__dict__["auto_healing"] = auto_healing __props__.__dict__["cloudwatch_configuration"] = cloudwatch_configuration __props__.__dict__["custom_configure_recipes"] = custom_configure_recipes __props__.__dict__["custom_deploy_recipes"] = custom_deploy_recipes __props__.__dict__["custom_instance_profile_arn"] = custom_instance_profile_arn __props__.__dict__["custom_json"] = custom_json __props__.__dict__["custom_security_group_ids"] = custom_security_group_ids __props__.__dict__["custom_setup_recipes"] = custom_setup_recipes __props__.__dict__["custom_shutdown_recipes"] = custom_shutdown_recipes __props__.__dict__["custom_undeploy_recipes"] = custom_undeploy_recipes __props__.__dict__["drain_elb_on_shutdown"] = drain_elb_on_shutdown __props__.__dict__["ebs_volumes"] = ebs_volumes __props__.__dict__["elastic_load_balancer"] = elastic_load_balancer __props__.__dict__["install_updates_on_boot"] = install_updates_on_boot __props__.__dict__["instance_shutdown_timeout"] = instance_shutdown_timeout __props__.__dict__["name"] = name 
__props__.__dict__["stack_id"] = stack_id __props__.__dict__["system_packages"] = system_packages __props__.__dict__["tags"] = tags __props__.__dict__["tags_all"] = tags_all __props__.__dict__["use_ebs_optimized_instances"] = use_ebs_optimized_instances return StaticWebLayer(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def arn(self) -> pulumi.Output[str]: """ The Amazon Resource Name(ARN) of the layer. """ return pulumi.get(self, "arn") @property @pulumi.getter(name="autoAssignElasticIps") def auto_assign_elastic_ips(self) -> pulumi.Output[Optional[bool]]: """ Whether to automatically assign an elastic IP address to the layer's instances. """ return pulumi.get(self, "auto_assign_elastic_ips") @property @pulumi.getter(name="autoAssignPublicIps") def auto_assign_public_ips(self) -> pulumi.Output[Optional[bool]]: """ For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances. """ return pulumi.get(self, "auto_assign_public_ips") @property @pulumi.getter(name="autoHealing") def auto_healing(self) -> pulumi.Output[Optional[bool]]: """ Whether to enable auto-healing for the layer. 
""" return pulumi.get(self, "auto_healing") @property @pulumi.getter(name="cloudwatchConfiguration") def cloudwatch_configuration(self) -> pulumi.Output[Optional['outputs.StaticWebLayerCloudwatchConfiguration']]: return pulumi.get(self, "cloudwatch_configuration") @property @pulumi.getter(name="customConfigureRecipes") def custom_configure_recipes(self) -> pulumi.Output[Optional[Sequence[str]]]: return pulumi.get(self, "custom_configure_recipes") @property @pulumi.getter(name="customDeployRecipes") def custom_deploy_recipes(self) -> pulumi.Output[Optional[Sequence[str]]]: return pulumi.get(self, "custom_deploy_recipes") @property @pulumi.getter(name="customInstanceProfileArn") def custom_instance_profile_arn(self) -> pulumi.Output[Optional[str]]: """ The ARN of an IAM profile that will be used for the layer's instances. """ return pulumi.get(self, "custom_instance_profile_arn") @property @pulumi.getter(name="customJson") def custom_json(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "custom_json") @property @pulumi.getter(name="customSecurityGroupIds") def custom_security_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]: """ Ids for a set of security groups to apply to the layer's instances. 
""" return pulumi.get(self, "custom_security_group_ids") @property @pulumi.getter(name="customSetupRecipes") def custom_setup_recipes(self) -> pulumi.Output[Optional[Sequence[str]]]: return pulumi.get(self, "custom_setup_recipes") @property @pulumi.getter(name="customShutdownRecipes") def custom_shutdown_recipes(self) -> pulumi.Output[Optional[Sequence[str]]]: return pulumi.get(self, "custom_shutdown_recipes") @property @pulumi.getter(name="customUndeployRecipes") def custom_undeploy_recipes(self) -> pulumi.Output[Optional[Sequence[str]]]: return pulumi.get(self, "custom_undeploy_recipes") @property @pulumi.getter(name="drainElbOnShutdown") def drain_elb_on_shutdown(self) -> pulumi.Output[Optional[bool]]: """ Whether to enable Elastic Load Balancing connection draining. """ return pulumi.get(self, "drain_elb_on_shutdown") @property @pulumi.getter(name="ebsVolumes") def ebs_volumes(self) -> pulumi.Output[Optional[Sequence['outputs.StaticWebLayerEbsVolume']]]: """ `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances. """ return pulumi.get(self, "ebs_volumes") @property @pulumi.getter(name="elasticLoadBalancer") def elastic_load_balancer(self) -> pulumi.Output[Optional[str]]: """ Name of an Elastic Load Balancer to attach to this layer """ return pulumi.get(self, "elastic_load_balancer") @property @pulumi.getter(name="installUpdatesOnBoot") def install_updates_on_boot(self) -> pulumi.Output[Optional[bool]]: """ Whether to install OS and package updates on each instance when it boots. """ return pulumi.get(self, "install_updates_on_boot") @property @pulumi.getter(name="instanceShutdownTimeout") def instance_shutdown_timeout(self) -> pulumi.Output[Optional[int]]: """ The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event. 
""" return pulumi.get(self, "instance_shutdown_timeout") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ A human-readable name for the layer. """ return pulumi.get(self, "name") @property @pulumi.getter(name="stackId") def stack_id(self) -> pulumi.Output[str]: """ The id of the stack the layer will belong to. """ return pulumi.get(self, "stack_id") @property @pulumi.getter(name="systemPackages") def system_packages(self) -> pulumi.Output[Optional[Sequence[str]]]: """ Names of a set of system packages to install on the layer's instances. """ return pulumi.get(self, "system_packages") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. """ return pulumi.get(self, "tags") @property @pulumi.getter(name="tagsAll") def tags_all(self) -> pulumi.Output[Mapping[str, str]]: """ A map of tags assigned to the resource, including those inherited from the provider . """ return pulumi.get(self, "tags_all") @property @pulumi.getter(name="useEbsOptimizedInstances") def use_ebs_optimized_instances(self) -> pulumi.Output[Optional[bool]]: """ Whether to use EBS-optimized instances. """ return pulumi.get(self, "use_ebs_optimized_instances")
51.748461
258
0.690608
4a014568b422f6c0b610015eea2a842d8ab46ef2
656
py
Python
GWP/2D/1.1.2/resample/cor.py
binggu56/qmd
e2628710de15f8a8b9a1280fcf92f9e87559414c
[ "MIT" ]
null
null
null
GWP/2D/1.1.2/resample/cor.py
binggu56/qmd
e2628710de15f8a8b9a1280fcf92f9e87559414c
[ "MIT" ]
null
null
null
GWP/2D/1.1.2/resample/cor.py
binggu56/qmd
e2628710de15f8a8b9a1280fcf92f9e87559414c
[ "MIT" ]
null
null
null
##!/usr/bin/python import numpy as np import pylab as plt import seaborn as sns sns.set_context('poster') plt.subplot(1,1,1) data = np.genfromtxt(fname='cor.dat') for x in range(1,data.shape[-1]): plt.plot(data[:,0],data[:,x],'k--',lw=2) plt.xlabel('$time$') plt.ylabel('$C(t)$') #plt.title('traj') #plt.subplot(2,1,2) data = np.genfromtxt(fname='/home/bing/gwp/spo_2d/1.0.3/cor1') # ##for x in range(1,3): plt.plot(data[:,0],data[:,1],label='$\Re(C(t))$',lw=1) plt.plot(data[:,0],data[:,2],label='$\Im(C(t))$',lw=1) #z = np.sqrt(data[:,1]**2+data[:,2]**2) #plt.plot(data[:,0],z,label='$|C(t)|$',lw=1) plt.xlim(0,4) plt.legend() plt.show()
21.16129
63
0.603659
4a0145b9a9f5cc3818640d0aeae3806cf4d572f0
379
py
Python
sp_api/base/sales_enum.py
Camille-cmd/python-amazon-sp-api
00a169a0b16700e1498550742f384de74fca4aa3
[ "MIT" ]
213
2020-12-20T09:43:47.000Z
2022-03-30T10:00:41.000Z
sp_api/base/sales_enum.py
Camille-cmd/python-amazon-sp-api
00a169a0b16700e1498550742f384de74fca4aa3
[ "MIT" ]
173
2021-01-02T10:24:27.000Z
2022-03-31T08:27:48.000Z
sp_api/base/sales_enum.py
Camille-cmd/python-amazon-sp-api
00a169a0b16700e1498550742f384de74fca4aa3
[ "MIT" ]
109
2021-01-06T03:16:47.000Z
2022-03-24T11:41:29.000Z
from enum import Enum class Granularity(str, Enum): HOUR = 'Hour' DAY = 'Day' WEEK = 'Week' MONTH = 'Month' YEAR = 'Year' TOTAL = 'Total' class BuyerType(str, Enum): B2B = 'B2B' # Business to business. B2C = 'B2C' # Business to customer. ALL = 'All' # Both of above class FirstDayOfWeek(str, Enum): MO = 'Monday' SU = 'Sunday'
15.791667
39
0.577836
4a0145da101f32c2a7f335df35cd0a891186b29c
869
py
Python
rect_win.py
aletbm/PruebaPSD
8038ed129e172c6a28886e459d2f7d2be822e0e2
[ "MIT" ]
null
null
null
rect_win.py
aletbm/PruebaPSD
8038ed129e172c6a28886e459d2f7d2be822e0e2
[ "MIT" ]
null
null
null
rect_win.py
aletbm/PruebaPSD
8038ed129e172c6a28886e459d2f7d2be822e0e2
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Aug 26 20:44:40 2020 @author: Mariano Llamedo Soria llamedom@frba.utn.edu.ar Analizamos el módulo de una ventana rectangular de acuerdo al resultado calculado algebraicamente. """ import numpy as np import matplotlib.pyplot as plt import scipy.signal as sig N = 1000 kk = np.arange(start=-10, stop=10, step= 1/10 ) kernel_dirchlet = np.sin(np.pi * kk) / np.sin(np.pi * kk / N) # kernel_dirchlet[ kk == 0 ] = N plt.close('all') plt.figure(1) plt.plot(kk, np.abs(kernel_dirchlet)) plt.plot( np.array([-10, 10]), np.array([0, 0]), ':k' ) # plt.plot( np.array([0, N]), np.array([q/2, q/2]), '--r' ) # plt.title( 'Ruido de cuantización para {:d} bits - V_R={:3.1f} V - q = {:3.3f} V'.format(B, Vref, q)) # plt.ylabel('Amplitud ruido [V]') # plt.xlabel('Muestras [#]')
27.15625
104
0.627158
4a0145fce79484389cf7ba42ab97d17c7cc56b9c
17,653
py
Python
sonnet_finder.py
mbollmann/sonnet-finder
8f74b0ce21b62309f33159474a4130be23b0fa87
[ "MIT" ]
10
2021-08-20T10:36:15.000Z
2021-11-03T08:16:04.000Z
sonnet_finder.py
mbollmann/sonnet-finder
8f74b0ce21b62309f33159474a4130be23b0fa87
[ "MIT" ]
null
null
null
sonnet_finder.py
mbollmann/sonnet-finder
8f74b0ce21b62309f33159474a4130be23b0fa87
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 """Usage: sonnet-finder.py TEXTFILE [options] Finds snippets in iambic pentameter in an English-language text and tries to combine them to a rhyming sonnet. Arguments: TEXTFILE Text file to produce a sonnet from. Options: -o, --output TSVFILE If given, writes all found candidate phrases along with their slant rhyme classes to OUTFILE, in tsv format. -s, --similarity SCORE Similarity threshold for slant rhymes; suggested values: * 0.0 for more conservative rhyming * -0.6 for the value used by Ghazvininejad et al. [default: -0.6] --debug Output debug-level info about what the script is doing. --help This helpful text. """ from docopt import docopt import logging from collections import defaultdict import string import random from rich.console import Console from rich.logging import RichHandler from rich.progress import track logging.basicConfig( level="INFO", format="%(message)s", datefmt="[%X]", handlers=[RichHandler(markup=True)], ) log = logging.getLogger("rich") console = Console() from g2p_en import G2p from g2p_en.expand import normalize_numbers import re import unicodedata import nltk from nltk.tokenize import TweetTokenizer word_tokenize = TweetTokenizer().tokenize try: nltk.data.find("tokenizers/punkt/english.pickle") except LookupError: nltk.download("punkt") sent_splitter = nltk.data.load("tokenizers/punkt/english.pickle") sentence_split = sent_splitter.tokenize iambic_pentameter = re.compile(r"(?=(01){5})") # Similarity scores for non-equal phoneme pairs, taken from Hirjee & Brown (2010), # Tables 1 & 2. I only wrote down pairs with a score of -1.0 or higher. # Used to determine which phoneme pairs are "similar enough" to produce slant # rhymes. 
Paper: # <https://kb.osu.edu/bitstream/handle/1811/48548/1/EMR000091a-Hirjee_Brown.pdf> SIMILARITY_SCORES = { ("AA", "AH"): -0.8, ("AA", "AO"): 1.6, ("AA", "ER"): -0.6, ("AA", "OW"): -1.0, ("AH", "EH"): -0.6, ("AH", "ER"): -0.2, ("AH", "IH"): -0.3, ("AH", "OW"): -1.0, ("AH", "OY"): -0.6, ("AH", "UH"): -0.9, ("AO", "AW"): -1.0, ("AO", "OW"): -0.3, ("AO", "OY"): -0.4, ("AO", "UH"): 1.1, ("AW", "AY"): -0.3, ("EH", "IH"): 0.2, ("IH", "IY"): -0.9, ("IH", "OY"): 0.2, ("OY", "UH"): 0.1, ("UH", "UW"): -0.5, ("B", "D"): 1.1, ("B", "DH"): 0.4, ("B", "G"): 1.9, ("B", "JH"): 1.9, ("B", "L"): -0.3, ("B", "M"): -0.5, ("B", "P"): 0.1, ("B", "R"): -0.9, ("B", "T"): -1.0, ("B", "V"): 2.3, ("B", "Z"): 0.3, ("CH", "F"): -0.3, ("CH", "G"): 0.2, ("CH", "JH"): 0.4, ("CH", "K"): 1.5, ("CH", "P"): 1.1, ("CH", "S"): 0.3, ("CH", "SH"): 0.6, ("CH", "T"): 0.9, ("CH", "TH"): 1.4, ("D", "G"): 0.1, ("D", "JH"): 0.2, ("D", "R"): -0.9, ("D", "T"): 0.2, ("D", "TH"): 0.0, ("D", "V"): -0.2, ("D", "Z"): 0.0, ("DH", "K"): -0.4, ("DH", "L"): -0.2, ("DH", "T"): -0.3, ("DH", "TH"): 1.3, ("DH", "V"): 2.3, ("DH", "Z"): 1.1, ("F", "K"): -0.3, ("F", "P"): 1.1, ("F", "S"): 1.0, ("F", "SH"): 1.2, ("F", "T"): -0.9, ("F", "TH"): 4.0, ("F", "V"): 0.6, ("G", "JH"): 1.8, ("G", "K"): 0.0, ("G", "L"): -0.2, ("G", "M"): -1.0, ("G", "P"): -0.7, ("G", "R"): -0.8, ("G", "V"): 0.3, ("G", "Z"): -0.3, ("JH", "M"): 0.1, ("JH", "N"): -0.5, ("JH", "P"): -0.2, ("JH", "R"): -0.3, ("JH", "S"): -0.6, ("JH", "SH"): 0.6, ("JH", "V"): 1.4, ("JH", "Z"): 1.0, ("JH", "ZH"): 4.1, ("K", "P"): 1.7, ("K", "S"): -0.7, ("K", "SH"): -0.6, ("K", "T"): 0.9, ("K", "TH"): 0.5, ("L", "R"): -0.5, ("M", "N"): 1.8, ("M", "NG"): 0.7, ("M", "TH"): 0.4, ("M", "V"): -0.6, ("N", "NG"): 1.2, ("N", "R"): -1.0, ("N", "SH"): -0.7, ("N", "TH"): -0.6, ("P", "SH"): -0.7, ("P", "T"): 1.1, ("P", "TH"): 0.9, ("P", "V"): -0.5, ("R", "SH"): -0.8, ("S", "SH"): 2.4, ("S", "T"): -1.0, ("S", "TH"): 1.0, ("S", "Z"): 0.5, ("S", "ZH"): 0.0, ("SH", "T"): -0.6, ("SH", "Z"): 
-0.2, ("SH", "ZH"): 3.6, ("T", "TH"): 1.6, ("T", "V"): -0.8, ("TH", "V"): 0.5, ("V", "Z"): -0.4, ("V", "ZH"): 1.6, ("Z", "ZH"): 3.0, } SIMILARITY_LIMIT = -0.05 def is_natural_language(line): """Determine if a line is likely to be actual natural language text, as opposed to, e.g., LaTeX formulas or tables. Pretty crude heuristic, but as long as it filters out most of the bad stuff it's okay, I guess.""" line = line.strip() if len(line) < 5: return False if "\\" in line or line[0] == "$" or line[-1] == "$": # probably contains a LaTeX formula; skip return False if line.count(" ") < 2: # maybe a headline, or a fragment return False if " " in line or not line.replace("-", "").strip(): # table? return False return True def get_stress_and_boundaries(pron): """Determines stress patterns and word boundaries.""" # For stress pattern, look at all vowel phonemes, and preserve word # boundaries (for now) pattern = [phon[-1] for phon in pron if phon[-1] in "012" or phon == " "] pattern = ( "".join(pattern) .replace("2", "1") # treat secondary stress like primary stress .replace("100 ", "101 ") # treat 100 at the end of a word like 101, # cf. 
https://aclanthology.org/D16-1126, §3 ) if pattern[-3:] == "100": # same as above pattern = pattern[:-3] + "101" # For each vowel, produce "1" if it's at the beginning of a word and "0" # otherwise; used to make sure the candidates we extract coincide with word # boundaries bound = "".join( "1" if b == " " else "0" for (a, b) in zip(pattern, " " + pattern) if a != " " ) # For each vowel, remember which word (by index) it belongs to so we can # more easily extract the corresponding words later i = 0 wordidx = [] for p in pattern: if p == " ": i += 1 else: wordidx.append(i) wordidx.append(len(pattern)) pattern = pattern.replace(" ", "") return pattern, bound, wordidx def extract_phrases(line, pron): """Finds candidate phrases in iambic pentameter from a given line and its pronunciation.""" # First, extract the stress pattern and scan for iambic pentameter stress, bound, wordidx = get_stress_and_boundaries(pron) words, phon_by_word = None, None for match in iambic_pentameter.finditer(stress): idx = match.start() # Distinguish two possible types of rhyme: masculine (ends in stressed # vowel) and feminine (ends in an additional unstressed vowel); # depending on where the word boundaries fall, a candidate phrase can be # suitable for only one of these or both has_masc, has_fem = False, False if bound[idx] != "1": continue # start of the pattern does not coincide with word # boundary; discard if len(bound) <= (idx + 10): # pattern is at the end of the line; can # use for masc rhyme but not fem has_masc = True else: if bound[idx + 10] == "1": has_masc = True # pattern ends at word boundary, can use for masc rhyme if stress[idx + 10] == "0" and ( len(bound) <= (idx + 11) or bound[idx + 11] == "1" ): has_fem = True # there is another unstressed vowel after the # pattern and it ends at word boundary, can use # for fem rhyme if not (has_masc or has_fem): continue # The rest of this function just reconstructs the matched section from # `line` and `pron`. 
We also return the pronunciation in order to check # for rhyme later. if words is None: words = g2p_preprocess(line) phon_by_word = "÷".join(pron).split(" ") if has_masc: phrase = words[wordidx[idx] : wordidx[idx + 10]] if phrase[-1] in string.punctuation: phrase = phrase[:-1] phrase_phon = ( "".join(phon_by_word[wordidx[idx] : wordidx[idx + 10]]) .replace("÷", " ") .split() ) log.debug(" ".join(phrase)) yield (phrase, phrase_phon) if has_fem: phrase = words[wordidx[idx] : wordidx[idx + 11]] if phrase[-1] in string.punctuation: phrase = phrase[:-1] phrase_phon = ( "".join(phon_by_word[wordidx[idx] : wordidx[idx + 11]]) .replace("÷", " ") .split() ) log.debug(" ".join(phrase)) yield (phrase, phrase_phon) def g2p_preprocess(text): """Preprocess text like the g2p_en library does. Unfortunately the library does not encapsulate this in its own function, so we copy the relevant passage here. Used in order to map the stress patterns (detected from the g2p_en output) back to orthographic words. 
Adapted with minimal changes from <https://github.com/Kyubyong/g2p/blob/c6439c274c42b9724a7fee1dc07ca6a4c68a0538/g2p_en/g2p.py#L148-L161> by Kyubyong Park & Jongseok Kim, licensed under Apache 2.0 <https://www.apache.org/licenses/LICENSE-2.0> """ text = normalize_numbers(text) text = "".join( char for char in unicodedata.normalize("NFD", text) if unicodedata.category(char) != "Mn" ) # Strip accents text = text.lower() text = re.sub("[^ a-z'.,?!\-]", "", text) text = text.replace("i.e.", "that is") text = text.replace("e.g.", "for example") # tokenization words = word_tokenize(text) return words def hash_by_strict_rhyme(candidates): """Computes a dictionary with strict rhyme classes as keys and candidate phrases as values.""" rhyme = defaultdict(set) for (phrase, pron) in candidates: rhyme_class = [] for phoneme in pron[::-1]: if phoneme[0] not in "ABCDEFGHIJKLMNOPRSTUVWYZ": continue phoneme = phoneme.replace("2", "1") rhyme_class.insert(0, phoneme) if phoneme[-1] == "1": break rhyme_class = "—".join(rhyme_class) rhyme[rhyme_class].add(tuple(phrase)) return rhyme def hash_by_slant_rhyme(candidates): """Computes a dictionary with slant rhyme classes as keys and candidate phrases as values. Candidate phrases here actually means a tuple of the orthographic phrase and its full phonemes, in order to be able to analyze the rhyme further later. 
""" rhyme = defaultdict(set) for (phrase, pron) in candidates: rhyme_class = [] rhyme_phonemes = [] vowels_seen = 0 inserted_placeholder = False for phoneme in pron[::-1]: if phoneme[0] not in "ABCDEFGHIJKLMNOPRSTUVWYZ": continue phoneme = phoneme.replace("2", "1") rhyme_phonemes.insert(0, phoneme) if phoneme[-1] not in "01": if vowels_seen == 0: rhyme_class.insert(0, phoneme) elif vowels_seen == 1 and not inserted_placeholder: rhyme_class.insert(0, "*") inserted_placeholder = True else: pass else: # vowel if vowels_seen < 2: rhyme_class.insert(0, phoneme) if phoneme[-1] == "1": vowels_seen = 2 else: vowels_seen = 1 if vowels_seen > 1: break rhyme_class = "—".join(rhyme_class) rhyme[rhyme_class].add((tuple(phrase), tuple(rhyme_phonemes))) return rhyme def are_similar(phoneme1, phoneme2): """Check if two phonemes are similar enough as determined by the similarity scores and limit (see above).""" if phoneme1 == phoneme2: return True score = SIMILARITY_SCORES.get( (phoneme1, phoneme2), SIMILARITY_SCORES.get((phoneme2, phoneme1), -99) ) return score > SIMILARITY_LIMIT def can_rhyme(phrase1, phrase2): """Check if two phrases can rhyme. This largely attempts to implement the algorithm by Ghazvininejad et al. (2016) <https://aclanthology.org/D16-1126>, Sec. 5.2. I also assume that we know that the two phrases belong to the same slant rhyme class as returned by `hash_by_slant_rhyme`. """ words1, pron1 = phrase1 words2, pron2 = phrase2 assert pron1[0][-1] == "1", " ".join(pron1) assert pron2[0][-1] == "1", " ".join(pron2) # We don't want to "rhyme" identical words if words1[-1] == words2[-1]: return False # Identical, strict rhymes? if "*" not in pron1 and pron1 == pron2: return True # Step 2. Replace ER with UH R while "ER" in pron1: idx = pron1.index("ER") pron1 = pron1[:idx] + ["UH", "R"] + pron1[idx + 1 :] while "ER" in pron2: idx = pron2.index("ER") pron2 = pron2[:idx] + ["UH", "R"] + pron2[idx + 1 :] # Step 3,4,5. 
Find v-x-w v1, v2 = pron1[0], pron2[0] if v1 != v2: return False # Step 6a; shouldn't happen x1, x2 = [], [] w1, w2 = None, None for phoneme in pron1[:0:-1]: if phoneme[-1] == "0": if w1 is None: w1 = phoneme elif w1 is not None: x1.insert(0, phoneme) for phoneme in pron2[:0:-1]: if phoneme[-1] == "0": if w2 is None: w2 = phoneme elif w2 is not None: x2.insert(0, phoneme) if w1 != w2: return False # Step 6b; shouldn't happen if (not x1) or (not x2): return False # Step 7. if len(x1) == 1 and len(x2) == 1: return are_similar(x1[0], x2[0]) # Step 8. if sum(1 for x in x1 if x[-1] == "0") != sum(1 for x in x2 if x[-1] == "0"): return False # Step 10. if x1[0] == x2[0] and are_similar(x1[-1], x2[-1]): return True # Step 11. if x1[-1] == x2[-1] and are_similar(x1[0], x2[0]): return True return False def main(args): g2p = G2p() with open(args["TEXTFILE"], "r") as f: lines = [l.strip() for l in f if l.strip()] log.debug(f"Read {len(lines)} lines from {args['TEXTFILE']}") candidates = [] for line in track(lines, description="Scanning for iambic pentameter..."): for sentence in sentence_split(line): if not is_natural_language(sentence): continue # split up hyphenated words, as this typically improves the # pronunciation prediction on these words sentence = sentence.replace("-", " - ") pron = g2p(sentence) candidates.extend(phrase for phrase in extract_phrases(sentence, pron)) log.info(f"Extracted {len(candidates)} candidate phrases.") # h_strict = hash_by_strict_rhyme(candidates) # log.info(f"Found {len(h_strict)} different strict rhyme classes.") h_slant = hash_by_slant_rhyme(candidates) log.debug(f"Found {len(h_slant)} different slant rhyme classes.") if args["--output"]: with open(args["--output"], "w") as f: for rhyme_class, examples in h_slant.items(): for phrase, pron in examples: phrase = " ".join(phrase) pron = " ".join(pron) print(f"{phrase}\t{rhyme_class}\t{pron}", file=f) log.info(f"Wrote candidates to [magenta]{args['--output']}[/]") log.debug( f"Trying to find 
rhyming pairs using similarity limit {SIMILARITY_LIMIT} ..." ) rhyme_pairs = defaultdict(list) for (rhyme_class, phrases) in h_slant.items(): if len(phrases) < 2: continue phrases = list(phrases) random.shuffle(phrases) while len(phrases) > 1: p_1 = phrases.pop() for p_2 in phrases: if can_rhyme(p_1, p_2): rhyme_pairs[rhyme_class].append((p_1[0], p_2[0])) break log.info(f"Found {len(rhyme_pairs)} rhyme classes with paired phrases.") classes = list(rhyme_pairs.keys()) random.shuffle(classes) stanzas = 0 while stanzas < 3 and len(classes) > 1: pair_a = random.choice(rhyme_pairs[classes.pop()]) pair_b = random.choice(rhyme_pairs[classes.pop()]) console.print("") console.print(" ".join(pair_a[0]), style="yellow italic") console.print(" ".join(pair_b[0]), style="yellow italic") console.print(" ".join(pair_a[1]), style="yellow italic") console.print(" ".join(pair_b[1]), style="yellow italic") stanzas += 1 if classes: pair = random.choice(rhyme_pairs[classes.pop()]) console.print("") console.print(" ".join(pair[0]), style="yellow italic") console.print(" ".join(pair[1]), style="yellow italic") console.print("") return 0 if __name__ == "__main__": args = docopt(__doc__) if args["--debug"]: log.setLevel("DEBUG") SIMILARITY_LIMIT = float(args["--similarity"]) r = main(args) exit(r)
31.636201
107
0.536963
4a0146fbe07be8b09766d7e70fab0c1ea8d14ba7
899
py
Python
example/example/urls.py
zmrenwu/django-mptt-comments
14c9b949d93a43c36357660282033f391195f629
[ "MIT" ]
32
2018-11-06T04:10:19.000Z
2020-08-26T02:34:48.000Z
example/example/urls.py
alice314272/django-mptt-comments
14c9b949d93a43c36357660282033f391195f629
[ "MIT" ]
2
2019-05-16T08:16:51.000Z
2020-05-14T14:43:07.000Z
example/example/urls.py
alice314272/django-mptt-comments
14c9b949d93a43c36357660282033f391195f629
[ "MIT" ]
7
2018-11-06T04:15:04.000Z
2020-09-09T10:26:58.000Z
"""example URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'', include('django_mptt_comments.urls')), url(r'', include('blog.urls')), url(r'', include('captcha.urls')), ]
35.96
79
0.689655
4a0147a9aeaad40710b10ddd85c0cc413f4c2654
90,662
py
Python
tests/block_tools.py
Chinilla/chinilla-blockchain
59bebcf94e65b74fbb53ad4929bbd79cb28be619
[ "Apache-2.0" ]
null
null
null
tests/block_tools.py
Chinilla/chinilla-blockchain
59bebcf94e65b74fbb53ad4929bbd79cb28be619
[ "Apache-2.0" ]
null
null
null
tests/block_tools.py
Chinilla/chinilla-blockchain
59bebcf94e65b74fbb53ad4929bbd79cb28be619
[ "Apache-2.0" ]
null
null
null
import asyncio import math import copy import logging import os import random import shutil import ssl import sys import tempfile import time from argparse import Namespace from dataclasses import replace from pathlib import Path from typing import Callable, Dict, List, Optional, Tuple, Any, Union from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey from chiabip158 import PyBIP158 from chinilla.cmds.init_funcs import create_all_ssl, create_default_chinilla_config from chinilla.daemon.keychain_proxy import connect_to_keychain_and_validate, wrap_local_keychain from chinilla.full_node.bundle_tools import ( best_solution_generator_from_template, detect_potential_template_generator, simple_solution_generator, ) from chinilla.util.errors import Err from chinilla.full_node.generator import setup_generator_args from chinilla.full_node.mempool_check_conditions import GENERATOR_MOD from chinilla.plotting.create_plots import create_plots, PlotKeys from chinilla.plotting.util import add_plot_directory from chinilla.consensus.block_creation import unfinished_block_to_full_block from chinilla.consensus.block_record import BlockRecord from chinilla.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward from chinilla.consensus.blockchain_interface import BlockchainInterface from chinilla.consensus.coinbase import create_puzzlehash_for_pk, create_farmer_coin, create_pool_coin from chinilla.consensus.condition_costs import ConditionCost from chinilla.consensus.constants import ConsensusConstants from chinilla.consensus.default_constants import DEFAULT_CONSTANTS from chinilla.consensus.deficit import calculate_deficit from chinilla.consensus.full_block_to_block_record import block_to_block_record from chinilla.consensus.make_sub_epoch_summary import next_sub_epoch_summary from chinilla.consensus.pot_iterations import ( calculate_ip_iters, calculate_iterations_quality, calculate_sp_interval_iters, calculate_sp_iters, is_overflow_block, ) from 
chinilla.consensus.vdf_info_computation import get_signage_point_vdf_info from chinilla.full_node.signage_point import SignagePoint from chinilla.plotting.util import PlotsRefreshParameter, PlotRefreshResult, PlotRefreshEvents, parse_plot_info from chinilla.plotting.manager import PlotManager from chinilla.server.server import ssl_context_for_client from chinilla.types.blockchain_format.classgroup import ClassgroupElement from chinilla.types.blockchain_format.coin import Coin, hash_coin_ids from chinilla.types.blockchain_format.foliage import ( Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo, ) from chinilla.types.blockchain_format.pool_target import PoolTarget from chinilla.types.blockchain_format.program import INFINITE_COST from chinilla.types.blockchain_format.proof_of_space import ProofOfSpace from chinilla.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished from chinilla.types.blockchain_format.sized_bytes import bytes32 from chinilla.types.blockchain_format.slots import ( ChallengeChainSubSlot, InfusedChallengeChainSubSlot, RewardChainSubSlot, SubSlotProofs, ) from chinilla.types.blockchain_format.sub_epoch_summary import SubEpochSummary from chinilla.types.blockchain_format.vdf import VDFInfo, VDFProof from chinilla.types.condition_opcodes import ConditionOpcode from chinilla.types.end_of_slot_bundle import EndOfSubSlotBundle from chinilla.types.full_block import FullBlock from chinilla.types.generator_types import BlockGenerator, CompressorArg from chinilla.types.spend_bundle import SpendBundle from chinilla.types.unfinished_block import UnfinishedBlock from chinilla.util.bech32m import encode_puzzle_hash from chinilla.util.block_cache import BlockCache from chinilla.util.config import load_config, lock_config, save_config, override_config from chinilla.util.default_root import DEFAULT_ROOT_PATH from chinilla.util.hash import std_hash from chinilla.util.ints import uint8, uint16, uint32, uint64, uint128 from 
chinilla.util.keychain import Keychain, bytes_to_mnemonic from chinilla.util.prev_transaction_block import get_prev_transaction_block from chinilla.util.path import mkdir from chinilla.util.vdf_prover import get_vdf_info_and_proof from tests.time_out_assert import time_out_assert_custom_interval from tests.wallet_tools import WalletTool from tests.util.socket import find_available_listen_port from tests.util.ssl_certs import get_next_nodes_certs_and_keys, get_next_private_ca_cert_and_key from chinilla.wallet.derive_keys import ( master_sk_to_farmer_sk, master_sk_to_local_sk, master_sk_to_pool_sk, master_sk_to_wallet_sk, ) from chia_rs import compute_merkle_set_root test_constants = DEFAULT_CONSTANTS.replace( **{ "MIN_PLOT_SIZE": 18, "MIN_BLOCKS_PER_CHALLENGE_BLOCK": 12, "DIFFICULTY_STARTING": 2 ** 10, "DISCRIMINANT_SIZE_BITS": 16, "SUB_EPOCH_BLOCKS": 170, "WEIGHT_PROOF_THRESHOLD": 2, "WEIGHT_PROOF_RECENT_BLOCKS": 380, "DIFFICULTY_CONSTANT_FACTOR": 33554432, "NUM_SPS_SUB_SLOT": 16, # Must be a power of 2 "MAX_SUB_SLOT_BLOCKS": 50, "EPOCH_BLOCKS": 340, "BLOCKS_CACHE_SIZE": 340 + 3 * 50, # Coordinate with the above values "SUB_SLOT_TIME_TARGET": 600, # The target number of seconds per slot, vanillanet 600 "SUB_SLOT_ITERS_STARTING": 2 ** 10, # Must be a multiple of 64 "NUMBER_ZERO_BITS_PLOT_FILTER": 1, # H(plot signature of the challenge) must start with these many zeroes "MAX_FUTURE_TIME": 3600 * 24 * 10, # Allows creating blockchains with timestamps up to 10 days in the future, for testing "COST_PER_BYTE": 1337, "MEMPOOL_BLOCK_BUFFER": 6, "NETWORK_TYPE": 1, } ) log = logging.getLogger(__name__) class BlockTools: """ Tools to generate blocks for testing. 
""" _block_cache_header: bytes32 _block_cache_height_to_hash: Dict[uint32, bytes32] _block_cache_difficulty: uint64 _block_cache: Dict[bytes32, BlockRecord] def __init__( self, constants: ConsensusConstants = test_constants, root_path: Optional[Path] = None, const_dict=None, keychain: Optional[Keychain] = None, config_overrides: Optional[Dict] = None, ): self._block_cache_header = bytes32([0] * 32) self._tempdir = None if root_path is None: self._tempdir = tempfile.TemporaryDirectory() root_path = Path(self._tempdir.name) self.root_path = root_path self.local_keychain = keychain self._block_time_residual = 0.0 create_default_chinilla_config(root_path) create_all_ssl( root_path, private_ca_crt_and_key=get_next_private_ca_cert_and_key(), node_certs_and_keys=get_next_nodes_certs_and_keys(), ) self.local_sk_cache: Dict[bytes32, Tuple[PrivateKey, Any]] = {} self._config = load_config(self.root_path, "config.yaml") self._config["logging"]["log_stdout"] = True self._config["selected_network"] = "testnet0" for service in ["harvester", "farmer", "full_node", "wallet", "introducer", "timelord", "pool"]: self._config[service]["selected_network"] = "testnet0" # some tests start the daemon, make sure it's on a free port self._config["daemon_port"] = find_available_listen_port("BlockTools daemon") self._config = override_config(self._config, config_overrides) with lock_config(self.root_path, "config.yaml"): save_config(self.root_path, "config.yaml", self._config) overrides = self._config["network_overrides"]["constants"][self._config["selected_network"]] updated_constants = constants.replace_str_to_bytes(**overrides) if const_dict is not None: updated_constants = updated_constants.replace(**const_dict) self.constants = updated_constants self.plot_dir: Path = get_plot_dir() self.temp_dir: Path = get_plot_tmp_dir() mkdir(self.plot_dir) mkdir(self.temp_dir) self.expected_plots: Dict[bytes32, Path] = {} self.created_plots: int = 0 self.total_result = PlotRefreshResult() def 
test_callback(event: PlotRefreshEvents, update_result: PlotRefreshResult): assert update_result.duration < 5 if event == PlotRefreshEvents.started: self.total_result = PlotRefreshResult() if event == PlotRefreshEvents.batch_processed: self.total_result.loaded += update_result.loaded self.total_result.processed += update_result.processed self.total_result.duration += update_result.duration assert update_result.remaining == len(self.expected_plots) - self.total_result.processed assert len(update_result.loaded) <= self.plot_manager.refresh_parameter.batch_size if event == PlotRefreshEvents.done: assert self.total_result.loaded == update_result.loaded assert self.total_result.processed == update_result.processed assert self.total_result.duration == update_result.duration assert update_result.remaining == 0 assert len(self.plot_manager.plots) == len(self.expected_plots) self.plot_manager: PlotManager = PlotManager( self.root_path, refresh_parameter=PlotsRefreshParameter(batch_size=uint32(2)), refresh_callback=test_callback, ) async def setup_keys(self): if self.local_keychain: self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=log) else: self.keychain_proxy = await connect_to_keychain_and_validate( self.root_path, log, user="testing-1.8.0", service="chinilla-testing-1.8.0" ) await self.keychain_proxy.delete_all_keys() self.farmer_master_sk_entropy = std_hash(b"block_tools farmer key") self.pool_master_sk_entropy = std_hash(b"block_tools pool key") self.farmer_master_sk = await self.keychain_proxy.add_private_key( bytes_to_mnemonic(self.farmer_master_sk_entropy), "" ) self.pool_master_sk = await self.keychain_proxy.add_private_key( bytes_to_mnemonic(self.pool_master_sk_entropy), "" ) self.farmer_pk = master_sk_to_farmer_sk(self.farmer_master_sk).get_g1() self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1() self.farmer_ph: bytes32 = create_puzzlehash_for_pk( master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1() ) self.pool_ph: 
bytes32 = create_puzzlehash_for_pk( master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1() ) self.all_sks: List[PrivateKey] = [sk for sk, _ in await self.keychain_proxy.get_all_private_keys()] self.pool_pubkeys: List[G1Element] = [master_sk_to_pool_sk(sk).get_g1() for sk in self.all_sks] self.farmer_pubkeys: List[G1Element] = [master_sk_to_farmer_sk(sk).get_g1() for sk in self.all_sks] if len(self.pool_pubkeys) == 0 or len(self.farmer_pubkeys) == 0: raise RuntimeError("Keys not generated. Run `chinilla generate keys`") self.plot_manager.set_public_keys(self.farmer_pubkeys, self.pool_pubkeys) def change_config(self, new_config: Dict): self._config = new_config overrides = self._config["network_overrides"]["constants"][self._config["selected_network"]] updated_constants = self.constants.replace_str_to_bytes(**overrides) self.constants = updated_constants with lock_config(self.root_path, "config.yaml"): save_config(self.root_path, "config.yaml", self._config) def add_plot_directory(self, path: Path) -> None: self._config = add_plot_directory(self.root_path, str(path)) async def setup_plots(self): self.add_plot_directory(self.plot_dir) assert self.created_plots == 0 # OG Plots for i in range(15): await self.new_plot() # Pool Plots for i in range(5): await self.new_plot(self.pool_ph) # Some plots with keys that are not in the keychain for i in range(3): await self.new_plot( path=self.plot_dir / "not_in_keychain", plot_keys=PlotKeys(G1Element(), G1Element(), None), exclude_plots=True, ) await self.refresh_plots() async def new_plot( self, pool_contract_puzzle_hash: Optional[bytes32] = None, path: Path = None, tmp_dir: Path = None, plot_keys: Optional[PlotKeys] = None, exclude_plots: bool = False, ) -> Optional[bytes32]: final_dir = self.plot_dir if path is not None: final_dir = path mkdir(final_dir) if tmp_dir is None: tmp_dir = self.temp_dir args = Namespace() # Can't go much lower than 20, since plots start having no solutions and more buggy args.size = 20 # 
Uses many plots for testing, in order to guarantee proofs of space at every height args.num = 1 args.buffer = 100 args.tmp_dir = tmp_dir args.tmp2_dir = tmp_dir args.final_dir = final_dir args.plotid = None args.memo = None args.buckets = 0 args.stripe_size = 2000 args.num_threads = 0 args.nobitfield = False args.exclude_final_dir = False args.list_duplicates = False try: if plot_keys is None: pool_pk: Optional[G1Element] = None pool_address: Optional[str] = None if pool_contract_puzzle_hash is None: pool_pk = self.pool_pk else: pool_address = encode_puzzle_hash(pool_contract_puzzle_hash, "hcx") plot_keys = PlotKeys(self.farmer_pk, pool_pk, pool_address) # No datetime in the filename, to get deterministic filenames and not re-plot created, existed = await create_plots( args, plot_keys, use_datetime=False, test_private_keys=[AugSchemeMPL.key_gen(std_hash(self.created_plots.to_bytes(2, "big")))], ) self.created_plots += 1 plot_id_new: Optional[bytes32] = None path_new: Optional[Path] = None if len(created): assert len(existed) == 0 plot_id_new, path_new = list(created.items())[0] if len(existed): assert len(created) == 0 plot_id_new, path_new = list(existed.items())[0] assert plot_id_new is not None assert path_new is not None if not exclude_plots: self.expected_plots[plot_id_new] = path_new return plot_id_new except KeyboardInterrupt: shutil.rmtree(self.temp_dir, ignore_errors=True) sys.exit(1) async def refresh_plots(self): self.plot_manager.refresh_parameter = replace( self.plot_manager.refresh_parameter, batch_size=uint32(4 if len(self.expected_plots) % 3 == 0 else 3) ) # Make sure we have at least some batches + a remainder self.plot_manager.trigger_refresh() assert self.plot_manager.needs_refresh() self.plot_manager.start_refreshing(sleep_interval_ms=1) await time_out_assert_custom_interval(10, 0.001, self.plot_manager.needs_refresh, value=False) self.plot_manager.stop_refreshing() assert not self.plot_manager.needs_refresh() async def delete_plot(self, 
plot_id: bytes32): assert plot_id in self.expected_plots self.expected_plots[plot_id].unlink() del self.expected_plots[plot_id] await self.refresh_plots() @property def config(self) -> Dict: return copy.deepcopy(self._config) def get_daemon_ssl_context(self) -> ssl.SSLContext: crt_path = self.root_path / self.config["daemon_ssl"]["private_crt"] key_path = self.root_path / self.config["daemon_ssl"]["private_key"] ca_cert_path = self.root_path / self.config["private_ssl_ca"]["crt"] ca_key_path = self.root_path / self.config["private_ssl_ca"]["key"] return ssl_context_for_client(ca_cert_path, ca_key_path, crt_path, key_path) def get_plot_signature(self, m: bytes32, plot_pk: G1Element) -> G2Element: """ Returns the plot signature of the header data. """ farmer_sk = master_sk_to_farmer_sk(self.all_sks[0]) for plot_info in self.plot_manager.plots.values(): if plot_pk == plot_info.plot_public_key: # Look up local_sk from plot to save locked memory if plot_info.prover.get_id() in self.local_sk_cache: local_master_sk, pool_pk_or_ph = self.local_sk_cache[plot_info.prover.get_id()] else: pool_pk_or_ph, _, local_master_sk = parse_plot_info(plot_info.prover.get_memo()) self.local_sk_cache[plot_info.prover.get_id()] = (local_master_sk, pool_pk_or_ph) if isinstance(pool_pk_or_ph, G1Element): include_taproot = False else: assert isinstance(pool_pk_or_ph, bytes32) include_taproot = True local_sk = master_sk_to_local_sk(local_master_sk) agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_sk.get_g1(), include_taproot) assert agg_pk == plot_pk harv_share = AugSchemeMPL.sign(local_sk, m, agg_pk) farm_share = AugSchemeMPL.sign(farmer_sk, m, agg_pk) if include_taproot: taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(local_sk.get_g1(), farmer_sk.get_g1()) taproot_share: G2Element = AugSchemeMPL.sign(taproot_sk, m, agg_pk) else: taproot_share = G2Element() return AugSchemeMPL.aggregate([harv_share, farm_share, taproot_share]) raise ValueError(f"Do not have 
key {plot_pk}") def get_pool_key_signature(self, pool_target: PoolTarget, pool_pk: Optional[G1Element]) -> Optional[G2Element]: # Returns the pool signature for the corresponding pk. If no pk is provided, returns None. if pool_pk is None: return None for sk in self.all_sks: sk_child = master_sk_to_pool_sk(sk) if sk_child.get_g1() == pool_pk: return AugSchemeMPL.sign(sk_child, bytes(pool_target)) raise ValueError(f"Do not have key {pool_pk}") def get_farmer_wallet_tool(self) -> WalletTool: return WalletTool(self.constants, self.farmer_master_sk) def get_pool_wallet_tool(self) -> WalletTool: return WalletTool(self.constants, self.pool_master_sk) def get_consecutive_blocks( self, num_blocks: int, block_list_input: List[FullBlock] = None, *, farmer_reward_puzzle_hash: Optional[bytes32] = None, pool_reward_puzzle_hash: Optional[bytes32] = None, transaction_data: Optional[SpendBundle] = None, seed: bytes = b"", time_per_block: Optional[float] = None, force_overflow: bool = False, skip_slots: int = 0, # Force at least this number of empty slots before the first SB guarantee_transaction_block: bool = False, # Force that this block must be a tx block keep_going_until_tx_block: bool = False, # keep making new blocks until we find a tx block normalized_to_identity_cc_eos: bool = False, normalized_to_identity_icc_eos: bool = False, normalized_to_identity_cc_sp: bool = False, normalized_to_identity_cc_ip: bool = False, current_time: bool = False, previous_generator: Optional[Union[CompressorArg, List[uint32]]] = None, genesis_timestamp: Optional[uint64] = None, force_plot_id: Optional[bytes32] = None, use_timestamp_residual: bool = False, ) -> List[FullBlock]: assert num_blocks > 0 if block_list_input is not None: block_list = block_list_input.copy() else: block_list = [] constants = self.constants transaction_data_included = False if time_per_block is None: time_per_block = float(constants.SUB_SLOT_TIME_TARGET) / float(constants.SLOT_BLOCKS_TARGET) if farmer_reward_puzzle_hash 
is None: farmer_reward_puzzle_hash = self.farmer_ph if len(block_list) == 0: if force_plot_id is not None: raise ValueError("Cannot specify plot_id for genesis block") initial_block_list_len = 0 genesis = self.create_genesis_block( constants, seed, force_overflow=force_overflow, skip_slots=skip_slots, timestamp=(uint64(int(time.time())) if genesis_timestamp is None else genesis_timestamp), ) log.info(f"Created block 0 iters: {genesis.total_iters}") num_empty_slots_added = skip_slots block_list = [genesis] num_blocks -= 1 else: initial_block_list_len = len(block_list) num_empty_slots_added = uint32(0) # Allows forcing empty slots in the beginning, for testing purposes if num_blocks == 0: return block_list blocks: Dict[bytes32, BlockRecord] if block_list[-1].header_hash == self._block_cache_header: height_to_hash = self._block_cache_height_to_hash difficulty = self._block_cache_difficulty blocks = self._block_cache else: height_to_hash, difficulty, blocks = load_block_list(block_list, constants) latest_block: BlockRecord = blocks[block_list[-1].header_hash] curr = latest_block while not curr.is_transaction_block: curr = blocks[curr.prev_hash] start_timestamp = curr.timestamp start_height = curr.height curr = latest_block blocks_added_this_sub_slot = 1 while not curr.first_in_sub_slot: curr = blocks[curr.prev_hash] blocks_added_this_sub_slot += 1 finished_sub_slots_at_sp: List[EndOfSubSlotBundle] = [] # Sub-slots since last block, up to signage point finished_sub_slots_at_ip: List[EndOfSubSlotBundle] = [] # Sub-slots since last block, up to infusion point sub_slot_iters: uint64 = latest_block.sub_slot_iters # The number of iterations in one sub-slot same_slot_as_last = True # Only applies to first slot, to prevent old blocks from being added sub_slot_start_total_iters: uint128 = latest_block.ip_sub_slot_total_iters(constants) sub_slots_finished = 0 pending_ses: bool = False # Start at the last block in block list # Get the challenge for that slot while True: 
slot_cc_challenge, slot_rc_challenge = get_challenges( constants, blocks, finished_sub_slots_at_sp, latest_block.header_hash, ) prev_num_of_blocks = num_blocks if num_empty_slots_added < skip_slots: # If did not reach the target slots to skip, don't make any proofs for this sub-slot num_empty_slots_added += 1 else: # Loop over every signage point (Except for the last ones, which are used for overflows) for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA): curr = latest_block while curr.total_iters > sub_slot_start_total_iters + calculate_sp_iters( constants, sub_slot_iters, uint8(signage_point_index) ): if curr.height == 0: break curr = blocks[curr.prev_hash] if curr.total_iters > sub_slot_start_total_iters: finished_sub_slots_at_sp = [] if same_slot_as_last: if signage_point_index < latest_block.signage_point_index: # Ignore this signage_point because it's in the past continue signage_point: SignagePoint = get_signage_point( constants, BlockCache(blocks), latest_block, sub_slot_start_total_iters, uint8(signage_point_index), finished_sub_slots_at_sp, sub_slot_iters, normalized_to_identity_cc_sp, ) if signage_point_index == 0: cc_sp_output_hash: bytes32 = slot_cc_challenge else: assert signage_point.cc_vdf is not None cc_sp_output_hash = signage_point.cc_vdf.output.get_hash() qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge( constants, slot_cc_challenge, cc_sp_output_hash, seed, difficulty, sub_slot_iters, force_plot_id=force_plot_id, ) for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]): if blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_BLOCKS or force_overflow: break if same_slot_as_last: if signage_point_index == latest_block.signage_point_index: # Ignore this block because it's in the past if required_iters <= latest_block.required_iters: continue assert latest_block.header_hash in blocks additions = None removals = None if 
transaction_data_included: transaction_data = None previous_generator = None if transaction_data is not None: additions = transaction_data.additions() removals = transaction_data.removals() assert start_timestamp is not None if proof_of_space.pool_contract_puzzle_hash is not None: if pool_reward_puzzle_hash is not None: # The caller wants to be paid to a specific address, but this PoSpace is tied to an # address, so continue until a proof of space tied to a pk is found continue pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0)) else: if pool_reward_puzzle_hash is not None: pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0)) else: pool_target = PoolTarget(self.pool_ph, uint32(0)) block_generator: Optional[BlockGenerator] if transaction_data is not None: if type(previous_generator) is CompressorArg: block_generator = best_solution_generator_from_template( previous_generator, transaction_data ) else: block_generator = simple_solution_generator(transaction_data) if type(previous_generator) is list: block_generator = BlockGenerator(block_generator.program, [], previous_generator) aggregate_signature = transaction_data.aggregated_signature else: block_generator = None aggregate_signature = G2Element() if not use_timestamp_residual: self._block_time_residual = 0.0 full_block, block_record, self._block_time_residual = get_full_block_and_block_record( constants, blocks, sub_slot_start_total_iters, uint8(signage_point_index), proof_of_space, slot_cc_challenge, slot_rc_challenge, farmer_reward_puzzle_hash, pool_target, start_timestamp, start_height, time_per_block, block_generator, aggregate_signature, additions, removals, height_to_hash, difficulty, required_iters, sub_slot_iters, self.get_plot_signature, self.get_pool_key_signature, finished_sub_slots_at_ip, signage_point, latest_block, seed, normalized_to_identity_cc_ip=normalized_to_identity_cc_ip, current_time=current_time, block_time_residual=self._block_time_residual, ) if 
block_record.is_transaction_block: transaction_data_included = True previous_generator = None keep_going_until_tx_block = False else: if guarantee_transaction_block: continue if pending_ses: pending_ses = False block_list.append(full_block) if full_block.transactions_generator is not None: compressor_arg = detect_potential_template_generator( full_block.height, full_block.transactions_generator ) if compressor_arg is not None: previous_generator = compressor_arg blocks_added_this_sub_slot += 1 blocks[full_block.header_hash] = block_record log.info(f"Created block {block_record.height} ove=False, iters " f"{block_record.total_iters}") height_to_hash[uint32(full_block.height)] = full_block.header_hash latest_block = blocks[full_block.header_hash] finished_sub_slots_at_ip = [] num_blocks -= 1 if num_blocks <= 0 and not keep_going_until_tx_block: self._block_cache_header = block_list[-1].header_hash self._block_cache_height_to_hash = height_to_hash self._block_cache_difficulty = difficulty self._block_cache = blocks return block_list # Finish the end of sub-slot and try again next sub-slot # End of sub-slot logic if len(finished_sub_slots_at_ip) == 0: # Block has been created within this sub-slot eos_iters: uint64 = uint64(sub_slot_iters - (latest_block.total_iters - sub_slot_start_total_iters)) cc_input: ClassgroupElement = latest_block.challenge_vdf_output rc_challenge: bytes32 = latest_block.reward_infusion_new_challenge else: # No blocks were successfully created within this sub-slot eos_iters = sub_slot_iters cc_input = ClassgroupElement.get_default_element() rc_challenge = slot_rc_challenge cc_vdf, cc_proof = get_vdf_info_and_proof( constants, cc_input, slot_cc_challenge, eos_iters, ) rc_vdf, rc_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), rc_challenge, eos_iters, ) eos_deficit: uint8 = ( latest_block.deficit if latest_block.deficit > 0 else constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK ) icc_eos_vdf, icc_ip_proof = get_icc( 
constants, uint128(sub_slot_start_total_iters + sub_slot_iters), finished_sub_slots_at_ip, latest_block, blocks, sub_slot_start_total_iters, eos_deficit, ) # End of slot vdf info for icc and cc have to be from challenge block or start of slot, respectively, # in order for light clients to validate. cc_vdf = VDFInfo(cc_vdf.challenge, sub_slot_iters, cc_vdf.output) if normalized_to_identity_cc_eos: _, cc_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), cc_vdf.challenge, sub_slot_iters, True, ) if pending_ses: sub_epoch_summary: Optional[SubEpochSummary] = None else: sub_epoch_summary = next_sub_epoch_summary( constants, BlockCache(blocks, height_to_hash=height_to_hash), latest_block.required_iters, block_list[-1], False, ) pending_ses = True ses_hash: Optional[bytes32] if sub_epoch_summary is not None: ses_hash = sub_epoch_summary.get_hash() new_sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty log.info(f"Sub epoch summary: {sub_epoch_summary}") else: ses_hash = None new_sub_slot_iters = None new_difficulty = None if icc_eos_vdf is not None: # Icc vdf (Deficit of latest block is <= 4) if len(finished_sub_slots_at_ip) == 0: # This means there are blocks in this sub-slot curr = latest_block while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot: curr = blocks[curr.prev_hash] if curr.is_challenge_block(constants): icc_eos_iters = uint64(sub_slot_start_total_iters + sub_slot_iters - curr.total_iters) else: icc_eos_iters = sub_slot_iters else: # This means there are no blocks in this sub-slot icc_eos_iters = sub_slot_iters icc_eos_vdf = VDFInfo( icc_eos_vdf.challenge, icc_eos_iters, icc_eos_vdf.output, ) if normalized_to_identity_icc_eos: _, icc_ip_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), icc_eos_vdf.challenge, icc_eos_iters, True, ) icc_sub_slot: Optional[InfusedChallengeChainSubSlot] 
= InfusedChallengeChainSubSlot(icc_eos_vdf) assert icc_sub_slot is not None icc_sub_slot_hash = icc_sub_slot.get_hash() if latest_block.deficit == 0 else None cc_sub_slot = ChallengeChainSubSlot( cc_vdf, icc_sub_slot_hash, ses_hash, new_sub_slot_iters, new_difficulty, ) else: # No icc icc_sub_slot = None cc_sub_slot = ChallengeChainSubSlot(cc_vdf, None, ses_hash, new_sub_slot_iters, new_difficulty) finished_sub_slots_at_ip.append( EndOfSubSlotBundle( cc_sub_slot, icc_sub_slot, RewardChainSubSlot( rc_vdf, cc_sub_slot.get_hash(), icc_sub_slot.get_hash() if icc_sub_slot is not None else None, eos_deficit, ), SubSlotProofs(cc_proof, icc_ip_proof, rc_proof), ) ) finished_sub_slots_eos = finished_sub_slots_at_ip.copy() latest_block_eos = latest_block overflow_cc_challenge = finished_sub_slots_at_ip[-1].challenge_chain.get_hash() overflow_rc_challenge = finished_sub_slots_at_ip[-1].reward_chain.get_hash() additions = None removals = None if transaction_data_included: transaction_data = None if transaction_data is not None: additions = transaction_data.additions() removals = transaction_data.removals() sub_slots_finished += 1 log.info( f"Sub slot finished. 
blocks included: {blocks_added_this_sub_slot} blocks_per_slot: " f"{(len(block_list) - initial_block_list_len)/sub_slots_finished}" ) blocks_added_this_sub_slot = 0 # Sub slot ended, overflows are in next sub slot # Handle overflows: No overflows on new epoch if new_sub_slot_iters is None and num_empty_slots_added >= skip_slots and new_difficulty is None: for signage_point_index in range( constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA, constants.NUM_SPS_SUB_SLOT, ): # note that we are passing in the finished slots which include the last slot signage_point = get_signage_point( constants, BlockCache(blocks), latest_block_eos, sub_slot_start_total_iters, uint8(signage_point_index), finished_sub_slots_eos, sub_slot_iters, normalized_to_identity_cc_sp, ) if signage_point_index == 0: cc_sp_output_hash = slot_cc_challenge else: assert signage_point is not None assert signage_point.cc_vdf is not None cc_sp_output_hash = signage_point.cc_vdf.output.get_hash() # If did not reach the target slots to skip, don't make any proofs for this sub-slot qualified_proofs = self.get_pospaces_for_challenge( constants, slot_cc_challenge, cc_sp_output_hash, seed, difficulty, sub_slot_iters, force_plot_id=force_plot_id, ) for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]): if blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_BLOCKS: break assert start_timestamp is not None if proof_of_space.pool_contract_puzzle_hash is not None: if pool_reward_puzzle_hash is not None: # The caller wants to be paid to a specific address, but this PoSpace is tied to an # address, so continue until a proof of space tied to a pk is found continue pool_target = PoolTarget(proof_of_space.pool_contract_puzzle_hash, uint32(0)) else: if pool_reward_puzzle_hash is not None: pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0)) else: pool_target = PoolTarget(self.pool_ph, uint32(0)) if transaction_data is not None: if previous_generator is not None and 
type(previous_generator) is CompressorArg: block_generator = best_solution_generator_from_template( previous_generator, transaction_data ) else: block_generator = simple_solution_generator(transaction_data) if type(previous_generator) is list: block_generator = BlockGenerator(block_generator.program, [], previous_generator) aggregate_signature = transaction_data.aggregated_signature else: block_generator = None aggregate_signature = G2Element() if not use_timestamp_residual: self._block_time_residual = 0.0 full_block, block_record, self._block_time_residual = get_full_block_and_block_record( constants, blocks, sub_slot_start_total_iters, uint8(signage_point_index), proof_of_space, slot_cc_challenge, slot_rc_challenge, farmer_reward_puzzle_hash, pool_target, start_timestamp, start_height, time_per_block, block_generator, aggregate_signature, additions, removals, height_to_hash, difficulty, required_iters, sub_slot_iters, self.get_plot_signature, self.get_pool_key_signature, finished_sub_slots_at_ip, signage_point, latest_block, seed, overflow_cc_challenge=overflow_cc_challenge, overflow_rc_challenge=overflow_rc_challenge, normalized_to_identity_cc_ip=normalized_to_identity_cc_ip, current_time=current_time, block_time_residual=self._block_time_residual, ) if block_record.is_transaction_block: transaction_data_included = True previous_generator = None keep_going_until_tx_block = False elif guarantee_transaction_block: continue if pending_ses: pending_ses = False block_list.append(full_block) if full_block.transactions_generator is not None: compressor_arg = detect_potential_template_generator( full_block.height, full_block.transactions_generator ) if compressor_arg is not None: previous_generator = compressor_arg blocks_added_this_sub_slot += 1 log.info(f"Created block {block_record.height } ov=True, iters " f"{block_record.total_iters}") num_blocks -= 1 blocks[full_block.header_hash] = block_record height_to_hash[uint32(full_block.height)] = full_block.header_hash 
latest_block = blocks[full_block.header_hash] finished_sub_slots_at_ip = [] if num_blocks <= 0 and not keep_going_until_tx_block: self._block_cache_header = block_list[-1].header_hash self._block_cache_height_to_hash = height_to_hash self._block_cache_difficulty = difficulty self._block_cache = blocks return block_list finished_sub_slots_at_sp = finished_sub_slots_eos.copy() same_slot_as_last = False sub_slot_start_total_iters = uint128(sub_slot_start_total_iters + sub_slot_iters) if num_blocks < prev_num_of_blocks: num_empty_slots_added += 1 if new_sub_slot_iters is not None: assert new_difficulty is not None sub_slot_iters = new_sub_slot_iters difficulty = new_difficulty def create_genesis_block( self, constants: ConsensusConstants, seed: bytes = b"", timestamp: Optional[uint64] = None, force_overflow: bool = False, skip_slots: int = 0, ) -> FullBlock: if timestamp is None: timestamp = uint64(int(time.time())) finished_sub_slots: List[EndOfSubSlotBundle] = [] unfinished_block: Optional[UnfinishedBlock] = None ip_iters: uint64 = uint64(0) sub_slot_total_iters: uint128 = uint128(0) # Keep trying until we get a good proof of space that also passes sp filter while True: cc_challenge, rc_challenge = get_challenges(constants, {}, finished_sub_slots, None) for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT): signage_point: SignagePoint = get_signage_point( constants, BlockCache({}, {}), None, sub_slot_total_iters, uint8(signage_point_index), finished_sub_slots, constants.SUB_SLOT_ITERS_STARTING, ) if signage_point_index == 0: cc_sp_output_hash: bytes32 = cc_challenge else: assert signage_point is not None assert signage_point.cc_vdf is not None cc_sp_output_hash = signage_point.cc_vdf.output.get_hash() # If did not reach the target slots to skip, don't make any proofs for this sub-slot qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge( constants, cc_challenge, cc_sp_output_hash, seed, constants.DIFFICULTY_STARTING, 
constants.SUB_SLOT_ITERS_STARTING, ) # Try each of the proofs of space for required_iters, proof_of_space in qualified_proofs: sp_iters: uint64 = calculate_sp_iters( constants, uint64(constants.SUB_SLOT_ITERS_STARTING), uint8(signage_point_index), ) ip_iters = calculate_ip_iters( constants, uint64(constants.SUB_SLOT_ITERS_STARTING), uint8(signage_point_index), required_iters, ) is_overflow = is_overflow_block(constants, uint8(signage_point_index)) if force_overflow and not is_overflow: continue if len(finished_sub_slots) < skip_slots: continue unfinished_block = create_test_unfinished_block( constants, sub_slot_total_iters, constants.SUB_SLOT_ITERS_STARTING, uint8(signage_point_index), sp_iters, ip_iters, proof_of_space, cc_challenge, constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH, PoolTarget(constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0)), self.get_plot_signature, self.get_pool_key_signature, signage_point, timestamp, BlockCache({}), seed=seed, finished_sub_slots_input=finished_sub_slots, ) assert unfinished_block is not None if not is_overflow: cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), cc_challenge, ip_iters, ) cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters) rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), rc_challenge, ip_iters, ) assert unfinished_block is not None total_iters_sp = uint128(sub_slot_total_iters + sp_iters) return unfinished_block_to_full_block( unfinished_block, cc_ip_vdf, cc_ip_proof, rc_ip_vdf, rc_ip_proof, None, None, finished_sub_slots, None, BlockCache({}), total_iters_sp, constants.DIFFICULTY_STARTING, ) if signage_point_index == constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA - 1: # Finish the end of sub-slot and try again next sub-slot cc_vdf, cc_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), cc_challenge, constants.SUB_SLOT_ITERS_STARTING, ) rc_vdf, 
rc_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), rc_challenge, constants.SUB_SLOT_ITERS_STARTING, ) cc_slot = ChallengeChainSubSlot(cc_vdf, None, None, None, None) finished_sub_slots.append( EndOfSubSlotBundle( cc_slot, None, RewardChainSubSlot( rc_vdf, cc_slot.get_hash(), None, uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK), ), SubSlotProofs(cc_proof, None, rc_proof), ) ) if unfinished_block is not None: cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), finished_sub_slots[-1].challenge_chain.get_hash(), ip_iters, ) rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), finished_sub_slots[-1].reward_chain.get_hash(), ip_iters, ) total_iters_sp = uint128( sub_slot_total_iters + calculate_sp_iters( self.constants, self.constants.SUB_SLOT_ITERS_STARTING, unfinished_block.reward_chain_block.signage_point_index, ) ) return unfinished_block_to_full_block( unfinished_block, cc_ip_vdf, cc_ip_proof, rc_ip_vdf, rc_ip_proof, None, None, finished_sub_slots, None, BlockCache({}), total_iters_sp, constants.DIFFICULTY_STARTING, ) sub_slot_total_iters = uint128(sub_slot_total_iters + constants.SUB_SLOT_ITERS_STARTING) def get_pospaces_for_challenge( self, constants: ConsensusConstants, challenge_hash: bytes32, signage_point: bytes32, seed: bytes, difficulty: uint64, sub_slot_iters: uint64, force_plot_id: Optional[bytes32] = None, ) -> List[Tuple[uint64, ProofOfSpace]]: found_proofs: List[Tuple[uint64, ProofOfSpace]] = [] rng = random.Random() rng.seed(seed) for plot_info in self.plot_manager.plots.values(): plot_id: bytes32 = plot_info.prover.get_id() if force_plot_id is not None and plot_id != force_plot_id: continue if ProofOfSpace.passes_plot_filter(constants, plot_id, challenge_hash, signage_point): new_challenge: bytes32 = ProofOfSpace.calculate_pos_challenge(plot_id, challenge_hash, signage_point) qualities = 
plot_info.prover.get_qualities_for_challenge(new_challenge) for proof_index, quality_str in enumerate(qualities): required_iters = calculate_iterations_quality( constants.DIFFICULTY_CONSTANT_FACTOR, quality_str, plot_info.prover.get_size(), difficulty, signage_point, ) if required_iters < calculate_sp_interval_iters(constants, sub_slot_iters): proof_xs: bytes = plot_info.prover.get_full_proof(new_challenge, proof_index) # Look up local_sk from plot to save locked memory ( pool_public_key_or_puzzle_hash, farmer_public_key, local_master_sk, ) = parse_plot_info(plot_info.prover.get_memo()) local_sk = master_sk_to_local_sk(local_master_sk) if isinstance(pool_public_key_or_puzzle_hash, G1Element): include_taproot = False else: assert isinstance(pool_public_key_or_puzzle_hash, bytes32) include_taproot = True plot_pk = ProofOfSpace.generate_plot_public_key( local_sk.get_g1(), farmer_public_key, include_taproot ) proof_of_space: ProofOfSpace = ProofOfSpace( new_challenge, plot_info.pool_public_key, plot_info.pool_contract_puzzle_hash, plot_pk, plot_info.prover.get_size(), proof_xs, ) found_proofs.append((required_iters, proof_of_space)) random_sample = found_proofs if len(found_proofs) >= 1: if rng.random() < 0.1: # Removes some proofs of space to create "random" chains, based on the seed random_sample = rng.sample(found_proofs, len(found_proofs) - 1) return random_sample def get_signage_point( constants: ConsensusConstants, blocks: BlockchainInterface, latest_block: Optional[BlockRecord], sub_slot_start_total_iters: uint128, signage_point_index: uint8, finished_sub_slots: List[EndOfSubSlotBundle], sub_slot_iters: uint64, normalized_to_identity_cc_sp: bool = False, ) -> SignagePoint: if signage_point_index == 0: return SignagePoint(None, None, None, None) sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index) overflow = is_overflow_block(constants, signage_point_index) sp_total_iters = uint128( sub_slot_start_total_iters + 
calculate_sp_iters(constants, sub_slot_iters, signage_point_index) ) ( cc_vdf_challenge, rc_vdf_challenge, cc_vdf_input, rc_vdf_input, cc_vdf_iters, rc_vdf_iters, ) = get_signage_point_vdf_info( constants, finished_sub_slots, overflow, latest_block, blocks, sp_total_iters, sp_iters, ) cc_sp_vdf, cc_sp_proof = get_vdf_info_and_proof( constants, cc_vdf_input, cc_vdf_challenge, cc_vdf_iters, ) rc_sp_vdf, rc_sp_proof = get_vdf_info_and_proof( constants, rc_vdf_input, rc_vdf_challenge, rc_vdf_iters, ) cc_sp_vdf = replace(cc_sp_vdf, number_of_iterations=sp_iters) if normalized_to_identity_cc_sp: _, cc_sp_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), cc_sp_vdf.challenge, sp_iters, True, ) return SignagePoint(cc_sp_vdf, cc_sp_proof, rc_sp_vdf, rc_sp_proof) def finish_block( constants: ConsensusConstants, blocks: Dict[bytes32, BlockRecord], height_to_hash: Dict[uint32, bytes32], finished_sub_slots: List[EndOfSubSlotBundle], sub_slot_start_total_iters: uint128, signage_point_index: uint8, unfinished_block: UnfinishedBlock, required_iters: uint64, ip_iters: uint64, slot_cc_challenge: bytes32, slot_rc_challenge: bytes32, latest_block: BlockRecord, sub_slot_iters: uint64, difficulty: uint64, normalized_to_identity_cc_ip: bool = False, ) -> Tuple[FullBlock, BlockRecord]: is_overflow = is_overflow_block(constants, signage_point_index) cc_vdf_challenge = slot_cc_challenge if len(finished_sub_slots) == 0: new_ip_iters = unfinished_block.total_iters - latest_block.total_iters cc_vdf_input = latest_block.challenge_vdf_output rc_vdf_challenge = latest_block.reward_infusion_new_challenge else: new_ip_iters = ip_iters cc_vdf_input = ClassgroupElement.get_default_element() rc_vdf_challenge = slot_rc_challenge cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof( constants, cc_vdf_input, cc_vdf_challenge, new_ip_iters, ) cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters) if normalized_to_identity_cc_ip: _, cc_ip_proof = get_vdf_info_and_proof( 
constants, ClassgroupElement.get_default_element(), cc_ip_vdf.challenge, ip_iters, True, ) deficit = calculate_deficit( constants, uint32(latest_block.height + 1), latest_block, is_overflow, len(finished_sub_slots), ) icc_ip_vdf, icc_ip_proof = get_icc( constants, unfinished_block.total_iters, finished_sub_slots, latest_block, blocks, uint128(sub_slot_start_total_iters + sub_slot_iters) if is_overflow else sub_slot_start_total_iters, deficit, ) rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), rc_vdf_challenge, new_ip_iters, ) assert unfinished_block is not None sp_total_iters = uint128( sub_slot_start_total_iters + calculate_sp_iters(constants, sub_slot_iters, signage_point_index) ) full_block: FullBlock = unfinished_block_to_full_block( unfinished_block, cc_ip_vdf, cc_ip_proof, rc_ip_vdf, rc_ip_proof, icc_ip_vdf, icc_ip_proof, finished_sub_slots, latest_block, BlockCache(blocks), sp_total_iters, difficulty, ) block_record = block_to_block_record(constants, BlockCache(blocks), required_iters, full_block, None) return full_block, block_record def get_challenges( constants: ConsensusConstants, blocks: Dict[bytes32, BlockRecord], finished_sub_slots: List[EndOfSubSlotBundle], prev_header_hash: Optional[bytes32], ) -> Tuple[bytes32, bytes32]: if len(finished_sub_slots) == 0: if prev_header_hash is None: return constants.GENESIS_CHALLENGE, constants.GENESIS_CHALLENGE curr: BlockRecord = blocks[prev_header_hash] while not curr.first_in_sub_slot: curr = blocks[curr.prev_hash] assert curr.finished_challenge_slot_hashes is not None assert curr.finished_reward_slot_hashes is not None cc_challenge = curr.finished_challenge_slot_hashes[-1] rc_challenge = curr.finished_reward_slot_hashes[-1] else: cc_challenge = finished_sub_slots[-1].challenge_chain.get_hash() rc_challenge = finished_sub_slots[-1].reward_chain.get_hash() return cc_challenge, rc_challenge def get_plot_dir() -> Path: cache_path = 
DEFAULT_ROOT_PATH.parent.joinpath("test-plots") ci = os.environ.get("CI") if ci is not None and not cache_path.exists(): raise Exception(f"Running in CI and expected path not found: {cache_path!r}") mkdir(cache_path) return cache_path def get_plot_tmp_dir(): return get_plot_dir() / "tmp" def load_block_list( block_list: List[FullBlock], constants: ConsensusConstants ) -> Tuple[Dict[uint32, bytes32], uint64, Dict[bytes32, BlockRecord]]: difficulty = 0 height_to_hash: Dict[uint32, bytes32] = {} blocks: Dict[bytes32, BlockRecord] = {} for full_block in block_list: if full_block.height == 0: difficulty = uint64(constants.DIFFICULTY_STARTING) else: difficulty = full_block.weight - block_list[full_block.height - 1].weight if full_block.reward_chain_block.signage_point_index == 0: challenge = full_block.reward_chain_block.pos_ss_cc_challenge_hash sp_hash = challenge else: assert full_block.reward_chain_block.challenge_chain_sp_vdf is not None challenge = full_block.reward_chain_block.challenge_chain_sp_vdf.challenge sp_hash = full_block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash() quality_str = full_block.reward_chain_block.proof_of_space.verify_and_get_quality_string( constants, challenge, sp_hash ) assert quality_str is not None required_iters: uint64 = calculate_iterations_quality( constants.DIFFICULTY_CONSTANT_FACTOR, quality_str, full_block.reward_chain_block.proof_of_space.size, uint64(difficulty), sp_hash, ) blocks[full_block.header_hash] = block_to_block_record( constants, BlockCache(blocks), required_iters, full_block, None, ) height_to_hash[uint32(full_block.height)] = full_block.header_hash return height_to_hash, uint64(difficulty), blocks def get_icc( constants: ConsensusConstants, vdf_end_total_iters: uint128, finished_sub_slots: List[EndOfSubSlotBundle], latest_block: BlockRecord, blocks: Dict[bytes32, BlockRecord], sub_slot_start_total_iters: uint128, deficit: uint8, ) -> Tuple[Optional[VDFInfo], Optional[VDFProof]]: if 
len(finished_sub_slots) == 0: prev_deficit = latest_block.deficit else: prev_deficit = finished_sub_slots[-1].reward_chain.deficit if deficit == prev_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK: # new slot / overflow sb to new slot / overflow sb return None, None if deficit == (prev_deficit - 1) == (constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1): # new slot / overflow sb to challenge sb return None, None if len(finished_sub_slots) != 0: last_ss = finished_sub_slots[-1] assert last_ss.infused_challenge_chain is not None assert finished_sub_slots[-1].reward_chain.deficit <= (constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1) return get_vdf_info_and_proof( constants, ClassgroupElement.get_default_element(), last_ss.infused_challenge_chain.get_hash(), uint64(vdf_end_total_iters - sub_slot_start_total_iters), ) curr = latest_block # curr deficit is 0, 1, 2, 3, or 4 while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot: curr = blocks[curr.prev_hash] icc_iters = uint64(vdf_end_total_iters - latest_block.total_iters) if latest_block.is_challenge_block(constants): icc_input: Optional[ClassgroupElement] = ClassgroupElement.get_default_element() else: icc_input = latest_block.infused_challenge_vdf_output assert icc_input is not None if curr.is_challenge_block(constants): # Deficit 4 icc_challenge_hash = curr.challenge_block_info_hash else: assert curr.finished_infused_challenge_slot_hashes is not None # First block in sub slot has deficit 0,1,2 or 3 icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1] return get_vdf_info_and_proof( constants, icc_input, icc_challenge_hash, icc_iters, ) def round_timestamp(timestamp: float, residual: float) -> Tuple[int, float]: mod = math.modf(timestamp + residual) return (int(mod[1]), mod[0]) def get_full_block_and_block_record( constants: ConsensusConstants, blocks: Dict[bytes32, BlockRecord], sub_slot_start_total_iters: uint128, signage_point_index: uint8, proof_of_space: ProofOfSpace, 
slot_cc_challenge: bytes32, slot_rc_challenge: bytes32, farmer_reward_puzzle_hash: bytes32, pool_target: PoolTarget, start_timestamp: uint64, start_height: uint32, time_per_block: float, block_generator: Optional[BlockGenerator], aggregate_signature: G2Element, additions: Optional[List[Coin]], removals: Optional[List[Coin]], height_to_hash: Dict[uint32, bytes32], difficulty: uint64, required_iters: uint64, sub_slot_iters: uint64, get_plot_signature: Callable[[bytes32, G1Element], G2Element], get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]], finished_sub_slots: List[EndOfSubSlotBundle], signage_point: SignagePoint, prev_block: BlockRecord, seed: bytes = b"", *, overflow_cc_challenge: bytes32 = None, overflow_rc_challenge: bytes32 = None, normalized_to_identity_cc_ip: bool = False, current_time: bool = False, block_time_residual: float = 0.0, ) -> Tuple[FullBlock, BlockRecord, float]: if current_time is True: if prev_block.timestamp is not None: time_delta, block_time_residual = round_timestamp(time_per_block, block_time_residual) timestamp = uint64(max(int(time.time()), prev_block.timestamp + time_delta)) else: timestamp = uint64(int(time.time())) else: time_delta, block_time_residual = round_timestamp( (prev_block.height + 1 - start_height) * time_per_block, block_time_residual ) timestamp = uint64(start_timestamp + time_delta) sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index) ip_iters = calculate_ip_iters(constants, sub_slot_iters, signage_point_index, required_iters) unfinished_block = create_test_unfinished_block( constants, sub_slot_start_total_iters, sub_slot_iters, signage_point_index, sp_iters, ip_iters, proof_of_space, slot_cc_challenge, farmer_reward_puzzle_hash, pool_target, get_plot_signature, get_pool_signature, signage_point, timestamp, BlockCache(blocks), seed, block_generator, aggregate_signature, additions, removals, prev_block, finished_sub_slots, ) if (overflow_cc_challenge is not 
None) and (overflow_rc_challenge is not None): slot_cc_challenge = overflow_cc_challenge slot_rc_challenge = overflow_rc_challenge full_block, block_record = finish_block( constants, blocks, height_to_hash, finished_sub_slots, sub_slot_start_total_iters, signage_point_index, unfinished_block, required_iters, ip_iters, slot_cc_challenge, slot_rc_challenge, prev_block, sub_slot_iters, difficulty, normalized_to_identity_cc_ip, ) return full_block, block_record, block_time_residual def compute_cost_test(generator: BlockGenerator, cost_per_byte: int) -> Tuple[Optional[uint16], uint64]: try: block_program, block_program_args = setup_generator_args(generator) clvm_cost, result = GENERATOR_MOD.run_mempool_with_cost(INFINITE_COST, block_program, block_program_args) size_cost = len(bytes(generator.program)) * cost_per_byte condition_cost = 0 for res in result.first().as_iter(): res = res.rest() # skip parent coind id res = res.rest() # skip puzzle hash res = res.rest() # skip amount for cond in res.first().as_iter(): condition = cond.first().as_atom() if condition in [ConditionOpcode.AGG_SIG_UNSAFE, ConditionOpcode.AGG_SIG_ME]: condition_cost += ConditionCost.AGG_SIG.value elif condition == ConditionOpcode.CREATE_COIN: condition_cost += ConditionCost.CREATE_COIN.value return None, uint64(clvm_cost + size_cost + condition_cost) except Exception: return uint16(Err.GENERATOR_RUNTIME_ERROR.value), uint64(0) def create_test_foliage( constants: ConsensusConstants, reward_block_unfinished: RewardChainBlockUnfinished, block_generator: Optional[BlockGenerator], aggregate_sig: G2Element, additions: List[Coin], removals: List[Coin], prev_block: Optional[BlockRecord], blocks: BlockchainInterface, total_iters_sp: uint128, timestamp: uint64, farmer_reward_puzzlehash: bytes32, pool_target: PoolTarget, get_plot_signature: Callable[[bytes32, G1Element], G2Element], get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]], seed: bytes = b"", ) -> Tuple[Foliage, 
Optional[FoliageTransactionBlock], Optional[TransactionsInfo]]: """ Creates a foliage for a given reward chain block. This may or may not be a tx block. In the case of a tx block, the return values are not None. This is called at the signage point, so some of this information may be tweaked at the infusion point. Args: constants: consensus constants being used for this chain reward_block_unfinished: the reward block to look at, potentially at the signage point block_generator: transactions to add to the foliage block, if created aggregate_sig: aggregate of all transctions (or infinity element) prev_block: the previous block at the signage point blocks: dict from header hash to blocks, of all ancestor blocks total_iters_sp: total iters at the signage point timestamp: timestamp to put into the foliage block farmer_reward_puzzlehash: where to pay out farming reward pool_target: where to pay out pool reward get_plot_signature: retrieve the signature corresponding to the plot public key get_pool_signature: retrieve the signature corresponding to the pool public key seed: seed to randomize block """ if prev_block is not None: res = get_prev_transaction_block(prev_block, blocks, total_iters_sp) is_transaction_block: bool = res[0] prev_transaction_block: Optional[BlockRecord] = res[1] else: # Genesis is a transaction block prev_transaction_block = None is_transaction_block = True rng = random.Random() rng.seed(seed) # Use the extension data to create different blocks based on header hash extension_data: bytes32 = bytes32(rng.randint(0, 100000000).to_bytes(32, "big")) if prev_block is None: height: uint32 = uint32(0) else: height = uint32(prev_block.height + 1) # Create filter byte_array_tx: List[bytearray] = [] tx_additions: List[Coin] = [] tx_removals: List[bytes32] = [] pool_target_signature: Optional[G2Element] = get_pool_signature( pool_target, reward_block_unfinished.proof_of_space.pool_public_key ) foliage_data = FoliageBlockData( reward_block_unfinished.get_hash(), 
pool_target, pool_target_signature, farmer_reward_puzzlehash, extension_data, ) foliage_block_data_signature: G2Element = get_plot_signature( foliage_data.get_hash(), reward_block_unfinished.proof_of_space.plot_public_key, ) prev_block_hash: bytes32 = constants.GENESIS_CHALLENGE if height != 0: assert prev_block is not None prev_block_hash = prev_block.header_hash generator_block_heights_list: List[uint32] = [] if is_transaction_block: cost = uint64(0) # Calculate the cost of transactions if block_generator is not None: generator_block_heights_list = block_generator.block_height_list err, cost = compute_cost_test(block_generator, constants.COST_PER_BYTE) assert err is None removal_amount = 0 addition_amount = 0 for coin in removals: removal_amount += coin.amount for coin in additions: addition_amount += coin.amount spend_bundle_fees = removal_amount - addition_amount # in order to allow creating blocks that mint coins, clamp the fee # to 0, if it ends up being negative if spend_bundle_fees < 0: spend_bundle_fees = 0 else: spend_bundle_fees = 0 reward_claims_incorporated = [] if height > 0: assert prev_transaction_block is not None assert prev_block is not None curr: BlockRecord = prev_block while not curr.is_transaction_block: curr = blocks.block_record(curr.prev_hash) assert curr.fees is not None pool_coin = create_pool_coin( curr.height, curr.pool_puzzle_hash, calculate_pool_reward(curr.height), constants.GENESIS_CHALLENGE ) farmer_coin = create_farmer_coin( curr.height, curr.farmer_puzzle_hash, uint64(calculate_base_farmer_reward(curr.height) + curr.fees), constants.GENESIS_CHALLENGE, ) assert curr.header_hash == prev_transaction_block.header_hash reward_claims_incorporated += [pool_coin, farmer_coin] if curr.height > 0: curr = blocks.block_record(curr.prev_hash) # Prev block is not genesis while not curr.is_transaction_block: pool_coin = create_pool_coin( curr.height, curr.pool_puzzle_hash, calculate_pool_reward(curr.height), constants.GENESIS_CHALLENGE, ) 
farmer_coin = create_farmer_coin( curr.height, curr.farmer_puzzle_hash, calculate_base_farmer_reward(curr.height), constants.GENESIS_CHALLENGE, ) reward_claims_incorporated += [pool_coin, farmer_coin] curr = blocks.block_record(curr.prev_hash) additions.extend(reward_claims_incorporated.copy()) for coin in additions: tx_additions.append(coin) byte_array_tx.append(bytearray(coin.puzzle_hash)) for coin in removals: tx_removals.append(coin.name()) byte_array_tx.append(bytearray(coin.name())) bip158: PyBIP158 = PyBIP158(byte_array_tx) encoded = bytes(bip158.GetEncoded()) additions_merkle_items: List[bytes32] = [] # Create addition Merkle set puzzlehash_coin_map: Dict[bytes32, List[bytes32]] = {} for coin in tx_additions: if coin.puzzle_hash in puzzlehash_coin_map: puzzlehash_coin_map[coin.puzzle_hash].append(coin.name()) else: puzzlehash_coin_map[coin.puzzle_hash] = [coin.name()] # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash for puzzle, coin_ids in puzzlehash_coin_map.items(): additions_merkle_items.append(puzzle) additions_merkle_items.append(hash_coin_ids(coin_ids)) additions_root = bytes32(compute_merkle_set_root(additions_merkle_items)) removals_root = bytes32(compute_merkle_set_root(tx_removals)) generator_hash = bytes32([0] * 32) if block_generator is not None: generator_hash = std_hash(block_generator.program) generator_refs_hash = bytes32([1] * 32) if generator_block_heights_list not in (None, []): generator_ref_list_bytes = b"".join([bytes(i) for i in generator_block_heights_list]) generator_refs_hash = std_hash(generator_ref_list_bytes) filter_hash: bytes32 = std_hash(encoded) transactions_info: Optional[TransactionsInfo] = TransactionsInfo( generator_hash, generator_refs_hash, aggregate_sig, uint64(spend_bundle_fees), cost, reward_claims_incorporated, ) if prev_transaction_block is None: prev_transaction_block_hash: bytes32 = constants.GENESIS_CHALLENGE else: prev_transaction_block_hash = 
prev_transaction_block.header_hash assert transactions_info is not None foliage_transaction_block: Optional[FoliageTransactionBlock] = FoliageTransactionBlock( prev_transaction_block_hash, timestamp, filter_hash, additions_root, removals_root, transactions_info.get_hash(), ) assert foliage_transaction_block is not None foliage_transaction_block_hash: Optional[bytes32] = foliage_transaction_block.get_hash() assert foliage_transaction_block_hash is not None foliage_transaction_block_signature: Optional[G2Element] = get_plot_signature( foliage_transaction_block_hash, reward_block_unfinished.proof_of_space.plot_public_key, ) assert foliage_transaction_block_signature is not None else: foliage_transaction_block_hash = None foliage_transaction_block_signature = None foliage_transaction_block = None transactions_info = None assert (foliage_transaction_block_hash is None) == (foliage_transaction_block_signature is None) foliage = Foliage( prev_block_hash, reward_block_unfinished.get_hash(), foliage_data, foliage_block_data_signature, foliage_transaction_block_hash, foliage_transaction_block_signature, ) return foliage, foliage_transaction_block, transactions_info def create_test_unfinished_block( constants: ConsensusConstants, sub_slot_start_total_iters: uint128, sub_slot_iters: uint64, signage_point_index: uint8, sp_iters: uint64, ip_iters: uint64, proof_of_space: ProofOfSpace, slot_cc_challenge: bytes32, farmer_reward_puzzle_hash: bytes32, pool_target: PoolTarget, get_plot_signature: Callable[[bytes32, G1Element], G2Element], get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]], signage_point: SignagePoint, timestamp: uint64, blocks: BlockchainInterface, seed: bytes = b"", block_generator: Optional[BlockGenerator] = None, aggregate_sig: G2Element = G2Element(), additions: Optional[List[Coin]] = None, removals: Optional[List[Coin]] = None, prev_block: Optional[BlockRecord] = None, finished_sub_slots_input: List[EndOfSubSlotBundle] = None, ) 
-> UnfinishedBlock: """ Creates a new unfinished block using all the information available at the signage point. This will have to be modified using information from the infusion point. Args: constants: consensus constants being used for this chain sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot sub_slot_iters: sub-slot-iters at the infusion point epoch signage_point_index: signage point index of the block to create sp_iters: sp_iters of the block to create ip_iters: ip_iters of the block to create proof_of_space: proof of space of the block to create slot_cc_challenge: challenge hash at the sp sub-slot farmer_reward_puzzle_hash: where to pay out farmer rewards pool_target: where to pay out pool rewards get_plot_signature: function that returns signature corresponding to plot public key get_pool_signature: function that returns signature corresponding to pool public key signage_point: signage point information (VDFs) timestamp: timestamp to add to the foliage block, if created seed: seed to randomize chain block_generator: transactions to add to the foliage block, if created aggregate_sig: aggregate of all transctions (or infinity element) additions: Coins added in spend_bundle removals: Coins removed in spend_bundle prev_block: previous block (already in chain) from the signage point blocks: dictionary from header hash to SBR of all included SBR finished_sub_slots_input: finished_sub_slots at the signage point Returns: """ if finished_sub_slots_input is None: finished_sub_slots: List[EndOfSubSlotBundle] = [] else: finished_sub_slots = finished_sub_slots_input.copy() overflow: bool = sp_iters > ip_iters total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters) is_genesis: bool = prev_block is None new_sub_slot: bool = len(finished_sub_slots) > 0 cc_sp_hash: bytes32 = slot_cc_challenge # Only enters this if statement if we are in testing mode (making VDF proofs here) if signage_point.cc_vdf is not None: assert 
signage_point.rc_vdf is not None cc_sp_hash = signage_point.cc_vdf.output.get_hash() rc_sp_hash = signage_point.rc_vdf.output.get_hash() else: if new_sub_slot: rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash() else: if is_genesis: rc_sp_hash = constants.GENESIS_CHALLENGE else: assert prev_block is not None assert blocks is not None curr = prev_block while not curr.first_in_sub_slot: curr = blocks.block_record(curr.prev_hash) assert curr.finished_reward_slot_hashes is not None rc_sp_hash = curr.finished_reward_slot_hashes[-1] signage_point = SignagePoint(None, None, None, None) cc_sp_signature: Optional[G2Element] = get_plot_signature( cc_sp_hash, proof_of_space.plot_public_key, ) rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key) assert cc_sp_signature is not None assert rc_sp_signature is not None assert AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature) total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0)) rc_block = RewardChainBlockUnfinished( total_iters, signage_point_index, slot_cc_challenge, proof_of_space, signage_point.cc_vdf, cc_sp_signature, signage_point.rc_vdf, rc_sp_signature, ) if additions is None: additions = [] if removals is None: removals = [] (foliage, foliage_transaction_block, transactions_info,) = create_test_foliage( constants, rc_block, block_generator, aggregate_sig, additions, removals, prev_block, blocks, total_iters_sp, timestamp, farmer_reward_puzzle_hash, pool_target, get_plot_signature, get_pool_signature, seed, ) return UnfinishedBlock( finished_sub_slots, rc_block, signage_point.cc_proof, signage_point.rc_proof, foliage, foliage_transaction_block, transactions_info, block_generator.program if block_generator else None, block_generator.block_height_list if block_generator else [], ) # Remove these counters when `create_block_tools` and `create_block_tools_async` are removed 
create_block_tools_async_count = 0 create_block_tools_count = 0 # Note: tests that still use `create_block_tools` and `create_block_tools_async` should probably be # moved to the bt fixture in conftest.py. Take special care to find out if the users of these functions # need different BlockTools instances # All tests need different root directories containing different config.yaml files. # The daemon's listen port is configured in the config.yaml, and the only way a test can control which # listen port it uses is to write it to the config file. async def create_block_tools_async( constants: ConsensusConstants = test_constants, root_path: Optional[Path] = None, const_dict=None, keychain: Optional[Keychain] = None, config_overrides: Optional[Dict] = None, ) -> BlockTools: global create_block_tools_async_count create_block_tools_async_count += 1 print(f" create_block_tools_async called {create_block_tools_async_count} times") bt = BlockTools(constants, root_path, const_dict, keychain, config_overrides=config_overrides) await bt.setup_keys() await bt.setup_plots() return bt def create_block_tools( constants: ConsensusConstants = test_constants, root_path: Optional[Path] = None, const_dict=None, keychain: Optional[Keychain] = None, config_overrides: Optional[Dict] = None, ) -> BlockTools: global create_block_tools_count create_block_tools_count += 1 print(f" create_block_tools called {create_block_tools_count} times") bt = BlockTools(constants, root_path, const_dict, keychain, config_overrides=config_overrides) asyncio.get_event_loop().run_until_complete(bt.setup_keys()) asyncio.get_event_loop().run_until_complete(bt.setup_plots()) return bt def make_unfinished_block(block: FullBlock, constants: ConsensusConstants) -> UnfinishedBlock: if is_overflow_block(constants, block.reward_chain_block.signage_point_index): finished_ss = block.finished_sub_slots[:-1] else: finished_ss = block.finished_sub_slots return UnfinishedBlock( finished_ss, 
block.reward_chain_block.get_unfinished(), block.challenge_chain_sp_proof, block.reward_chain_sp_proof, block.foliage, block.foliage_transaction_block, block.transactions_info, block.transactions_generator, block.transactions_generator_ref_list, )
43.399713
120
0.611281
4a0148884fe805bd8a9357c3694bc65aa312ac84
14,552
py
Python
processor/processor.py
jiangwenj02/CurveNet-V1
781f14f41349af97350d65ab3993851063bee87c
[ "MIT" ]
null
null
null
processor/processor.py
jiangwenj02/CurveNet-V1
781f14f41349af97350d65ab3993851063bee87c
[ "MIT" ]
null
null
null
processor/processor.py
jiangwenj02/CurveNet-V1
781f14f41349af97350d65ab3993851063bee87c
[ "MIT" ]
null
null
null
#!/usr/bin/env python # pylint: disable=W0201 import sys import argparse import yaml import numpy as np import random import os.path as osp # torch import torch import torch.nn as nn import torch.optim as optim # torchlight import torchlight from torchlight import str2bool from torchlight import DictAction from torchlight import import_class import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.cm as cm from sklearn.metrics import confusion_matrix from sklearn.metrics import recall_score from .io import IO import subprocess class Processor(IO): """ Base Processor """ def __init__(self, argv=None): self.load_arg(argv) self.init_environment() self.load_model() self.load_weights() self.gpu() self.load_data() self.load_optimizer() self.label = [] def init_environment(self): super().init_environment() self.result = dict() self.iter_info = dict() self.epoch_info = dict() self.meta_info = dict(epoch=0, iter=0) self.set_seed(self.arg.seed) def get_gpu_memory_map(self): """Get the current gpu usage. Returns ------- usage: dict Keys are device ids as integers. Values are memory usage as integers in MB. 
""" result = subprocess.check_output( [ 'nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader' ], encoding='utf-8') # Convert lines into a dictionary gpu_memory = [int(x) for x in result.strip().split('\n')] gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory)) return gpu_memory_map def load_optimizer(self): pass def test_conf(self, evaluation=True): self.model.eval() loader = self.data_loader['test'] loss_value = [] result_frag = [] label_frag = [] for data, label, index, _ in loader: # get data data = data.float().to(self.dev) label = label.long().to(self.dev) # inference with torch.no_grad(): output = self.model(data) result_frag.append(output.data.cpu().numpy()) # get loss if evaluation: loss = self.loss(output, label) loss_value.append(loss.mean().item()) label_frag.append(label.data.cpu().numpy()) self.result = np.concatenate(result_frag) if evaluation: self.label = np.concatenate(label_frag) self.epoch_info['mean_loss']= np.mean(loss_value) self.show_epoch_info() # show top-k accuracy for k in self.arg.show_topk: self.show_topk(k) rank = self.result.argsort() rank = rank[:, -1] plt.figure(figsize=(5,5)) confusion = confusion_matrix(self.label, rank) print(confusion[0, :].sum()) confusion = confusion / confusion[0, :].sum() confusion = 100 * confusion plt.matshow(confusion, cmap=plt.cm.Greens) plt.colorbar() # for i in range(len(confusion)): # for j in range(len(confusion)): # string = str(round(confusion[i,j],1)) # plt.annotate(string, xy=(i, j), horizontalalignment='center', verticalalignment='center', fontsize=8) plt.title('Ours', fontsize=18) plt.ylabel('True label', fontsize=15) plt.xlabel('Predicted label', fontsize=15) plt.savefig(osp.join(self.arg.work_dir, 'confusion.jpg'), bbox_inches='tight') def save_model(self, model, name): model_path = '{}/{}'.format(self.work_dir, name) torch.save({ 'model_state_dict': self.model.state_dict(), 'sensinet_state_dict': self.sensinet.state_dict(), 'optimizer_state_dict': 
self.optimizer.state_dict(), 'optimizer_sensinet_state_dict': self.optimizer_sensinet.state_dict(), 'meta_epoch': self.meta_info['epoch'], 'meta_iter': self.meta_info['iter'] }, model_path) self.print_log('The model has been saved as {}.'.format(model_path)) def load_weights(self): # self.arg.phase = 'test' # self.arg.weights = osp.join(self.arg.work_dir, 'best_model.pt') if self.arg.weights: checkpoint = torch.load(self.arg.weights) self.model.load_state_dict(checkpoint) # self.model.load_state_dict(checkpoint['model_state_dict']) # self.sensinet.load_state_dict(checkpoint['sensinet_state_dict']) # self.optimizer.load_state_dict(checkpoint['optimizer_state_dict']) # self.optimizer_sensinet.load_state_dict(checkpoint['optimizer_sensinet_state_dict']) # self.arg.start_epoch = checkpoint['meta_epoch'] # self.meta_info['meta_iter'] = checkpoint['meta_iter'] def show_topk(self, k): rank = self.result.argsort() hit_top_k = [l in rank[i, -k:] for i, l in enumerate(self.label)] hit_top_k_cls = [] hit_top_k_cls_num = [] for cls in range(self.arg.model_args['num_classes']): hit_top_k_cls.append([(l in rank[i, -k:]) * (l == cls) for i, l in enumerate(self.label)]) hit_top_k_cls_num.append([l == cls for i, l in enumerate(self.label)]) accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k) accuracy_cls = [sum(hit_top_k_cls[i]) * 1.0 / sum(hit_top_k_cls_num[i]) for i in range(self.arg.model_args['num_classes'])] if accuracy > self.best_acc: self.best_acc = accuracy filename = 'best_model.pt' self.io.save_model(self.model, filename) self.train_writer.add_scalar('accuracy/test_acc', 100 * accuracy, self.meta_info['epoch']) for i in range(self.arg.model_args['num_classes']): self.train_writer.add_scalar('accuracy/test_acc_cls_' + str(i), 100 * accuracy_cls[i], self.meta_info['epoch']) self.io.print_log('\tTop{}: {:.2f}%'.format(k, 100 * accuracy)) self.io.print_log('\tBest accuracy Top{}: {:.2f}%'.format(k, 100 * self.best_acc)) def load_data(self): Feeder = 
import_class(self.arg.feeder) if 'debug' not in self.arg.train_feeder_args: self.arg.train_feeder_args['debug'] = self.arg.debug self.data_loader = dict() if self.arg.phase == 'train': self.data_loader['train'] = torch.utils.data.DataLoader( dataset=Feeder(**self.arg.train_feeder_args), batch_size=self.arg.batch_size, shuffle=True, num_workers=self.arg.num_worker * torchlight.ngpu( self.arg.device), drop_last=True, pin_memory=True) self.data_loader['meta_train'] = torch.utils.data.DataLoader( dataset=Feeder(**self.arg.train_feeder_args), batch_size=self.arg.batch_size, shuffle=True, num_workers=self.arg.num_worker * torchlight.ngpu( self.arg.device), drop_last=True, pin_memory=True) if self.arg.test_feeder_args: self.data_loader['test'] = torch.utils.data.DataLoader( dataset=Feeder(**self.arg.test_feeder_args), batch_size=self.arg.test_batch_size, shuffle=False, num_workers=self.arg.num_worker * torchlight.ngpu( self.arg.device), pin_memory=True) def show_epoch_info(self): for k, v in self.epoch_info.items(): self.io.print_log('\t{}: {}'.format(k, v)) if self.arg.pavi_log: self.io.log('train', self.meta_info['iter'], self.epoch_info) def show_iter_info(self): if self.meta_info['iter'] % self.arg.log_interval == 0: info ='\tIter {} Done.'.format(self.meta_info['iter']) for k, v in self.iter_info.items(): if isinstance(v, float): info = info + ' | {}: {:.4f}'.format(k, v) else: info = info + ' | {}: {}'.format(k, v) self.io.print_log(info) if self.arg.pavi_log: self.io.log('train', self.meta_info['iter'], self.iter_info) def train(self): for _ in range(100): self.iter_info['loss'] = 0 self.show_iter_info() self.meta_info['iter'] += 1 self.epoch_info['mean loss'] = 0 self.show_epoch_info() def test(self): for _ in range(100): self.iter_info['loss'] = 1 self.show_iter_info() self.epoch_info['mean loss'] = 1 self.show_epoch_info() def set_seed(self, seed): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) 
torch.backends.cudnn.deterministic = True def start(self): self.io.print_log('Parameters:\n{}\n'.format(str(vars(self.arg)))) # training phase if self.arg.phase == 'train': for epoch in range(self.arg.start_epoch, self.arg.num_epoch): self.meta_info['epoch'] = epoch # training self.io.print_log('Training epoch: {}'.format(epoch)) self.train() self.io.print_log('Done.') # save model if ((epoch + 1) % self.arg.save_interval == 0) or ( epoch + 1 == self.arg.num_epoch): filename = 'epoch{}_model.pt'.format(epoch + 1) self.io.save_model(self.model, filename) # evaluation if ((epoch + 1) % self.arg.eval_interval == 0) or ( epoch + 1 == self.arg.num_epoch): self.io.print_log('Eval epoch: {}'.format(epoch)) self.test() self.io.print_log('Done.') # test phase elif self.arg.phase == 'test': # the path of weights must be appointed if self.arg.weights is None: raise ValueError('Please appoint --weights.') self.io.print_log('Model: {}.'.format(self.arg.model)) self.io.print_log('Weights: {}.'.format(self.arg.weights)) # evaluation self.io.print_log('Evaluation Start:') self.test() self.io.print_log('Done.\n') # save the output of model if self.arg.save_result: result_dict = dict( zip(self.data_loader['test'].dataset.sample_name, self.result)) self.io.save_pkl(result_dict, 'test_result.pkl') @staticmethod def get_parser(add_help=False): #region arguments yapf: disable # parameter priority: command line > config > default parser = argparse.ArgumentParser( add_help=add_help, description='Base Processor') parser.add_argument('-w', '--work_dir', default='./work_dir/tmp', help='the work folder for storing results') parser.add_argument('-c', '--config', default=None, help='path to the configuration file') # processor parser.add_argument('--phase', default='train', help='must be train or test') parser.add_argument('--save_result', type=str2bool, default=False, help='if ture, the output of the model will be stored') parser.add_argument('--start_epoch', type=int, default=0, help='start 
training from which epoch') parser.add_argument('--num_epoch', type=int, default=80, help='stop training in which epoch') parser.add_argument('--use_gpu', type=str2bool, default=True, help='use GPUs or not') parser.add_argument('--device', type=int, default=0, nargs='+', help='the indexes of GPUs for training or testing') # visulize and debug parser.add_argument('--log_interval', type=int, default=100, help='the interval for printing messages (#iteration)') parser.add_argument('--save_interval', type=int, default=10, help='the interval for storing models (#iteration)') parser.add_argument('--eval_interval', type=int, default=5, help='the interval for evaluating models (#iteration)') parser.add_argument('--save_log', type=str2bool, default=True, help='save logging or not') parser.add_argument('--print_log', type=str2bool, default=True, help='print logging or not') parser.add_argument('--pavi_log', type=str2bool, default=False, help='logging on pavi or not') # feeder parser.add_argument('--feeder', default='feeder.feeder', help='data loader will be used') parser.add_argument('--num_worker', type=int, default=4, help='the number of worker per gpu for data loader') parser.add_argument('--train_feeder_args', action=DictAction, default=dict(), help='the arguments of data loader for training') parser.add_argument('--train_meta_feeder_args', action=DictAction, default=dict(), help='the arguments of meta data loader for training') parser.add_argument('--test_feeder_args', action=DictAction, default=dict(), help='the arguments of data loader for test') parser.add_argument('--batch_size', type=int, default=256, help='training batch size') parser.add_argument('--test_batch_size', type=int, default=256, help='test batch size') parser.add_argument('--debug', action="store_true", help='less data, faster loading') # model parser.add_argument('--model', default=None, help='the model will be used') parser.add_argument('--model_args', action=DictAction, default=dict(), help='the 
arguments of model') parser.add_argument('--weights', default=None, help='the weights for network initialization') parser.add_argument('--ignore_weights', type=str, default=[], nargs='+', help='the name of weights which will be ignored in the initialization') parser.add_argument('--warmup_epoch', type=int, default=0, help='the name of weights which will be ignored in the initialization') parser.add_argument('--alpha_factor', type=float, default=0.1, help='initial learning rate') parser.add_argument('--seed', type=int, default=1, help='the model will be used') #endregion yapf: enable return parser
42.17971
152
0.601292
4a0149e9a05bd7c9c0c8b4076fdb226e62443a16
32,059
py
Python
Joy_QA_Platform/httprunner/testcase.py
bzc128/Joy_QA_Platform
d3325331cd832a22e91ad895ab793577609aabc4
[ "Apache-2.0" ]
123
2019-03-01T06:07:43.000Z
2021-12-11T07:59:20.000Z
Joy_QA_Platform/httprunner/testcase.py
bzc128/Joy_QA_Platform
d3325331cd832a22e91ad895ab793577609aabc4
[ "Apache-2.0" ]
8
2019-03-06T06:33:34.000Z
2021-06-10T21:13:55.000Z
Joy_QA_Platform/httprunner/testcase.py
bzc128/Joy_QA_Platform
d3325331cd832a22e91ad895ab793577609aabc4
[ "Apache-2.0" ]
54
2019-03-01T02:25:13.000Z
2021-12-23T16:55:17.000Z
# encoding: utf-8 import ast import collections import io import itertools import json import os import random import re from httprunner import exception, logger, utils from httprunner.compat import OrderedDict, basestring, numeric_types from httprunner.utils import FileUtils # TODO 正则漏掉了@符号 variable_regexp = r"\$([\w_]+)" function_regexp = r"\$\{([\w_]+\([\$\w\.@\-_ =,]*\))\}" function_regexp_compile = re.compile(r"^([\w_]+)\(([\$\w\.@\-_ =,]*)\)$") def extract_variables(content): """ extract all variable names from content, which is in format $variable @param (str) content @return (list) variable name list e.g. $variable => ["variable"] /blog/$postid => ["postid"] /$var1/$var2 => ["var1", "var2"] abc => [] """ try: return re.findall(variable_regexp, content) except TypeError: return [] def extract_functions(content): """ extract all functions from string content, which are in format ${fun()} @param (str) content @return (list) functions list e.g. ${func(5)} => ["func(5)"] ${func(a=1, b=2)} => ["func(a=1, b=2)"] /api/1000?_t=${get_timestamp()} => ["get_timestamp()"] /api/${add(1, 2)} => ["add(1, 2)"] "/api/${add(1, 2)}?_t=${get_timestamp()}" => ["add(1, 2)", "get_timestamp()"] """ try: return re.findall(function_regexp, content) except TypeError: return [] def parse_string_value(str_value): """ parse string to number if possible e.g. "123" => 123 "12.2" => 12.3 "abc" => "abc" "$var" => "$var" """ try: return ast.literal_eval(str_value) except ValueError: return str_value except SyntaxError: # e.g. $var, ${func} return str_value def parse_function(content): """ parse function name and args from string content. @param (str) content @return (dict) function name and args e.g. 
func() => {'func_name': 'func', 'args': [], 'kwargs': {}} func(5) => {'func_name': 'func', 'args': [5], 'kwargs': {}} func(1, 2) => {'func_name': 'func', 'args': [1, 2], 'kwargs': {}} func(a=1, b=2) => {'func_name': 'func', 'args': [], 'kwargs': {'a': 1, 'b': 2}} func(1, 2, a=3, b=4) => {'func_name': 'func', 'args': [1, 2], 'kwargs': {'a':3, 'b':4}} """ matched = function_regexp_compile.match(content) if not matched: raise exception.FunctionNotFound("{} not found!".format(content)) function_meta = { "func_name": matched.group(1), "args": [], "kwargs": {} } args_str = matched.group(2).strip() if args_str == "": return function_meta args_list = args_str.split(',') for arg in args_list: arg = arg.strip() # TODO httprunner兼容 这种方法func(a=1, b=2) => {'func_name': 'func', 'args': [], 'kwargs': {'a': 1, 'b': 2}} # 但是当参数中包含有 = 号时,会处理有问题 # if '=' in arg: # key, value = arg.split('=') # function_meta["kwargs"][key.strip()] = parse_string_value(value.strip()) # else: function_meta["args"].append(parse_string_value(arg)) return function_meta class TestcaseLoader(object): overall_def_dict = { "api": {}, "suite": {} } testcases_cache_mapping = {} @staticmethod def load_test_dependencies(): """ load all api and suite definitions. default api folder is "$CWD/tests/api/". default suite folder is "$CWD/tests/suite/". 
""" # TODO: cache api and suite loading # load api definitions api_def_folder = os.path.join(os.getcwd(), "tests", "api") for test_file in FileUtils.load_folder_files(api_def_folder): TestcaseLoader.load_api_file(test_file) # load suite definitions suite_def_folder = os.path.join(os.getcwd(), "tests", "suite") for suite_file in FileUtils.load_folder_files(suite_def_folder): suite = TestcaseLoader.load_test_file(suite_file) if "def" not in suite["config"]: raise exception.ParamsError("def missed in suite file: {}!".format(suite_file)) call_func = suite["config"]["def"] function_meta = parse_function(call_func) suite["function_meta"] = function_meta TestcaseLoader.overall_def_dict["suite"][function_meta["func_name"]] = suite @staticmethod def load_api_file(file_path): """ load api definition from file and store in overall_def_dict["api"] api file should be in format below: [ { "api": { "def": "api_login", "request": {}, "validate": [] } }, { "api": { "def": "api_logout", "request": {}, "validate": [] } } ] """ api_items = FileUtils.load_file(file_path) if not isinstance(api_items, list): raise exception.FileFormatError("API format error: {}".format(file_path)) for api_item in api_items: if not isinstance(api_item, dict) or len(api_item) != 1: raise exception.FileFormatError("API format error: {}".format(file_path)) key, api_dict = api_item.popitem() if key != "api" or not isinstance(api_dict, dict) or "def" not in api_dict: raise exception.FileFormatError("API format error: {}".format(file_path)) api_def = api_dict.pop("def") function_meta = parse_function(api_def) func_name = function_meta["func_name"] if func_name in TestcaseLoader.overall_def_dict["api"]: logger.log_warning("API definition duplicated: {}".format(func_name)) api_dict["function_meta"] = function_meta TestcaseLoader.overall_def_dict["api"][func_name] = api_dict @staticmethod def load_test_file(file_path): """ load testcase file or suite file @param file_path: absolute valid file path file_path should 
be in format below: [ { "config": { "name": "", "def": "suite_order()", "request": {} } }, { "test": { "name": "add product to cart", "api": "api_add_cart()", "validate": [] } }, { "test": { "name": "checkout cart", "request": {}, "validate": [] } } ] @return testset dict { "name": "desc1", "config": {}, "testcases": [testcase11, testcase12] } """ testset = { "name": "", "config": { "path": file_path }, "testcases": [] # TODO: rename to tests } for item in FileUtils.load_file(file_path): if not isinstance(item, dict) or len(item) != 1: raise exception.FileFormatError("Testcase format error: {}".format(file_path)) key, test_block = item.popitem() if not isinstance(test_block, dict): raise exception.FileFormatError("Testcase format error: {}".format(file_path)) if key == "config": testset["config"].update(test_block) testset["name"] = test_block.get("name", "") elif key == "test": if "api" in test_block: ref_call = test_block["api"] def_block = TestcaseLoader._get_block_by_name(ref_call, "api") TestcaseLoader._override_block(def_block, test_block) testset["testcases"].append(test_block) elif "suite" in test_block: ref_call = test_block["suite"] block = TestcaseLoader._get_block_by_name(ref_call, "suite") testset["testcases"].extend(block["testcases"]) else: testset["testcases"].append(test_block) else: logger.log_warning( "unexpected block key: {}. block key should only be 'config' or 'test'.".format(key) ) return testset @staticmethod def _get_block_by_name(ref_call, ref_type): """ get test content by reference name @params: ref_call: e.g. 
api_v1_Account_Login_POST($UserName, $Password) ref_type: "api" or "suite" """ function_meta = parse_function(ref_call) func_name = function_meta["func_name"] call_args = function_meta["args"] block = TestcaseLoader._get_test_definition(func_name, ref_type) def_args = block.get("function_meta").get("args", []) if len(call_args) != len(def_args): raise exception.ParamsError("call args mismatch defined args!") args_mapping = {} for index, item in enumerate(def_args): if call_args[index] == item: continue args_mapping[item] = call_args[index] if args_mapping: block = substitute_variables_with_mapping(block, args_mapping) return block @staticmethod def _get_test_definition(name, ref_type): """ get expected api or suite. @params: name: api or suite name ref_type: "api" or "suite" @return expected api info if found, otherwise raise ApiNotFound exception """ block = TestcaseLoader.overall_def_dict.get(ref_type, {}).get(name) if not block: err_msg = "{} not found!".format(name) if ref_type == "api": raise exception.ApiNotFound(err_msg) else: # ref_type == "suite": raise exception.SuiteNotFound(err_msg) return block @staticmethod def _override_block(def_block, current_block): """ override def_block with current_block @param def_block: { "name": "get token", "request": {...}, "validate": [{'eq': ['status_code', 200]}] } @param current_block: { "name": "get token", "extract": [{"token": "content.token"}], "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } @return { "name": "get token", "request": {...}, "extract": [{"token": "content.token"}], "validate": [{'eq': ['status_code', 201]}, {'len_eq': ['content.token', 16]}] } """ def_validators = def_block.get("validate") or def_block.get("validators", []) current_validators = current_block.get("validate") or current_block.get("validators", []) def_extrators = def_block.get("extract") \ or def_block.get("extractors") \ or def_block.get("extract_binds", []) current_extractors = 
current_block.get("extract") \ or current_block.get("extractors") \ or current_block.get("extract_binds", []) current_block.update(def_block) current_block["validate"] = _merge_validator( def_validators, current_validators ) current_block["extract"] = _merge_extractor( def_extrators, current_extractors ) @staticmethod def load_testsets_by_path(path): """ load testcases from file path @param path: path could be in several type - absolute/relative file path - absolute/relative folder path - list/set container with file(s) and/or folder(s) @return testcase sets list, each testset is corresponding to a file [ testset_dict_1, testset_dict_2 ] """ if isinstance(path, (list, set)): testsets = [] for file_path in set(path): testset = TestcaseLoader.load_testsets_by_path(file_path) if not testset: continue testsets.extend(testset) return testsets if not os.path.isabs(path): path = os.path.join(os.getcwd(), path) if path in TestcaseLoader.testcases_cache_mapping: return TestcaseLoader.testcases_cache_mapping[path] if os.path.isdir(path): files_list = FileUtils.load_folder_files(path) testcases_list = TestcaseLoader.load_testsets_by_path(files_list) elif os.path.isfile(path): try: testset = TestcaseLoader.load_test_file(path) if testset["testcases"] or testset["api"]: testcases_list = [testset] else: testcases_list = [] except exception.FileFormatError: testcases_list = [] else: logger.log_error(u"file not found: {}".format(path)) testcases_list = [] TestcaseLoader.testcases_cache_mapping[path] = testcases_list return testcases_list def parse_validator(validator): """ parse validator, validator maybe in two format @param (dict) validator format1: this is kept for compatiblity with the previous versions. 
{"check": "status_code", "comparator": "eq", "expect": 201} {"check": "$resp_body_success", "comparator": "eq", "expect": True} format2: recommended new version {'eq': ['status_code', 201]} {'eq': ['$resp_body_success', True]} @return (dict) validator info { "check": "status_code", "expect": 201, "comparator": "eq" } """ if not isinstance(validator, dict): raise exception.ParamsError("invalid validator: {}".format(validator)) if "check" in validator and len(validator) > 1: # format1 check_item = validator.get("check") if "expect" in validator: expect_value = validator.get("expect") elif "expected" in validator: expect_value = validator.get("expected") else: raise exception.ParamsError("invalid validator: {}".format(validator)) comparator = validator.get("comparator", "eq") elif len(validator) == 1: # format2 comparator = list(validator.keys())[0] compare_values = validator[comparator] if not isinstance(compare_values, list) or len(compare_values) != 2: raise exception.ParamsError("invalid validator: {}".format(validator)) check_item, expect_value = compare_values else: raise exception.ParamsError("invalid validator: {}".format(validator)) return { "check": check_item, "expect": expect_value, "comparator": comparator } def _get_validators_mapping(validators): """ get validators mapping from api or test validators @param (list) validators: [ {"check": "v1", "expect": 201, "comparator": "eq"}, {"check": {"b": 1}, "expect": 200, "comparator": "eq"} ] @return { ("v1", "eq"): {"check": "v1", "expect": 201, "comparator": "eq"}, ('{"b": 1}', "eq"): {"check": {"b": 1}, "expect": 200, "comparator": "eq"} } """ validators_mapping = {} for validator in validators: validator = parse_validator(validator) if not isinstance(validator["check"], collections.Hashable): check = json.dumps(validator["check"]) else: check = validator["check"] key = (check, validator["comparator"]) validators_mapping[key] = validator return validators_mapping def _merge_validator(def_validators, 
current_validators): """ merge def_validators with current_validators @params: def_validators: [{'eq': ['v1', 200]}, {"check": "s2", "expect": 16, "comparator": "len_eq"}] current_validators: [{"check": "v1", "expect": 201}, {'len_eq': ['s3', 12]}] @return: [ {"check": "v1", "expect": 201, "comparator": "eq"}, {"check": "s2", "expect": 16, "comparator": "len_eq"}, {"check": "s3", "expect": 12, "comparator": "len_eq"} ] """ if not def_validators: return current_validators elif not current_validators: return def_validators else: api_validators_mapping = _get_validators_mapping(def_validators) test_validators_mapping = _get_validators_mapping(current_validators) api_validators_mapping.update(test_validators_mapping) return list(api_validators_mapping.values()) def _merge_extractor(def_extrators, current_extractors): """ merge def_extrators with current_extractors @params: def_extrators: [{"var1": "val1"}, {"var2": "val2"}] current_extractors: [{"var1": "val111"}, {"var3": "val3"}] @return: [ {"var1": "val111"}, {"var2": "val2"}, {"var3": "val3"} ] """ if not def_extrators: return current_extractors elif not current_extractors: return def_extrators else: extractor_dict = OrderedDict() for api_extrator in def_extrators: if len(api_extrator) != 1: logger.log_warning("incorrect extractor: {}".format(api_extrator)) continue var_name = list(api_extrator.keys())[0] extractor_dict[var_name] = api_extrator[var_name] for test_extrator in current_extractors: if len(test_extrator) != 1: logger.log_warning("incorrect extractor: {}".format(test_extrator)) continue var_name = list(test_extrator.keys())[0] extractor_dict[var_name] = test_extrator[var_name] extractor_list = [] for key, value in extractor_dict.items(): extractor_list.append({key: value}) return extractor_list def is_testset(data_structure): """ check if data_structure is a testset testset should always be in the following data structure: { "name": "desc1", "config": {}, "api": {}, "testcases": [testcase11, testcase12] 
} """ if not isinstance(data_structure, dict): return False if "name" not in data_structure or "testcases" not in data_structure: return False if not isinstance(data_structure["testcases"], list): return False return True def is_testsets(data_structure): """ check if data_structure is testset or testsets testsets should always be in the following data structure: testset_dict or [ testset_dict_1, testset_dict_2 ] """ if not isinstance(data_structure, list): return is_testset(data_structure) for item in data_structure: if not is_testset(item): return False return True def substitute_variables_with_mapping(content, mapping): """ substitute variables in content with mapping e.g. @params content = { 'request': { 'url': '/api/users/$uid', 'headers': {'token': '$token'} } } mapping = {"$uid": 1000} @return { 'request': { 'url': '/api/users/1000', 'headers': {'token': '$token'} } } """ # TODO: refactor type check if isinstance(content, bool): return content if isinstance(content, (numeric_types, type)): return content if not content: return content if isinstance(content, (list, set, tuple)): return [ substitute_variables_with_mapping(item, mapping) for item in content ] if isinstance(content, dict): substituted_data = {} for key, value in content.items(): eval_key = substitute_variables_with_mapping(key, mapping) eval_value = substitute_variables_with_mapping(value, mapping) substituted_data[eval_key] = eval_value return substituted_data # content is in string format here for var, value in mapping.items(): if content == var: # content is a variable content = value else: content = content.replace(var, str(value)) return content def gen_cartesian_product(*args): """ generate cartesian product for lists @param (list) args [{"a": 1}, {"a": 2}], [ {"x": 111, "y": 112}, {"x": 121, "y": 122} ] @return cartesian product in list [ {'a': 1, 'x': 111, 'y': 112}, {'a': 1, 'x': 121, 'y': 122}, {'a': 2, 'x': 111, 'y': 112}, {'a': 2, 'x': 121, 'y': 122} ] """ if not args: return [] elif 
len(args) == 1: return args[0] product_list = [] for product_item_tuple in itertools.product(*args): product_item_dict = {} for item in product_item_tuple: product_item_dict.update(item) product_list.append(product_item_dict) return product_list def parse_parameters(parameters, testset_path=None): """ parse parameters and generate cartesian product @params (list) parameters: parameter name and value in list parameter value may be in three types: (1) data list (2) call built-in parameterize function (3) call custom function in debugtalk.py e.g. [ {"user_agent": ["iOS/10.1", "iOS/10.2", "iOS/10.3"]}, {"username-password": "${parameterize(account.csv)}"}, {"app_version": "${gen_app_version()}"} ] (str) testset_path: testset file path, used for locating csv file and debugtalk.py @return cartesian product in list """ testcase_parser = TestcaseParser(file_path=testset_path) parsed_parameters_list = [] for parameter in parameters: parameter_name, parameter_content = list(parameter.items())[0] parameter_name_list = parameter_name.split("-") if isinstance(parameter_content, list): # (1) data list # e.g. {"app_version": ["2.8.5", "2.8.6"]} # => [{"app_version": "2.8.5", "app_version": "2.8.6"}] # e.g. {"username-password": [["user1", "111111"], ["test2", "222222"]} # => [{"username": "user1", "password": "111111"}, {"username": "user2", "password": "222222"}] parameter_content_list = [] for parameter_item in parameter_content: if not isinstance(parameter_item, (list, tuple)): # "2.8.5" => ["2.8.5"] parameter_item = [parameter_item] # ["app_version"], ["2.8.5"] => {"app_version": "2.8.5"} # ["username", "password"], ["user1", "111111"] => {"username": "user1", "password": "111111"} parameter_content_dict = dict(zip(parameter_name_list, parameter_item)) parameter_content_list.append(parameter_content_dict) else: # (2) & (3) parsed_parameter_content = testcase_parser.eval_content_with_bindings(parameter_content) # e.g. [{'app_version': '2.8.5'}, {'app_version': '2.8.6'}] # e.g. 
[{"username": "user1", "password": "111111"}, {"username": "user2", "password": "222222"}] if not isinstance(parsed_parameter_content, list): raise exception.ParamsError("parameters syntax error!") parameter_content_list = [ # get subset by parameter name {key: parameter_item[key] for key in parameter_name_list} for parameter_item in parsed_parameter_content ] parsed_parameters_list.append(parameter_content_list) return gen_cartesian_product(*parsed_parameters_list) class TestcaseParser(object): def __init__(self, variables={}, functions={}, file_path=None): self.update_binded_variables(variables) self.bind_functions(functions) self.file_path = file_path def update_binded_variables(self, variables): """ bind variables to current testcase parser @param (dict) variables, variables binds mapping { "authorization": "a83de0ff8d2e896dbd8efb81ba14e17d", "random": "A2dEx", "data": {"name": "user", "password": "123456"}, "uuid": 1000 } """ self.variables = variables def bind_functions(self, functions): """ bind functions to current testcase parser @param (dict) functions, functions binds mapping { "add_two_nums": lambda a, b=1: a + b } """ self.functions = functions def _get_bind_item(self, item_type, item_name): if item_type == "function": if item_name in self.functions: return self.functions[item_name] try: # check if builtin functions item_func = eval(item_name) if callable(item_func): # is builtin function return item_func except (NameError, TypeError): # is not builtin function, continue to search pass elif item_type == "variable": if item_name in self.variables: return self.variables[item_name] else: raise exception.ParamsError("bind item should only be function or variable.") try: assert self.file_path is not None return utils.search_conf_item(self.file_path, item_type, item_name) except (AssertionError, exception.FunctionNotFound): raise exception.ParamsError( "{} is not defined in bind {}s!".format(item_name, item_type)) def get_bind_function(self, func_name): 
return self._get_bind_item("function", func_name) def get_bind_variable(self, variable_name): return self._get_bind_item("variable", variable_name) def parameterize(self, csv_file_name, fetch_method="Sequential"): parameter_file_path = os.path.join( os.path.dirname(self.file_path), "{}".format(csv_file_name) ) csv_content_list = FileUtils.load_file(parameter_file_path) if fetch_method.lower() == "random": random.shuffle(csv_content_list) return csv_content_list def _eval_content_functions(self, content): functions_list = extract_functions(content) for func_content in functions_list: function_meta = parse_function(func_content) func_name = function_meta['func_name'] args = function_meta.get('args', []) kwargs = function_meta.get('kwargs', {}) args = self.eval_content_with_bindings(args) kwargs = self.eval_content_with_bindings(kwargs) if func_name in ["parameterize", "P"]: eval_value = self.parameterize(*args, **kwargs) else: func = self.get_bind_function(func_name) eval_value = func(*args, **kwargs) func_content = "${" + func_content + "}" if func_content == content: # content is a variable content = eval_value else: # content contains one or many variables content = content.replace( func_content, str(eval_value), 1 ) return content def _eval_content_variables(self, content): """ replace all variables of string content with mapping value. @param (str) content @return (str) parsed content e.g. 
variable_mapping = { "var_1": "abc", "var_2": "def" } $var_1 => "abc" $var_1#XYZ => "abc#XYZ" /$var_1/$var_2/var3 => "/abc/def/var3" ${func($var_1, $var_2, xyz)} => "${func(abc, def, xyz)}" """ variables_list = extract_variables(content) for variable_name in variables_list: variable_value = self.get_bind_variable(variable_name) if "${}".format(variable_name) == content: # content is a variable content = variable_value else: # content contains one or several variables content = content.replace( "${}".format(variable_name), str(variable_value), 1 ) return content def eval_content_with_bindings(self, content): """ parse content recursively, each variable and function in content will be evaluated. @param (dict) content in any data structure { "url": "http://127.0.0.1:5000/api/users/$uid/${add_two_nums(1, 1)}", "method": "POST", "headers": { "Content-Type": "application/json", "authorization": "$authorization", "random": "$random", "sum": "${add_two_nums(1, 2)}" }, "body": "$data" } @return (dict) parsed content with evaluated bind values { "url": "http://127.0.0.1:5000/api/users/1000/2", "method": "POST", "headers": { "Content-Type": "application/json", "authorization": "a83de0ff8d2e896dbd8efb81ba14e17d", "random": "A2dEx", "sum": 3 }, "body": {"name": "user", "password": "123456"} } """ if content is None: return None if isinstance(content, (list, tuple)): return [ self.eval_content_with_bindings(item) for item in content ] if isinstance(content, dict): evaluated_data = {} for key, value in content.items(): eval_key = self.eval_content_with_bindings(key) eval_value = self.eval_content_with_bindings(value) evaluated_data[eval_key] = eval_value return evaluated_data if isinstance(content, basestring): # content is in string format here content = content.strip() # replace functions with evaluated value # Notice: _eval_content_functions must be called before _eval_content_variables content = self._eval_content_functions(content) # replace variables with binding value 
content = self._eval_content_variables(content) return content
34.50915
113
0.541969
4a014b5f0743fad5e621d3f7a52eddaafdee946a
76,005
py
Python
src/main_dlg.py
TheSin-/terracoin-masternode-tool
9d670c89a428cdbdc359234c0d1b83ce6ff569a2
[ "MIT" ]
null
null
null
src/main_dlg.py
TheSin-/terracoin-masternode-tool
9d670c89a428cdbdc359234c0d1b83ce6ff569a2
[ "MIT" ]
null
null
null
src/main_dlg.py
TheSin-/terracoin-masternode-tool
9d670c89a428cdbdc359234c0d1b83ce6ff569a2
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Author: Bertrand256 # Created on: 2017-03 import base64 import binascii import datetime import json import os import platform import re import sys import threading import time import bitcoin import logging from PyQt5 import QtCore from PyQt5 import QtWidgets from PyQt5.QtCore import QSize, pyqtSlot, QEventLoop, QMutex, QWaitCondition, QUrl from PyQt5.QtGui import QFont, QIcon, QDesktopServices from PyQt5.QtGui import QPixmap from PyQt5.QtWidgets import QFileDialog, QMenu, QMainWindow, QPushButton, QStyle, QInputDialog from PyQt5.QtWidgets import QMessageBox from config_dlg import ConfigDlg from find_coll_tx_dlg import FindCollateralTxDlg import about_dlg import app_cache as cache import terracoin_utils import hw_pass_dlg import hw_pin_dlg import send_payout_dlg import app_utils from initialize_hw_dlg import HwInitializeDlg from proposals_dlg import ProposalsDlg from app_config import AppConfig, MasterNodeConfig, APP_NAME_LONG, APP_NAME_SHORT, PROJECT_URL from terracoin_utils import bip32_path_n_to_string from terracoind_intf import TerracoindInterface, TerracoindIndexException from hw_common import HardwareWalletCancelException, HardwareWalletPinException import hw_intf from hw_setup_dlg import HwSetupDlg from psw_cache import SshPassCache from sign_message_dlg import SignMessageDlg from wnd_utils import WndUtils from ui import ui_main_dlg from app_config import HWType class MainWindow(QMainWindow, WndUtils, ui_main_dlg.Ui_MainWindow): update_status_signal = QtCore.pyqtSignal(str, str) # signal for updating status text from inside thread def __init__(self, app_path): QMainWindow.__init__(self) WndUtils.__init__(self, None) ui_main_dlg.Ui_MainWindow.__init__(self) self.config = AppConfig() self.config.init(app_path) WndUtils.set_app_config(self, self.config) self.terracoind_intf = TerracoindInterface(self.config, window=None, on_connection_begin_callback=self.on_connection_begin, 
on_connection_try_fail_callback=self.on_connection_failed, on_connection_finished_callback=self.on_connection_finished) self.terracoind_info = {} self.is_terracoind_syncing = False self.terracoind_connection_ok = False self.connecting_to_terracoind = False self.hw_client = None self.curMasternode = None self.editingEnabled = False self.app_path = app_path # bip32 cache: # { "terracoin_address_of_the_parent": { bip32_path: terracoin_address } self.bip32_cache = { } self.setupUi() def setupUi(self): ui_main_dlg.Ui_MainWindow.setupUi(self, self) self.setWindowTitle(APP_NAME_LONG + ' by TheSin' + ( ' (v. ' + self.config.app_version + ')' if self.config.app_version else '')) SshPassCache.set_parent_window(self) self.inside_setup_ui = True self.terracoind_intf.window = self self.btnHwBip32ToAddress.setEnabled(False) # self.edtMnStatus.setReadOnly(True) # self.edtMnStatus.setStyleSheet('QLineEdit{background-color: lightgray}') self.closeEvent = self.closeEvent self.lblStatus1 = QtWidgets.QLabel(self) self.lblStatus1.setAutoFillBackground(False) self.lblStatus1.setOpenExternalLinks(True) self.statusBar.addPermanentWidget(self.lblStatus1, 1) self.lblStatus1.setText('') self.lblStatus2 = QtWidgets.QLabel(self) self.statusBar.addPermanentWidget(self.lblStatus2, 2) self.lblStatus2.setText('') img = QPixmap(os.path.join(self.app_path, "img/tmt.png")) img = img.scaled(QSize(64, 64)) self.lblAbout.setPixmap(img) self.setStatus1Text('<b>RPC network status:</b> not connected', 'black') self.setStatus2Text('<b>HW status:</b> idle', 'black') if sys.platform == 'win32': # improve buttons' ugly look on windows styleSheet = """QPushButton {padding: 3px 10px 3px 10px}""" btns = self.groupBox.findChildren(QPushButton) for btn in btns: btn.setStyleSheet(styleSheet) # set stylesheet for editboxes, supporting different colors for read-only and edting mode styleSheet = """ QLineEdit{background-color: white} QLineEdit:read-only{background-color: lightgray} """ self.setStyleSheet(styleSheet) 
self.setIcon(self.btnHwCheck, 'hw-test.ico') self.setIcon(self.btnHwDisconnect, "hw-lock.ico") self.setIcon(self.btnHwAddressToBip32, QStyle.SP_ArrowRight) self.setIcon(self.btnHwBip32ToAddress, QStyle.SP_ArrowLeft) self.setIcon(self.btnConfiguration, "gear.png") self.setIcon(self.btnProposals, "thumb-up.png") self.setIcon(self.btnActions, "tools.png") self.setIcon(self.btnCheckConnection, QStyle.SP_CommandLink) self.setIcon(self.btnSaveConfiguration, QStyle.SP_DriveFDIcon) self.setIcon(self.btnAbout, QStyle.SP_MessageBoxInformation) # create popup menu for actions button mnu = QMenu() # transfer for current mn self.actTransferFundsSelectedMn = mnu.addAction("Transfer funds from current masternode's address...") self.setIcon(self.actTransferFundsSelectedMn, "dollar.png") self.actTransferFundsSelectedMn.triggered.connect(self.on_actTransferFundsSelectedMn_triggered) # transfer for all mns self.actTransferFundsForAllMns = mnu.addAction("Transfer funds from all Masternodes addresses...") self.setIcon(self.actTransferFundsForAllMns, "money-bag.png") self.actTransferFundsForAllMns.triggered.connect(self.on_actTransferFundsForAllMns_triggered) # transfer for a specified address/bip32 path self.actTransferFundsForAddress = mnu.addAction("Transfer funds from any HW address...") self.setIcon(self.actTransferFundsForAddress, "wallet.png") self.actTransferFundsForAddress.triggered.connect(self.on_actTransferFundsForAddress_triggered) # sign message with HW self.actSignMessageWithHw = mnu.addAction("Sign message with HW for current Masternode's address...") self.setIcon(self.actSignMessageWithHw, "sign.png") self.actSignMessageWithHw.triggered.connect(self.on_actSignMessageWithHw_triggered) # hardware wallet setup tools self.actHwSetup = mnu.addAction("Hardware wallet PIN/passphrase configuration...") self.setIcon(self.actHwSetup, "hw.png") self.actHwSetup.triggered.connect(self.on_actHwSetup_triggered) # hardware wallet initialization dialog self.actHwSetup = 
mnu.addAction("Hardware wallet initialization/recovery...") self.setIcon(self.actHwSetup, "recover.png") self.actHwSetup.triggered.connect(self.on_actHwInitialize_triggered) mnu.addSeparator() # the "check for updates" menu item self.actCheckForUpdates = mnu.addAction("Check for updates") self.actCheckForUpdates.triggered.connect(self.on_actCheckForUpdates_triggered) self.btnActions.setMenu(mnu) # the "log file" menu item self.actLogFile = mnu.addAction('Open log file (%s)' % self.config.log_file) self.actLogFile.triggered.connect(self.on_actLogFile_triggered) # add masternodes' info to the combobox self.cboMasternodes.clear() for mn in self.config.masternodes: self.cboMasternodes.addItem(mn.name, mn) if self.config.masternodes: # get last masternode selected idx = cache.get_value('WndMainCurMasternodeIndex', 0, int) if idx >= len(self.config.masternodes): idx = 0 self.curMasternode = self.config.masternodes[idx] self.displayMasternodeConfig(True) else: self.curMasternode = None # after loading whole configuration, reset 'modified' variable self.config.modified = False self.updateControlsState() self.setMessage("", None) self.on_actCheckForUpdates_triggered(True, force_check=False) self.inside_setup_ui = False self.config.start_cache() logging.info('Finished setup of the main dialog.') @pyqtSlot(bool) def on_actCheckForUpdates_triggered(self, checked, force_check=True): if self.config.check_for_updates: cur_date = datetime.datetime.now().strftime('%Y-%m-%d') self.runInThread(self.checkForUpdates, (cur_date, force_check)) @pyqtSlot(bool) def on_actLogFile_triggered(self, checked): if os.path.exists(self.config.log_file): ret = QDesktopServices.openUrl(QUrl("file:///%s" % self.config.log_file)) if not ret: self.warnMsg('Could not open "%s" file in a default OS application.' % self.config.log_file) def checkForUpdates(self, ctrl, cur_date_str, force_check): """ Thread function, checking on GitHub if there is a new version of the application. 
:param ctrl: thread control structure (not used here) :param cur_date_str: Current date string - it will be saved in the cache file as the date of the last-version-check date. :param force_check: True if version-check has been invoked by the user, not the app itself. :return: None """ try: import urllib.request response = urllib.request.urlopen( 'https://raw.githubusercontent.com/TheSin-/terracoin-masternode-tool/master/version.txt') contents = response.read() lines = contents.decode().splitlines() remote_version_str = app_utils.extract_app_version(lines) remote_ver = app_utils.version_str_to_number(remote_version_str) local_ver = app_utils.version_str_to_number(self.config.app_version) cache.set_value('check_for_updates_last_date', cur_date_str) if remote_ver > local_ver: if sys.platform == 'win32': item_name = 'exe_win' no_bits = platform.architecture()[0].replace('bit', '') if no_bits == '32': item_name += '32' elif sys.platform == 'darwin': item_name = 'exe_mac' else: item_name = 'exe_linux' exe_url = '' for line in lines: elems = [x.strip() for x in line.split('=')] if len(elems) == 2 and elems[0] == item_name: exe_url = elems[1].strip("'") break if exe_url: msg = "New version (" + remote_version_str + ') available: <a href="' + exe_url + '">download</a>.' else: msg = "New version (" + remote_version_str + ') available. Go to the project website: <a href="' + PROJECT_URL + '">open</a>.' self.setMessage(msg, 'green') else: if force_check: self.setMessage("You have the latest version of %s." % APP_NAME_SHORT, 'green') except Exception as e: pass def closeEvent(self, event): if self.terracoind_intf: self.terracoind_intf.disconnect() if self.configModified(): if self.queryDlg('Configuration modified. 
Save?', buttons=QMessageBox.Yes | QMessageBox.No, default_button=QMessageBox.Yes, icon=QMessageBox.Information) == QMessageBox.Yes: self.on_btnSaveConfiguration_clicked(True) self.config.close() def displayMasternodeConfig(self, set_mn_list_index): if self.curMasternode and set_mn_list_index: self.cboMasternodes.setCurrentIndex(self.config.masternodes.index(self.curMasternode)) try: if self.curMasternode: self.curMasternode.lock_modified_change = True self.edtMnName.setText(self.curMasternode.name if self.curMasternode else '') self.edtMnIp.setText(self.curMasternode.ip if self.curMasternode else '') self.edtMnPort.setText(str(self.curMasternode.port) if self.curMasternode else '') self.edtMnPrivateKey.setText(self.curMasternode.privateKey if self.curMasternode else '') self.edtMnCollateralBip32Path.setText(self.curMasternode.collateralBip32Path if self.curMasternode else '') self.edtMnCollateralAddress.setText(self.curMasternode.collateralAddress if self.curMasternode else '') self.edtMnCollateralTx.setText(self.curMasternode.collateralTx if self.curMasternode else '') self.edtMnCollateralTxIndex.setText(self.curMasternode.collateralTxIndex if self.curMasternode else '') use_default_protocol = self.curMasternode.use_default_protocol_version if self.curMasternode else True self.chbUseDefaultProtocolVersion.setChecked(use_default_protocol) self.edtMnProtocolVersion.setText(self.curMasternode.protocol_version if self.curMasternode else '') self.edtMnProtocolVersion.setVisible(not use_default_protocol) self.lblMnStatus.setText('') finally: if self.curMasternode: self.curMasternode.lock_modified_change = False @pyqtSlot(bool) def on_btnConfiguration_clicked(self): dlg = ConfigDlg(self, self.config) dlg.exec_() del dlg def connsCfgChanged(self): """ If connections config is changed, we must apply the changes to the terracoind interface object :return: """ try: self.terracoind_intf.apply_new_cfg() self.updateControlsState() except Exception as e: self.errorMsg(str(e)) 
@pyqtSlot(bool) def on_btnAbout_clicked(self): ui = about_dlg.AboutDlg(self, self.config.app_version) ui.exec_() def on_connection_begin(self): """ Called just before establising connection to a terracoin RPC. """ self.setStatus1Text('<b>RPC network status:</b> trying %s...' % self.terracoind_intf.get_active_conn_description(), 'black') def on_connection_failed(self): """ Called after failed connection attempt. There can be more attempts to connect to another nodes if there are such in configuration. """ self.setStatus1Text('<b>RPC network status:</b> failed connection to %s' % self.terracoind_intf.get_active_conn_description(), 'red') def on_connection_finished(self): """ Called after connection to terracoin daemon sucessufully establishes. """ logging.debug("on_connection_finished") self.setStatus1Text('<b>RPC network status:</b> OK (%s)' % self.terracoind_intf.get_active_conn_description(), 'green') def checkTerracoindConnection(self, wait_for_check_finish=False, call_on_check_finished=None): """ Connects do terracoin daemon if not connected before and returnes if it was successful. :param wait_for_check_finish: True if function is supposed to wait until connection check is finished (process is executed in background) :param call_on_check_finished: ref to function to be called after connection test (successful or unsuccessful) is finished """ # if wait_for_check_finish is True, we have to process QT events while waiting for thread to terminate to # avoid deadlocking of functions: connect_thread and connect_finished if wait_for_check_finish: event_loop = QEventLoop(self) else: event_loop = None def wait_for_synch_finished_thread(ctrl): """ Thread waiting for terracoin daemon to finish synchronizing. 
""" mtx = QMutex() cond = QWaitCondition() try: logging.info('wait_for_synch_finished_thread') mtx.lock() while not ctrl.finish: synced = self.terracoind_intf.issynchronized() if synced: self.is_terracoind_syncing = False self.on_connection_finished() break mnsync = self.terracoind_intf.mnsync() self.setMessage('Terracoind is synchronizing: AssetID: %s, AssetName: %s' % (str(mnsync.get('AssetID', '')), str(mnsync.get('AssetName', '')) ), style='{background-color:rgb(255,128,0);color:white;padding:3px 5px 3px 5px; border-radius:3px}') cond.wait(mtx, 5000) self.setMessage('') except Exception as e: self.is_terracoind_syncing = False self.terracoind_connection_ok = False self.setMessage(str(e), style='{background-color:red;color:white;padding:3px 5px 3px 5px; border-radius:3px}') finally: mtx.unlock() self.wait_for_terracoind_synced_thread = None def connect_thread(ctrl): """ Test connection to terracoin network inside a thread to avoid blocking GUI. :param ctrl: control structure to communicate with WorkerThread object (not used here) """ try: synced = self.terracoind_intf.issynchronized() self.terracoind_info = self.terracoind_intf.getinfo() self.terracoind_connection_ok = True if not synced: logging.info("terracoind not synced") if not self.is_terracoind_syncing and not (hasattr(self, 'wait_for_terracoind_synced_thread') and self.wait_for_terracoind_synced_thread is not None): self.is_terracoind_syncing = True self.wait_for_terracoind_synced_thread = self.runInThread(wait_for_synch_finished_thread, (), on_thread_finish=connect_finished) else: self.is_terracoind_syncing = False self.setMessage('') except Exception as e: err = str(e) if not err: err = 'Connect error: %s' % type(e).__name__ self.is_terracoind_syncing = False self.terracoind_connection_ok = False self.on_connection_failed() self.setMessage(err, style='{background-color:red;color:white;padding:3px 5px 3px 5px; border-radius:3px}') def connect_finished(): """ Called after thread terminates. 
""" del self.check_conn_thread self.check_conn_thread = None self.connecting_to_terracoind = False if call_on_check_finished: call_on_check_finished() if event_loop: event_loop.exit() if self.config.is_config_complete(): if not hasattr(self, 'check_conn_thread') or self.check_conn_thread is None: if hasattr(self, 'wait_for_terracoind_synced_thread') and self.wait_for_terracoind_synced_thread is not None: if call_on_check_finished is not None: # if a thread waiting for terracoind to finish synchronizing is running, call the callback function call_on_check_finished() else: self.connecting_to_terracoind = True self.check_conn_thread = self.runInThread(connect_thread, (), on_thread_finish=connect_finished) if wait_for_check_finish: event_loop.exec() else: # configuration is not complete logging.warning("config not complete") self.is_terracoind_syncing = False self.terracoind_connection_ok = False @pyqtSlot(bool) def on_btnCheckConnection_clicked(self): def connection_test_finished(): self.btnCheckConnection.setEnabled(True) self.btnBroadcastMn.setEnabled(True) self.btnRefreshMnStatus.setEnabled(True) self.btnActions.setEnabled(True) if self.terracoind_connection_ok: if self.is_terracoind_syncing: self.infoMsg('Connection successful, but Terracoin daemon is synchronizing.') else: self.infoMsg('Connection successful.') else: if self.terracoind_intf.last_error_message: self.errorMsg('Connection error: ' + self.terracoind_intf.last_error_message) else: self.errorMsg('Connection error') if self.config.is_config_complete(): self.btnCheckConnection.setEnabled(False) self.btnBroadcastMn.setEnabled(False) self.btnRefreshMnStatus.setEnabled(False) self.btnActions.setEnabled(False) self.checkTerracoindConnection(call_on_check_finished=connection_test_finished) else: # configuration not complete: show config window if self.queryDlg("There is no (enabled) connections to RPC node in your configuration. 
Open configuration dialog?", buttons=QMessageBox.Yes | QMessageBox.Cancel, default_button=QMessageBox.Yes, icon=QMessageBox.Warning) == QMessageBox.Yes: self.on_btnConfiguration_clicked() def setStatus1Text(self, text, color): def set_status(text, color): self.lblStatus1.setText(text) if not color: color = 'black' self.lblStatus1.setStyleSheet('QLabel{color: ' + color + ';margin-right:20px;margin-left:8px}') if threading.current_thread() != threading.main_thread(): self.call_in_main_thread(set_status, text, color) else: set_status(text, color) def setStatus2Text(self, text, color): def set_status(text, color): self.lblStatus2.setText(text) if not color: color = 'black' self.lblStatus2.setStyleSheet('QLabel{color: ' + color + '}') if threading.current_thread() != threading.main_thread(): self.call_in_main_thread(set_status, text, color) else: set_status(text, color) def setMessage(self, text, color=None, style=None): """ Display message in the app message area. :param text: Text to be displayed. If Text is empty, message area will be hidden. :param color: Color of thext. 
""" def set_message(text, color, style): left, top, right, bottom = self.layMessage.getContentsMargins() if not text: self.lblMessage.setVisible(False) self.layMessage.setContentsMargins(left, top, right, 0) else: self.lblMessage.setVisible(True) self.lblMessage.setText(text) self.layMessage.setContentsMargins(left, top, right, 4) if color: style = '{color:%s}' % color if style: self.lblMessage.setStyleSheet('QLabel%s' % style) if threading.current_thread() != threading.main_thread(): self.call_in_main_thread(set_message, text, color, style) else: set_message(text, color, style) def getHwName(self): if self.config.hw_type == HWType.trezor: return 'Trezor' elif self.config.hw_type == HWType.keepkey: return 'KeepKey' elif self.config.hw_type == HWType.ledger_nano_s: return 'Ledger Nano S' else: return 'Unknown HW Type' def connectHardwareWallet(self): """ Connects to hardware wallet if not connected before. :return: True, if successfully connected, False if not """ ret = None if self.hw_client: cur_hw_type = hw_intf.get_hw_type(self.hw_client) if self.config.hw_type != cur_hw_type: self.on_btnHwDisconnect_clicked() if not self.hw_client: try: try: logging.info('Connecting to a hardware wallet device') self.hw_client = hw_intf.connect_hw(passphrase_encoding=self.config.hw_keepkey_psw_encoding, hw_type=self.config.hw_type) logging.info('Connected to a hardware wallet') self.setStatus2Text('<b>HW status:</b> connected to %s' % hw_intf.get_hw_label(self, self.hw_client), 'green') self.updateControlsState() except Exception as e: self.hw_client = None logging.info('Could not connect to a hardware wallet') self.setStatus2Text('<b>HW status:</b> cannot connect to %s device' % self.getHwName(), 'red') self.errorMsg(str(e)) ret = self.hw_client except HardwareWalletPinException as e: self.errorMsg(e.msg) if self.hw_client: self.hw_client.clear_session() self.updateControlsState() except OSError as e: logging.exception('Exception occurred') self.errorMsg('Cannot open %s 
device.' % self.getHwName()) self.updateControlsState() except Exception as e: logging.exception('Exception occurred') self.errorMsg(str(e)) if self.hw_client: self.hw_client.init_device() self.updateControlsState() else: ret = self.hw_client return ret def btnConnectTrezorClick(self): self.connectHardwareWallet() @pyqtSlot(bool) def on_btnHwCheck_clicked(self): self.connectHardwareWallet() self.updateControlsState() if self.hw_client: try: if self.config.hw_type in (HWType.trezor, HWType.keepkey): features = self.hw_client.features hw_intf.ping(self, 'Hello, press the button', button_protection=False, pin_protection=features.pin_protection, passphrase_protection=features.passphrase_protection) self.infoMsg('Connection to %s device (%s) successful.' % (self.getHwName(), hw_intf.get_hw_label(self, self.hw_client))) elif self.config.hw_type == HWType.ledger_nano_s: self.infoMsg('Connection to %s device successful.' % (self.getHwName(),)) except HardwareWalletCancelException: if self.hw_client: self.hw_client.init_device() def disconnectHardwareWallet(self): if self.hw_client: hw_intf.disconnect_hw(self.hw_client) del self.hw_client self.hw_client = None self.setStatus2Text('<b>HW status:</b> idle', 'black') self.updateControlsState() @pyqtSlot(bool) def on_btnHwDisconnect_clicked(self): self.disconnectHardwareWallet() @pyqtSlot(bool) def on_btnNewMn_clicked(self): self.newMasternodeConfig() @pyqtSlot(bool) def on_btnDeleteMn_clicked(self): if self.curMasternode: msg = QMessageBox() msg.setIcon(QMessageBox.Warning) msg.setText('Do you really want to delete current masternode configuration?') msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No) msg.setDefaultButton(QMessageBox.No) retval = msg.exec_() if retval == QMessageBox.No: return self.config.masternodes.remove(self.curMasternode) self.cboMasternodes.removeItem(self.cboMasternodes.currentIndex()) self.config.modified = True self.updateControlsState() @pyqtSlot(bool) def on_btnEditMn_clicked(self): 
self.editingEnabled = True self.updateControlsState() def hwScanForBip32Paths(self, addresses): """ Scans hardware wallet for bip32 paths of all Terracoin addresses passed in the addresses list. :param addresses: list of Terracoin addresses to scan :return: dict {terracoin_address: bip32_path} """ def scan_for_bip32_thread(ctrl, addresses): """ Function run inside a thread which purpose is to scan hawrware wallet for a bip32 paths with given Terracoin addresses. :param cfg: Thread dialog configuration object. :param addresses: list of Terracoin addresses to find bip32 path :return: """ paths_found = 0 paths_checked = 0 found_adresses = {} user_cancelled = False ctrl.dlg_config_fun(dlg_title="Scanning hardware wallet...", show_progress_bar=False) self.connectHardwareWallet() if self.hw_client: # get terracoin address of the parent address_n = [2147483692, # 44' 2147483731, # 83' ] addr_of_cur_path = hw_intf.get_address(self, address_n) b32cache = self.bip32_cache.get(addr_of_cur_path, None) modified_b32cache = False cache_file = os.path.join(self.config.cache_dir, 'bip32cache_%s.json' % addr_of_cur_path) if not b32cache: # entry for parrent address was not scanned since starting the app, find cache file on disk try: # looking into cache first b32cache = json.load(open(cache_file)) except: # cache file not found b32cache = {} # create in cache entry for tree beginning from our parent path (different hw passphrase # gives different bip32 parent path) self.bip32_cache[addr_of_cur_path] = b32cache for addr_to_find_bip32 in addresses: if not found_adresses.get(addr_to_find_bip32): # check 10 addresses of account 0 (44'/83'/0'/0), then 10 addreses # of account 1 (44'/83'/1'/0) and so on until 9th account. 
# if not found, then check next 10 addresses of account 0 (44'/83'/0'/0) # and so on; we assume here, that user rather puts collaterals # under first addresses of subsequent accounts than under far addresses # of the first account; if so, following iteration shuld be faster found = False if ctrl.finish: break for tenth_nr in range(0, 10): if ctrl.finish: break for account_nr in range(0, 10): if ctrl.finish: break for index in range(0, 10): if ctrl.finish: break address_n = [2147483692, # 44' 2147483731, # 83' 2147483648 + account_nr, # 0' + account_nr 0, (tenth_nr * 10) + index] cur_bip32_path = bip32_path_n_to_string(address_n) ctrl.display_msg_fun( '<b>Scanning hardware wallet for BIP32 paths, please wait...</b><br><br>' 'Paths scanned: <span style="color:black">%d</span><br>' 'Keys found: <span style="color:green">%d</span><br>' 'Current path: <span style="color:blue">%s</span><br>' % (paths_checked, paths_found, cur_bip32_path)) # first, find terracoin address in cache by bip32 path addr_of_cur_path = b32cache.get(cur_bip32_path, None) if not addr_of_cur_path: addr_of_cur_path = hw_intf.get_address(self, address_n) b32cache[cur_bip32_path] = addr_of_cur_path modified_b32cache = True paths_checked += 1 if addr_to_find_bip32 == addr_of_cur_path: found_adresses[addr_to_find_bip32] = cur_bip32_path found = True paths_found += 1 break elif not found_adresses.get(addr_of_cur_path, None) and \ addr_of_cur_path in addresses: # address of current bip32 path is in the search list found_adresses[addr_of_cur_path] = cur_bip32_path if found: break if found: break if modified_b32cache: # save modified cache to file if cache_file: try: # saving into cache json.dump(b32cache, open(cache_file, 'w')) except Exception as e: pass if ctrl.finish: user_cancelled = True return found_adresses, user_cancelled paths_found, user_cancelled = self.threadFunctionDialog(scan_for_bip32_thread, (addresses,), True, buttons=[{'std_btn': QtWidgets.QDialogButtonBox.Cancel}], center_by_window=self) 
return paths_found, user_cancelled @pyqtSlot(bool) def on_btnImportMasternodesConf_clicked(self): """ Imports masternodes configuration from masternode.conf file. """ file_name = self.open_file_query(message='Enter the path to the masternode.conf configuration file', directory='', filter="All Files (*);;Conf files (*.conf)", initial_filter="Conf files (*.conf)") if file_name: if os.path.exists(file_name): if not self.editingEnabled: self.on_btnEditMn_clicked() try: with open(file_name, 'r') as f_ptr: modified = False imported_cnt = 0 skipped_cnt = 0 mns_imported = [] for line in f_ptr.readlines(): line = line.strip() if not line: continue elems = line.split() if len(elems) >= 5 and not line.startswith('#'): mn_name = elems[0] mn_ipport = elems[1] mn_privkey = elems[2] mn_tx_hash = elems[3] mn_tx_idx = elems[4] mn_terracoin_addr = '' if len(elems) > 5: mn_terracoin_addr = elems[5] def update_mn(in_mn): in_mn.name = mn_name ipelems = mn_ipport.split(':') if len(ipelems) >= 2: in_mn.ip = ipelems[0] in_mn.port = ipelems[1] else: in_mn.ip = mn_ipport in_mn.port = '13333' in_mn.privateKey = mn_privkey in_mn.collateralAddress = mn_terracoin_addr in_mn.collateralTx = mn_tx_hash in_mn.collateralTxIndex = mn_tx_idx in_mn.collateralBip32Path = '' mn = self.config.get_mn_by_name(mn_name) if mn: msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText('Masternode ' + mn_name + ' exists. 
Overwrite?') msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No) msg.setDefaultButton(QMessageBox.Yes) retval = msg.exec_() del msg if retval == QMessageBox.No: skipped_cnt += 1 continue else: # overwrite data imported_cnt += 1 update_mn(mn) mn.modified = True modified = True mns_imported.append(mn) if self.curMasternode == mn: # current mn has been updated - update UI controls to new data self.displayMasternodeConfig(False) else: imported_cnt += 1 mn = MasterNodeConfig() update_mn(mn) modified = True self.config.add_mn(mn) self.cboMasternodes.addItem(mn.name, mn) mns_imported.append(mn) else: # incorrenct number of elements skipped_cnt += 1 if modified: self.updateControlsState() if imported_cnt: msg_text = 'Successfully imported %s masternode(s)' % str(imported_cnt) if skipped_cnt: msg_text += ', skipped: %s' % str(skipped_cnt) msg_text += ".\n\nIf you want to scan your " + self.getHwName() + \ " for BIP32 path(s) corresponding to collateral addresses, connect your " + \ self.getHwName() + " and click Yes." + \ "\n\nIf you want to enter BIP32 path(s) manually, click No." 
if self.queryDlg(message=msg_text, buttons=QMessageBox.Yes | QMessageBox.No, default_button=QMessageBox.Yes) == QMessageBox.Yes: # scan all Terracoin addresses from imported masternodes for BIP32 path, starting from # first standard Terracoin BIP32 path addresses_to_scan = [] for mn in mns_imported: if not mn.collateralBip32Path and mn.collateralAddress: addresses_to_scan.append(mn.collateralAddress) self.disconnectHardwareWallet() # forcing to enter the passphrase again found_paths, user_cancelled = self.hwScanForBip32Paths(addresses_to_scan) paths_missing = 0 for mn in mns_imported: if not mn.collateralBip32Path and mn.collateralAddress: path = found_paths.get(mn.collateralAddress) mn.collateralBip32Path = path if path: if self.curMasternode == mn: # current mn has been updated - update UI controls # to new data self.displayMasternodeConfig(False) else: paths_missing += 1 if paths_missing: self.warnMsg('Not all BIP32 paths were found. You have to manually enter ' 'missing paths.') elif skipped_cnt: self.infoMsg('Operation finished with no imported and %s skipped masternodes.' 
                                     % str(skipped_cnt))
                except Exception as e:
                    self.errorMsg('Reading file failed: ' + str(e))
            else:
                if file_name:
                    self.errorMsg("File '" + file_name + "' does not exist")

    @pyqtSlot(bool)
    def on_btnSaveConfiguration_clicked(self, clicked):
        # Toolbar handler: delegate to save_configuration().
        self.save_configuration()

    def save_configuration(self):
        """Persist the current configuration to disk and leave editing mode."""
        self.config.save_to_file()
        self.editingEnabled = False
        self.updateControlsState()

    def updateControlsState(self):
        """Enable/disable masternode-related UI controls according to the current
        editing state and the currently selected masternode.

        Safe to call from worker threads: the widget updates are marshalled to
        the main (GUI) thread when called from a non-main thread.
        """
        def update_fun():
            # Fields are editable only when edit mode is on AND a masternode is selected.
            editing = (self.editingEnabled and self.curMasternode is not None)
            self.edtMnIp.setReadOnly(not editing)
            self.edtMnName.setReadOnly(not editing)
            self.edtMnPort.setReadOnly(not editing)
            self.chbUseDefaultProtocolVersion.setEnabled(editing)
            self.edtMnProtocolVersion.setEnabled(editing)
            self.edtMnPrivateKey.setReadOnly(not editing)
            self.edtMnCollateralBip32Path.setReadOnly(not editing)
            self.edtMnCollateralAddress.setReadOnly(not editing)
            self.edtMnCollateralTx.setReadOnly(not editing)
            self.edtMnCollateralTxIndex.setReadOnly(not editing)
            self.btnGenerateMNPrivateKey.setEnabled(editing)
            # 'Find collateral' additionally requires a non-empty collateral address.
            self.btnFindCollateral.setEnabled(editing and
                                              self.curMasternode.collateralAddress is not None and
                                              self.curMasternode.collateralAddress != '')
            self.btnHwBip32ToAddress.setEnabled(editing)
            self.btnHwAddressToBip32.setEnabled(editing)
            self.btnDeleteMn.setEnabled(self.curMasternode is not None)
            self.btnEditMn.setEnabled(not self.editingEnabled and self.curMasternode is not None)
            self.btnSaveConfiguration.setEnabled(self.configModified())
            self.btnHwDisconnect.setEnabled(True if self.hw_client else False)
            self.btnRefreshMnStatus.setEnabled(self.curMasternode is not None)
            self.btnBroadcastMn.setEnabled(self.curMasternode is not None)

        if threading.current_thread() != threading.main_thread():
            # Qt widgets may only be touched from the main thread.
            self.call_in_main_thread(update_fun)
        else:
            update_fun()

    def configModified(self):
        """Return True if the global config or any masternode entry has unsaved changes."""
        # check if masternodes config was changed
        modified = self.config.modified
        if not modified:
            for mn in self.config.masternodes:
                if mn.modified:
                    modified = True
                    break
        return modified

    def newMasternodeConfig(self):
        """Create a new masternode configuration entry with a unique proposed
        name (MN1..MN99), append it to the config and select it in the UI in
        edit mode.
        """
        new_mn = MasterNodeConfig()
        new_mn.new = True
        self.curMasternode = new_mn
        # find new, not used masternode name proposal
        name_found = None
        for nr in range(1, 100):
            exists = False
            for mn in self.config.masternodes:
                if mn.name == 'MN' + str(nr):
                    exists = True
                    break
            if not exists:
                name_found = 'MN' + str(nr)
                break
        if name_found:
            new_mn.name = name_found
        self.config.masternodes.append(new_mn)
        self.editingEnabled = True
        old_index = self.cboMasternodes.currentIndex()
        self.cboMasternodes.addItem(new_mn.name, new_mn)
        if old_index != -1:
            # if masternodes combo was not empty before adding new mn, we have to manually set combobox
            # position to a new masternode position
            self.cboMasternodes.setCurrentIndex(self.config.masternodes.index(self.curMasternode))

    def curMnModified(self):
        """Mark the current masternode as modified and refresh the Save button state."""
        if self.curMasternode:
            self.curMasternode.set_modified()
        self.btnSaveConfiguration.setEnabled(self.configModified())

    @pyqtSlot(int)
    def on_cboMasternodes_currentIndexChanged(self):
        # Combobox selection changed: switch curMasternode and refresh the form.
        if self.cboMasternodes.currentIndex() >= 0:
            self.curMasternode = self.config.masternodes[self.cboMasternodes.currentIndex()]
        else:
            self.curMasternode = None
        self.displayMasternodeConfig(False)
        self.updateControlsState()
        if not self.inside_setup_ui:
            # remember the selection between application sessions
            cache.set_value('WndMainCurMasternodeIndex', self.cboMasternodes.currentIndex())

    @pyqtSlot(str)
    def on_edtMnName_textEdited(self):
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.name = self.edtMnName.text()
            # keep the combobox item label in sync with the edited name
            self.cboMasternodes.setItemText(self.cboMasternodes.currentIndex(), self.curMasternode.name)

    @pyqtSlot(str)
    def on_edtMnIp_textEdited(self):
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.ip = self.edtMnIp.text()

    @pyqtSlot(str)
    def on_edtMnPort_textEdited(self):
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.port = self.edtMnPort.text()

    @pyqtSlot(bool)
    def on_chbUseDefaultProtocolVersion_toggled(self, use_default):
        # When checked, the node's own protocol version is used instead of a
        # manually entered one (visibility toggle of the edit box follows).
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.use_default_protocol_version = use_default
        self.edtMnProtocolVersion.setVisible(not use_default)

    @pyqtSlot(str)
    def on_edtMnProtocolVersion_textEdited(self, version):
        # Manual protocol-version override edited by the user.
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.protocol_version = version

    @pyqtSlot(str)
    def on_edtMnPrivateKey_textEdited(self):
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.privateKey = self.edtMnPrivateKey.text()

    @pyqtSlot(str)
    def on_edtMnCollateralBip32Path_textEdited(self):
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.collateralBip32Path = self.edtMnCollateralBip32Path.text()
            # BIP32 path -> address conversion only makes sense with a non-empty path.
            if self.curMasternode.collateralBip32Path:
                self.btnHwBip32ToAddress.setEnabled(True)
            else:
                self.btnHwBip32ToAddress.setEnabled(False)

    @pyqtSlot(str)
    def on_edtMnCollateralAddress_textEdited(self):
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.collateralAddress = self.edtMnCollateralAddress.text()
            self.updateControlsState()
            # Address -> BIP32 path scan only makes sense with a non-empty address.
            if self.curMasternode.collateralAddress:
                self.btnHwAddressToBip32.setEnabled(True)
            else:
                self.btnHwAddressToBip32.setEnabled(False)

    @pyqtSlot(str)
    def on_edtMnCollateralTx_textEdited(self, text):
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.collateralTx = text
        else:
            logging.warning('curMasternode == None')

    @pyqtSlot(str)
    def on_edtMnCollateralTxIndex_textEdited(self, text):
        if self.curMasternode:
            self.curMnModified()
            self.curMasternode.collateralTxIndex = text
        else:
            logging.warning('curMasternode == None')

    @pyqtSlot(bool)
    def on_btnGenerateMNPrivateKey_clicked(self):
        # Warn before overwriting an existing private key.
        if self.edtMnPrivateKey.text():
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Warning)
            msg.setText('This will overwrite current private key value. 
Do you really want to proceed?') msg.setStandardButtons(QMessageBox.Ok | QMessageBox.No) msg.setDefaultButton(QMessageBox.No) retval = msg.exec_() if retval == QMessageBox.No: return wif = terracoin_utils.generate_privkey() self.curMasternode.privateKey = wif self.edtMnPrivateKey.setText(wif) self.curMnModified() @pyqtSlot(bool) def on_btnHwBip32ToAddress_clicked(self): """ Convert BIP32 path to Terracoin address. :return: """ try: self.connectHardwareWallet() if not self.hw_client: return if self.curMasternode and self.curMasternode.collateralBip32Path: terracoin_addr = hw_intf.get_address(self, self.curMasternode.collateralBip32Path) self.edtMnCollateralAddress.setText(terracoin_addr) self.curMasternode.collateralAddress = terracoin_addr self.curMnModified() except HardwareWalletCancelException: if self.hw_client: self.hw_client.init_device() except Exception as e: self.errorMsg(str(e)) @pyqtSlot(bool) def on_btnHwAddressToBip32_clicked(self): """ Converts Terracoin address to BIP32 path, using hardware wallet. :return: """ try: self.disconnectHardwareWallet() # forcing to enter the passphrase again self.connectHardwareWallet() if not self.hw_client: return if self.curMasternode and self.curMasternode.collateralAddress: paths, user_cancelled = self.hwScanForBip32Paths([self.curMasternode.collateralAddress]) if not user_cancelled: if not paths or len(paths) == 0: self.errorMsg("Couldn't find Terracoin address in your hardware wallet. 
If you are using HW passphrase, " "make sure, that you entered the correct one.") else: self.edtMnCollateralBip32Path.setText(paths.get(self.curMasternode.collateralAddress, '')) self.curMasternode.collateralBip32Path = paths.get(self.curMasternode.collateralAddress, '') self.curMnModified() else: logging.info('Cancelled') except HardwareWalletCancelException: if self.hw_client: self.hw_client.init_device() except Exception as e: self.errorMsg(str(e)) @pyqtSlot(bool) def on_btnBroadcastMn_clicked(self): """ Broadcasts information about configured Masternode within Terracoin network using Hwrdware Wallet for signing message and a Terracoin daemon for relaying message. Building broadcast message is based on work of chaeplin (https://github.com/chaeplin/dashmnb) """ if self.curMasternode: if not self.curMasternode.collateralTx: self.errorMsg("Collateral transaction id not set.") return try: int(self.curMasternode.collateralTx, 16) except ValueError: self.errorMsg('Invalid collateral transaction id (should be hexadecimal string).') self.edtMnCollateralTx.setFocus() return if not re.match('\d{1,4}', self.curMasternode.collateralTxIndex): self.errorMsg("Invalid collateral transaction index.") return if not re.match('\d{1,4}', self.curMasternode.port): self.errorMsg("Invalid masternode's TCP port number.") return if not re.match('\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', self.curMasternode.ip): self.errorMsg("Invalid masternode's IP address.") return if not self.curMasternode.privateKey: self.errorMsg("Masternode's private key not set.") return else: self.errorMsg("No masternode selected.") self.checkTerracoindConnection(wait_for_check_finish=True) if not self.terracoind_connection_ok: self.errorMsg("Connection to Terracoin daemon is not established.") return if self.is_terracoind_syncing: self.warnMsg("Terracoin daemon to which you are connected is synchronizing. 
You have to wait " "until it's finished.") return mn_status, _ = self.get_masternode_status(self.curMasternode) if mn_status in ('ENABLED', 'PRE_ENABLED'): if self.queryDlg("Warning: masternode state is %s. \n\nDo you really want to sent 'Start masternode' " "message? " % mn_status, default_button=QMessageBox.Cancel, icon=QMessageBox.Warning) == QMessageBox.Cancel: return try: mn_privkey = terracoin_utils.wif_to_privkey(self.curMasternode.privateKey) if not mn_privkey: self.errorMsg('Cannot convert masternode private key') return mn_pubkey = bitcoin.privkey_to_pubkey(mn_privkey) self.connectHardwareWallet() if not self.hw_client: return seq = 0xffffffff block_count = self.terracoind_intf.getblockcount() block_hash = self.terracoind_intf.getblockhash(block_count - 12) vintx = bytes.fromhex(self.curMasternode.collateralTx)[::-1].hex() vinno = int(self.curMasternode.collateralTxIndex).to_bytes(4, byteorder='big')[::-1].hex() vinsig = '00' vinseq = seq.to_bytes(4, byteorder='big')[::-1].hex() ipv6map = '00000000000000000000ffff' ipdigit = map(int, self.curMasternode.ip.split('.')) for i in ipdigit: ipv6map += i.to_bytes(1, byteorder='big')[::-1].hex() ipv6map += int(self.curMasternode.port).to_bytes(2, byteorder='big').hex() addr = hw_intf.get_address_and_pubkey(self, self.curMasternode.collateralBip32Path) hw_collateral_address = addr.get('address').strip() collateral_pubkey = addr.get('publicKey') cfg_collateral_address = self.curMasternode.collateralAddress.strip() if not cfg_collateral_address: # if mn config's collateral address is empty, assign that from hardware wallet self.curMasternode.collateralAddress = hw_collateral_address self.edtMnCollateralAddress.setText(cfg_collateral_address) self.updateControlsState() elif hw_collateral_address != cfg_collateral_address: # verify config's collateral addres with hardware wallet if self.queryDlg(message="The Terracoin address retrieved from the hardware wallet (%s) for the configured " "BIP32 path does not match the 
collateral address entered in the " "configuration: %s.\n\n" "Do you really want to continue?" % (hw_collateral_address, cfg_collateral_address), default_button=QMessageBox.Cancel, icon=QMessageBox.Warning) == QMessageBox.Cancel: return # check if there is 5000 TRC collateral msg_verification_problem = 'You can continue without verification step if you are sure, that ' \ 'TX ID/Index are correct.' try: utxos = self.terracoind_intf.getaddressutxos([hw_collateral_address]) found = False utxo = [] for utxo in utxos: if utxo['txid'] == self.curMasternode.collateralTx and \ str(utxo['outputIndex']) == self.curMasternode.collateralTxIndex: found = True break if found: if utxo.get('satoshis', None) != 500000000000: if self.queryDlg( message="Collateral transaction output should equal 500000000000 Satoshis (5000 TRC)" ", but its value is: %d Satoshis.\n\nDo you really want to continue?" % (utxo['satoshis']), buttons=QMessageBox.Yes | QMessageBox.Cancel, default_button=QMessageBox.Cancel, icon=QMessageBox.Warning) == QMessageBox.Cancel: return else: if self.queryDlg( message="Could not find the specified transaction id/index for the collateral address: %s." "\n\nDo you really want to continue?" % hw_collateral_address, buttons=QMessageBox.Yes | QMessageBox.Cancel, default_button=QMessageBox.Cancel, icon=QMessageBox.Warning) == QMessageBox.Cancel: return except TerracoindIndexException as e: # likely indexing not enabled if self.queryDlg( message="Collateral transaction verification problem: %s." "\n\n%s\nContinue?" % (str(e), msg_verification_problem), buttons=QMessageBox.Yes | QMessageBox.Cancel, default_button=QMessageBox.Yes, icon=QMessageBox.Warning) == QMessageBox.Cancel: return except Exception as e: if self.queryDlg( message="Collateral transaction verification error: %s." "\n\n%s\nContinue?" 
% (str(e), msg_verification_problem), buttons=QMessageBox.Yes | QMessageBox.Cancel, default_button=QMessageBox.Cancel, icon=QMessageBox.Warning) == QMessageBox.Cancel: return collateral_in = terracoin_utils.num_to_varint(len(collateral_pubkey)).hex() + collateral_pubkey.hex() delegate_in = terracoin_utils.num_to_varint(len(mn_pubkey) / 2).hex() + mn_pubkey sig_time = int(time.time()) info = self.terracoind_intf.getinfo() node_protocol_version = int(info['protocolversion']) if self.curMasternode.use_default_protocol_version or not self.curMasternode.protocol_version: protocol_version = node_protocol_version else: protocol_version = self.curMasternode.protocol_version serialize_for_sig = self.curMasternode.ip + ':' + self.curMasternode.port + str(int(sig_time)) + \ binascii.unhexlify(bitcoin.hash160(collateral_pubkey))[::-1].hex() + \ binascii.unhexlify(bitcoin.hash160(bytes.fromhex(mn_pubkey)))[::-1].hex() + \ str(protocol_version) sig = hw_intf.sign_message(self, self.curMasternode.collateralBip32Path, serialize_for_sig) if sig.address != hw_collateral_address: self.errorMsg('%s address mismatch after signing.' 
% self.getHwName()) return sig1 = sig.signature.hex() logging.debug('Start MN message signature: ' + sig.signature.hex()) logging.debug('Start MN message sig_time: ' + str(sig_time)) work_sig_time = sig_time.to_bytes(8, byteorder='big')[::-1].hex() work_protoversion = int(protocol_version).to_bytes(4, byteorder='big')[::-1].hex() last_ping_block_hash = bytes.fromhex(block_hash)[::-1].hex() last_ping_serialize_for_sig = terracoin_utils.serialize_input_str( self.curMasternode.collateralTx, self.curMasternode.collateralTxIndex, seq, '') + block_hash + str(sig_time) r = terracoin_utils.ecdsa_sign(last_ping_serialize_for_sig, self.curMasternode.privateKey) sig2 = (base64.b64decode(r).hex()) logging.debug('Start MN message signature2: ' + sig2) work = vintx + vinno + vinsig + vinseq \ + ipv6map + collateral_in + delegate_in \ + terracoin_utils.num_to_varint(len(sig1) / 2).hex() + sig1 \ + work_sig_time + work_protoversion \ + vintx + vinno + vinsig + vinseq \ + last_ping_block_hash + work_sig_time \ + terracoin_utils.num_to_varint(len(sig2) / 2).hex() + sig2 work = '01' + work if node_protocol_version >= 70208: work = work + '0001000100' ret = self.terracoind_intf.masternodebroadcast("decode", work) if ret['overall'].startswith('Successfully decoded broadcast messages for 1 masternodes'): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText('Press <OK> if you want to broadcast masternode configuration (protocol version: %s) ' 'or <Cancel> to exit.' 
% str(protocol_version)) msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel) msg.setDefaultButton(QMessageBox.Ok) retval = msg.exec_() if retval == QMessageBox.Cancel: return ret = self.terracoind_intf.masternodebroadcast("relay", work) match = re.search("relayed broadcast messages for (\d+) masternodes.*failed to relay (\d+), total 1", ret['overall']) failed_count = 0 ok_count = 0 if match and len(match.groups()): ok_count = int(match.group(1)) failed_count = int(match.group(2)) overall = ret['overall'] errorMessage = '' if failed_count: del ret['overall'] keys = list(ret.keys()) if len(keys): # get the first (and currently the only) error message errorMessage = ret[keys[0]].get('errorMessage') if failed_count == 0: self.infoMsg(overall) else: self.errorMsg('Failed to start masternode.\n\nResponse from Terracoin daemon: %s.' % errorMessage) else: logging.error('Start MN error: ' + str(ret)) errorMessage = ret[list(ret.keys())[0]].get('errorMessage') self.errorMsg(errorMessage) except HardwareWalletCancelException: if self.hw_client: self.hw_client.init_device() except Exception as e: self.errorMsg(str(e)) logging.exception('Exception occurred.') def get_masternode_status(self, masternode): """ Returns tuple: the current masternode status (ENABLED, PRE_ENABLED, WATCHDOG_EXPIRED, ...) and a protocol version. :return: """ if self.terracoind_connection_ok: collateral_id = masternode.collateralTx + '-' + masternode.collateralTxIndex mns_info = self.terracoind_intf.get_masternodelist('full', collateral_id) if len(mns_info): protocol_version = mns_info[0].protocol if isinstance(protocol_version, str): try: protocol_version = int(protocol_version) except: logging.warning('Invalid masternode protocol version: ' + str(protocol_version)) return (mns_info[0].status, protocol_version) return '???', None def get_masternode_status_description(self): """ Get current masternode's extended status. 
""" if self.terracoind_connection_ok: collateral_id = self.curMasternode.collateralTx + '-' + self.curMasternode.collateralTxIndex if not self.curMasternode.collateralTx: return '<span style="color:red">Enter the collateral TX ID</span>' if not self.curMasternode.collateralTxIndex: return '<span style="color:red">Enter the collateral TX index</span>' mns_info = self.terracoind_intf.get_masternodelist('full', data_max_age=120) # read new data from the network # every 120 seconds mn_info = self.terracoind_intf.masternodes_by_ident.get(collateral_id) if mn_info: if mn_info.lastseen > 0: lastseen = datetime.datetime.fromtimestamp(float(mn_info.lastseen)) lastseen_str = self.config.to_string(lastseen) lastseen_ago = app_utils.seconds_to_human(time.time() - float(mn_info.lastseen), out_seconds=False) + ' ago' else: lastseen_str = 'never' lastseen_ago = '' if mn_info.lastpaidtime > 0: lastpaid = datetime.datetime.fromtimestamp(float(mn_info.lastpaidtime)) lastpaid_str = self.config.to_string(lastpaid) lastpaid_ago = app_utils.seconds_to_human(time.time() - float(mn_info.lastpaidtime), out_seconds=False) + ' ago' else: lastpaid_str = 'never' lastpaid_ago = '' activeseconds_str = app_utils.seconds_to_human(int(mn_info.activeseconds), out_seconds=False) if mn_info.status == 'ENABLED' or mn_info.status == 'PRE_ENABLED': color = 'green' else: color = 'red' enabled_mns_count = len(self.terracoind_intf.payment_queue) status = '<style>td {white-space:nowrap;padding-right:8px}' \ '.title {text-align:right;font-weight:bold}' \ '.ago {font-style:normal}' \ '.value {color:navy}' \ '</style>' \ '<table>' \ '<tr><td class="title">Status:</td><td class="value"><span style="color:%s">%s</span>' \ '</td><td>v.%s</td></tr>' \ '<tr><td class="title">Last Seen:</td><td class="value">%s</td><td class="ago">%s</td></tr>' \ '<tr><td class="title">Last Paid:</td><td class="value">%s</td><td class="ago">%s</td></tr>' \ '<tr><td class="title">Active Duration:</td><td class="value" 
colspan="2">%s</td></tr>' \ '<tr><td class="title">Queue/Count:</td><td class="value" colspan="2">%s/%s</td></tr>' \ '</table>' % \ (color, mn_info.status, str(mn_info.protocol), lastseen_str, lastseen_ago, lastpaid_str, lastpaid_ago, activeseconds_str, str(mn_info.queue_position), enabled_mns_count) else: status = '<span style="color:red">Masternode not found.</span>' else: status = '<span style="color:red">Problem with connection to terracoind.</span>' return status @pyqtSlot(bool) def on_btnRefreshMnStatus_clicked(self): def enable_buttons(): self.btnRefreshMnStatus.setEnabled(True) self.btnBroadcastMn.setEnabled(True) self.lblMnStatus.setText('<b>Retrieving masternode information, please wait...<b>') self.btnRefreshMnStatus.setEnabled(False) self.btnBroadcastMn.setEnabled(False) self.checkTerracoindConnection(wait_for_check_finish=True, call_on_check_finished=enable_buttons) if self.terracoind_connection_ok: try: status = self.get_masternode_status_description() self.lblMnStatus.setText(status) except: self.lblMnStatus.setText('') raise else: self.errorMsg('Terracoin daemon not connected') @pyqtSlot(bool) def on_actTransferFundsSelectedMn_triggered(self): """ Shows tranfser funds window with utxos related to current masternode. """ if self.curMasternode: src_addresses = [] if not self.curMasternode.collateralBip32Path: self.errorMsg("Enter the masternode collateral BIP32 path. You can use the 'right arrow' button " "on the right of the 'Collateral' edit box.") elif not self.curMasternode.collateralAddress: self.errorMsg("Enter the masternode collateral Terracoin address. 
You can use the 'left arrow' " "button on the left of the 'BIP32 path' edit box.") else: src_addresses.append((self.curMasternode.collateralAddress, self.curMasternode.collateralBip32Path)) self.executeTransferFundsDialog(src_addresses) else: self.errorMsg('No masternode selected') @pyqtSlot(bool) def on_actTransferFundsForAllMns_triggered(self): """ Shows tranfser funds window with utxos related to all masternodes. """ src_addresses = [] lacking_addresses = 0 for mn in self.config.masternodes: if mn.collateralAddress and mn.collateralBip32Path: src_addresses.append((mn.collateralAddress, mn.collateralBip32Path)) else: lacking_addresses += 1 if len(src_addresses): if lacking_addresses == 0 or \ self.queryDlg("Some of your Masternodes lack the Terracoin addres and/or BIP32 path of the collateral " "in their configuration. Transactions for these Masternodes will not be listed.\n\n" "Continue?", buttons=QMessageBox.Yes | QMessageBox.Cancel, default_button=QMessageBox.Yes, icon=QMessageBox.Warning) == QMessageBox.Yes: self.executeTransferFundsDialog(src_addresses) else: self.errorMsg('No masternode with the BIP32 path and Terracoin address configured.') @pyqtSlot(bool) def on_actTransferFundsForAddress_triggered(self): """ Shows tranfser funds window for address/path specified by the user. 
""" if not self.terracoind_intf.open(): self.errorMsg('Terracoin daemon not connected') else: ui = send_payout_dlg.SendPayoutDlg([], self) ui.exec_() def executeTransferFundsDialog(self, src_addresses): if not self.terracoind_intf.open(): self.errorMsg('Terracoin daemon not connected') else: ui = send_payout_dlg.SendPayoutDlg(src_addresses, self) ui.exec_() @pyqtSlot(bool) def on_actSignMessageWithHw_triggered(self): if self.curMasternode: self.connectHardwareWallet() if self.hw_client: if not self.curMasternode.collateralBip32Path: self.errorMsg("Empty masternode's collateral BIP32 path") else: ui = SignMessageDlg(self, self.curMasternode.collateralBip32Path, self.curMasternode.collateralAddress) ui.exec_() else: self.errorMsg("To sign messages, you must select a masternode.") @pyqtSlot(bool) def on_actHwSetup_triggered(self): """ Hardware wallet setup. """ self.connectHardwareWallet() if self.hw_client: ui = HwSetupDlg(self) ui.exec_() @pyqtSlot(bool) def on_actHwInitialize_triggered(self): """ Hardware wallet initialization from a seed. """ # self.connectHardwareWallet() # if self.hw_client: ui = HwInitializeDlg(self) ui.exec_() @pyqtSlot(bool) def on_btnFindCollateral_clicked(self): """ Open dialog with list of utxos of collateral terracoin address. :return: """ if self.curMasternode and self.curMasternode.collateralAddress: ui = FindCollateralTxDlg(self, self.terracoind_intf, self.curMasternode.collateralAddress) if ui.exec_(): tx, txidx = ui.getSelection() if tx: if self.curMasternode.collateralTx != tx or self.curMasternode.collateralTxIndex != str(txidx): self.curMasternode.collateralTx = tx self.curMasternode.collateralTxIndex = str(txidx) self.edtMnCollateralTx.setText(tx) self.edtMnCollateralTxIndex.setText(str(txidx)) self.curMnModified() self.updateControlsState() else: logging.warning("curMasternode or collateralAddress empty") @pyqtSlot(bool) def on_btnProposals_clicked(self): ui = ProposalsDlg(self, self.terracoind_intf) ui.exec_()
47.592361
146
0.565805
4a014e17abb80fd972d55415a5ace1a220fe802f
1,825
py
Python
heist/scripts/heist_client.py
Xenovortex/RG2021_projects
36444fbfbbbad021a6ba9c6b128e4ef161773636
[ "MIT" ]
null
null
null
heist/scripts/heist_client.py
Xenovortex/RG2021_projects
36444fbfbbbad021a6ba9c6b128e4ef161773636
[ "MIT" ]
null
null
null
heist/scripts/heist_client.py
Xenovortex/RG2021_projects
36444fbfbbbad021a6ba9c6b128e4ef161773636
[ "MIT" ]
1
2021-07-08T14:13:10.000Z
2021-07-08T14:13:10.000Z
#!/usr/bin/env python3 import rospy from std_msgs.msg import Int32 import rogata_library as rgt import numpy as np def visibility(guard,thief,wall_objects,max_seeing_distance): distance = np.linalg.norm(thief-guard) direction = (thief-guard)/distance direction = np.arctan2(direction[1],direction[0]) min_intersect = guard + max_seeing_distance * np.array([np.cos(direction),np.sin(direction)]) #rospy.loginfo("walls client: {}".format(len(wall_objects))) for walls in wall_objects: intersection = rogata.intersect(walls,guard,direction,max_seeing_distance) if np.linalg.norm(intersection-guard) <= np.linalg.norm(min_intersect-guard): min_intersect = intersection if np.linalg.norm(min_intersect-guard) >= distance: return 1 else: return 0 if __name__ == '__main__': rospy.init_node("Refree") rogata = rgt.rogata_helper() game_state = Int32() game_state = 0 has_bounty = 0 rate = rospy.Rate(10) # 10hz pub = rospy.Publisher("game_state", Int32, queue_size=10) try: while not rospy.is_shutdown(): guard_pos = rogata.get_pos("guard_obj") evader_pos = rogata.get_pos("evader_obj") evader_visible = visibility(guard_pos,evader_pos,["walls_obj"],1000) if rogata.inside("goal_obj",evader_pos): has_bounty = 1 print("Got the Bounty!") if rogata.inside("entry_obj",evader_pos) and has_bounty: game_state = 1 print ("The Evader Wins") if evader_visible: game_state = -1 print("The Guard Wins!") pub.publish(game_state) rate.sleep() except rospy.ROSInterruptException: pass
29.918033
97
0.624658
4a014e7939c35debc845f88c1162cc13e04d8675
517
py
Python
src/coalescenceml/step/output.py
CornellDataScience/CoalescenceML
6dd849b272c77011719952b47d5b55684d90733a
[ "Apache-2.0" ]
1
2022-03-22T17:48:55.000Z
2022-03-22T17:48:55.000Z
src/coalescenceml/step/output.py
CornellDataScience/CoalescenceML
6dd849b272c77011719952b47d5b55684d90733a
[ "Apache-2.0" ]
2
2022-02-18T18:48:12.000Z
2022-02-19T18:14:38.000Z
src/coalescenceml/step/output.py
CornellDataScience/CoalescenceML
6dd849b272c77011719952b47d5b55684d90733a
[ "Apache-2.0" ]
1
2022-02-10T02:52:22.000Z
2022-02-10T02:52:22.000Z
from typing import Any, Iterator, NamedTuple, Tuple, Type class Output: """Special object to store namedtuple with immutable name.""" def __init__(self, **kwargs: Type[Any]): self.outputs = NamedTuple("CoalescenceOutput", **kwargs) def items(self) -> Iterator[Tuple[str, Type[Any]]]: """Items returns an iterator over the output. Yields: Tuple[str, Type[Any]]: A pair of the output name and type """ yield from self.outputs.__annotations__.items()
30.411765
69
0.646035
4a014fdd32ba9bbd0bc5d6dae6ed1634410a910f
1,791
py
Python
data/p4VQE/R4/benchmark/startCirq703.py
UCLA-SEAL/QDiff
d968cbc47fe926b7f88b4adf10490f1edd6f8819
[ "BSD-3-Clause" ]
null
null
null
data/p4VQE/R4/benchmark/startCirq703.py
UCLA-SEAL/QDiff
d968cbc47fe926b7f88b4adf10490f1edd6f8819
[ "BSD-3-Clause" ]
null
null
null
data/p4VQE/R4/benchmark/startCirq703.py
UCLA-SEAL/QDiff
d968cbc47fe926b7f88b4adf10490f1edd6f8819
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 5/15/20 4:49 PM # @File : grover.py # qubit number=4 # total number=11 import cirq import cirq.google as cg from typing import Optional import sys from math import log2 import numpy as np #thatsNoCode from cirq.contrib.svg import SVGCircuit # Symbols for the rotation angles in the QAOA circuit. def make_circuit(n: int, input_qubit): c = cirq.Circuit() # circuit begin c.append(cirq.H.on(input_qubit[0])) # number=1 c.append(cirq.rx(2.9845130209103035).on(input_qubit[2])) # number=7 c.append(cirq.H.on(input_qubit[2])) # number=3 c.append(cirq.X.on(input_qubit[2])) # number=6 c.append(cirq.rx(1.6807520696705391).on(input_qubit[3])) # number=8 c.append(cirq.H.on(input_qubit[3])) # number=4 c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=9 c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=10 # circuit end c.append(cirq.measure(*input_qubit, key='result')) return c def bitstring(bits): return ''.join(str(int(b)) for b in bits) if __name__ == '__main__': qubit_count = 4 input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)] circuit = make_circuit(qubit_count,input_qubits) circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap') circuit_sample_count =2000 simulator = cirq.Simulator() result = simulator.run(circuit, repetitions=circuit_sample_count) frequencies = result.histogram(key='result', fold_func=bitstring) writefile = open("../data/startCirq703.csv","w+") print(format(frequencies),file=writefile) print("results end", file=writefile) print(circuit.__len__(), file=writefile) print(circuit,file=writefile) writefile.close()
28.428571
77
0.696817
4a01504ed5c863fa171005ec8612ae4a54e0d40d
4,906
py
Python
kubernetes/client/models/v1beta1_custom_resource_definition_status.py
scele/kubernetes-client-python
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
[ "Apache-2.0" ]
null
null
null
kubernetes/client/models/v1beta1_custom_resource_definition_status.py
scele/kubernetes-client-python
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
[ "Apache-2.0" ]
null
null
null
kubernetes/client/models/v1beta1_custom_resource_definition_status.py
scele/kubernetes-client-python
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.8.2 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class V1beta1CustomResourceDefinitionStatus(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'accepted_names': 'V1beta1CustomResourceDefinitionNames', 'conditions': 'list[V1beta1CustomResourceDefinitionCondition]' } attribute_map = { 'accepted_names': 'acceptedNames', 'conditions': 'conditions' } def __init__(self, accepted_names=None, conditions=None): """ V1beta1CustomResourceDefinitionStatus - a model defined in Swagger """ self._accepted_names = None self._conditions = None self.discriminator = None self.accepted_names = accepted_names self.conditions = conditions @property def accepted_names(self): """ Gets the accepted_names of this V1beta1CustomResourceDefinitionStatus. AcceptedNames are the names that are actually being used to serve discovery They may be different than the names in spec. :return: The accepted_names of this V1beta1CustomResourceDefinitionStatus. :rtype: V1beta1CustomResourceDefinitionNames """ return self._accepted_names @accepted_names.setter def accepted_names(self, accepted_names): """ Sets the accepted_names of this V1beta1CustomResourceDefinitionStatus. AcceptedNames are the names that are actually being used to serve discovery They may be different than the names in spec. :param accepted_names: The accepted_names of this V1beta1CustomResourceDefinitionStatus. 
:type: V1beta1CustomResourceDefinitionNames """ if accepted_names is None: raise ValueError("Invalid value for `accepted_names`, must not be `None`") self._accepted_names = accepted_names @property def conditions(self): """ Gets the conditions of this V1beta1CustomResourceDefinitionStatus. Conditions indicate state for particular aspects of a CustomResourceDefinition :return: The conditions of this V1beta1CustomResourceDefinitionStatus. :rtype: list[V1beta1CustomResourceDefinitionCondition] """ return self._conditions @conditions.setter def conditions(self, conditions): """ Sets the conditions of this V1beta1CustomResourceDefinitionStatus. Conditions indicate state for particular aspects of a CustomResourceDefinition :param conditions: The conditions of this V1beta1CustomResourceDefinitionStatus. :type: list[V1beta1CustomResourceDefinitionCondition] """ if conditions is None: raise ValueError("Invalid value for `conditions`, must not be `None`") self._conditions = conditions def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, V1beta1CustomResourceDefinitionStatus): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
31.248408
129
0.626784
4a01518129bcacc33ddadda225867af8049226f0
2,533
py
Python
homeassistant/components/yolink/__init__.py
OverFlow636/core
e28f20cdf369c682b6617d9ae67110f230cf6ef8
[ "Apache-2.0" ]
null
null
null
homeassistant/components/yolink/__init__.py
OverFlow636/core
e28f20cdf369c682b6617d9ae67110f230cf6ef8
[ "Apache-2.0" ]
null
null
null
homeassistant/components/yolink/__init__.py
OverFlow636/core
e28f20cdf369c682b6617d9ae67110f230cf6ef8
[ "Apache-2.0" ]
null
null
null
"""The Detailed Hello World Push integration.""" import asyncio from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from . import hub from .const import DOMAIN PLATFORMS = ["sensor"] async def async_setup(hass: HomeAssistant, config: dict): """Set up the Hello World component.""" # Ensure our name space for storing objects is a known type. A dict is # common/preferred as it allows a separate instance of your class for each # instance that has been created in the UI. hass.data.setdefault(DOMAIN, {}) return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up Hello World from a config entry.""" # Store an instance of the "connecting" class that does the work of speaking # with your actual devices. hass.data[DOMAIN][entry.entry_id] = hub.Hub(hass, entry.data["username"], entry.data["password"]) # This creates each HA object for each platform your device requires. # It's done by calling the `async_setup_entry` function in each platform module. for component in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, component) ) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry): """Unload a config entry.""" # This is called when an entry/configured device is to be removed. The class # needs to unload itself, and remove callbacks. See the classes for further # details unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(entry, component) for component in PLATFORMS ] ) ) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) return unload_ok # async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: # """Set up yolink from a config entry.""" # # TODO Store an API object for your platforms to access # # hass.data[DOMAIN][entry.entry_id] = MyApi(...) 
# # print('yolink async_setup_entry') # # hass.config_entries.async_setup_platforms(entry, PLATFORMS) # # return True # # # async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: # """Unload a config entry.""" # # unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) # if unload_ok: # hass.data[DOMAIN].pop(entry.entry_id) # # return unload_ok
32.896104
101
0.690091
4a01531192bf22dbeb78e644536b5f1ef9922707
396
py
Python
customer/migrations/0017_ordermodel_is_shipped.py
pappan-123/Food
a5df233b580f4172d63c6e36544e903946c985d4
[ "MIT" ]
null
null
null
customer/migrations/0017_ordermodel_is_shipped.py
pappan-123/Food
a5df233b580f4172d63c6e36544e903946c985d4
[ "MIT" ]
null
null
null
customer/migrations/0017_ordermodel_is_shipped.py
pappan-123/Food
a5df233b580f4172d63c6e36544e903946c985d4
[ "MIT" ]
null
null
null
# Generated by Django 3.2.4 on 2021-09-23 10:49 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('customer', '0016_auto_20210923_1406'), ] operations = [ migrations.AddField( model_name='ordermodel', name='is_shipped', field=models.BooleanField(default=False), ), ]
20.842105
53
0.608586
4a0153694001d06066cfaf18dddbd83558fc8305
646
py
Python
01_Aulas_Introdutorias/Aula17_pacote_requests.py
CaioHenriqueMachado/Contribuindo_com_Python
a35ad2a4d3e2baad191432bdf162cc4ca8568f5e
[ "MIT" ]
2
2020-02-20T23:27:14.000Z
2021-09-03T00:42:29.000Z
01_Aulas_Introdutorias/Aula17_pacote_requests.py
CaioHenriqueMachado/Contribuindo_com_Python
a35ad2a4d3e2baad191432bdf162cc4ca8568f5e
[ "MIT" ]
null
null
null
01_Aulas_Introdutorias/Aula17_pacote_requests.py
CaioHenriqueMachado/Contribuindo_com_Python
a35ad2a4d3e2baad191432bdf162cc4ca8568f5e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Aug 21 13:58:16 2021 @author: caio """ # Instalando pacote requests #cmd: pip install requests # Usando api viacep import requests response = requests.get('https://viacep.com.br/ws/01001000/json/') #POKEMON POKEAPI: https://pokeapi.co/api/v2/pokemon/ditto/ assert(response.status_code == 200) response_string = response.text assert(str(type(response_string)) == "<class 'str'>") response_json = response.json() assert(str(type(response_json)) == "<class 'dict'>") logradouro = response_json['logradouro'] assert(logradouro == "Praça da Sé") print(response.json())
17.944444
66
0.70743
4a0154caf622a44dd83e45576901bfaeaaa38dda
5,849
py
Python
cogs/greet.py
xKynn/RDGreeter
6172d05d4b3668dba114ae09d3b9938f3356e325
[ "MIT" ]
1
2018-03-11T12:22:37.000Z
2018-03-11T12:22:37.000Z
cogs/greet.py
xKynn/RDGreeter
6172d05d4b3668dba114ae09d3b9938f3356e325
[ "MIT" ]
null
null
null
cogs/greet.py
xKynn/RDGreeter
6172d05d4b3668dba114ae09d3b9938f3356e325
[ "MIT" ]
null
null
null
import discord import texttable from discord.ext import commands from utils import db as greeterDB class Greet: def __init__(self, bot): self.bot = bot def is_admin(ctx): """ Check if the caller has manage_guild """ perm = ctx.author.guild_permissions.manage_guild or ctx.author.id == 203819318150955008 if not perm: ctx.bot.loop.create_task(ctx.error()) ctx.bot.loop.create_task(ctx.send("You need the `Manage Server` permission to use this command.")) return perm @staticmethod async def _clan_check(ctx, clan, conn): """ Common function called by all funcs to check if a role exists """ db_clan = await greeterDB.fetch_clan(conn, clan.lower().title()) if db_clan is None: await ctx.error() await ctx.send(f"A role with name **{clan.lower().title()}** was not found in the DB.\n") return bool(db_clan) async def _edit_message(self, ctx, clan, conn): """ Common function called by edit_message and add_role """ def check(message): return message.author.id == ctx.author.id and message.channel == ctx.channel await ctx.send("Type out your greeting message, use {USER} in your message wherever you want to use the" "invitee's name.\nCancel with `g/cancel`") greet = await self.bot.wait_for('message', check=check) if greet.content == 'g/cancel': return await greet.add_reaction('🗑') await greeterDB.edit_field(conn, 'message', clan.lower().title(), greet.content) await greet.add_reaction('✅') @commands.command(aliases=['add_clan', 'new']) @commands.check(is_admin) async def add_role(self, ctx, *, clan: str): """ Add a new role or invite source to the database. """ def check(message): return message.author.id == ctx.author.id and message.channel == ctx.channel async with self.bot.conn_pool.acquire() as conn: db_clan = await greeterDB.fetch_clan(conn, clan.lower().title()) if db_clan is None: await ctx.send(f"A role with name **{clan.lower().title()}** was not found in the DB.\nWould you like" "to create an entry for it? 
You will have to provide a greet messa" "ge.\nReply with `g/yes` or `g/no`.") def name_check(message): return message.author.id == ctx.author.id and message.channel == ctx.channel and \ message.content in ['g/no', 'g/yes'] msg = await self.bot.wait_for('message', check=name_check) if msg.content == 'g/no': return await msg.add_reaction('🗑') await greeterDB.add_clan(conn, clan.lower().title()) await self._edit_message(ctx, clan, conn) else: return await ctx.send(f"A role with name **{clan.lower().title()}** already exists.\n" "Use `edit_message` instead.") @commands.command(aliases=['edit_msg']) @commands.check(is_admin) async def edit_message(self, ctx, *, clan: str): """ Edit the message for an existing role/source. """ async with self.bot.conn_pool.acquire() as conn: if not await self._clan_check(ctx, clan, conn): return await self._edit_message(ctx, clan, conn) @commands.command(aliases=['delete_clan']) @commands.check(is_admin) async def delete_role(self, ctx, *, clan: str): """ Delete an existing role/source from the databse. """ def check(message): return message.author.id == ctx.author.id and message.channel == ctx.channel and \ message.content in ['g/no', 'g/yes'] async with self.bot.conn_pool.acquire() as conn: if not await self._clan_check(ctx, clan, conn): return await ctx.send(f"Are you sure you want to delete **{clan.lower().title()}** from the DB?.\n" "Reply with `g/yes` or `g/no`.") conf = await self.bot.wait_for('message', check=check) if conf.content == 'g/no': return await conf.add_reaction('🗑') async with conn.transaction(): await conn.execute('DELETE FROM greeter WHERE clan_name=$1', clan.lower().title()) await conf.add_reaction('✅') @commands.command(aliases=['roles', 'clans']) @commands.check(is_admin) async def info(self, ctx): """ List the current sources/roles and their invite codes. 
""" async with self.bot.conn_pool.acquire() as conn: clans = await conn.fetch('SELECT * from greeter') tbl = ['```', '```'] tab = texttable.Texttable() tab.header(('Roles', )) for clan in clans: tab.add_row((clan['clan_name'],)) tbl.insert(1, tab.draw()) await ctx.send('\n'.join(tbl)) @commands.command(aliases=['test']) @commands.check(is_admin) async def preview(self, ctx, *, clan: str): """ Get DMed with the specified role/source's greet message. """ async with self.bot.conn_pool.acquire() as conn: db_clan = await greeterDB.fetch_clan(conn, clan.lower().title()) if db_clan is None: await ctx.error() return await ctx.send(f"A role with name **{clan.lower().title()}** was not found in the DB.\n") try: await ctx.author.send(db_clan['message'].replace('{USER}', ctx.author.name)) except discord.DiscordException: await ctx.error() await ctx.send("You have DMs from non-friends disabled!") def setup(bot): bot.add_cog(Greet(bot))
44.992308
118
0.586767
4a01550c0c4c28192d182c3b52a68470157d5679
2,640
py
Python
dashboard/dashboard/main_test.py
ravitejavalluri/catapult
246a39a82c2213d913a96fff020a263838dc76e6
[ "BSD-3-Clause" ]
2,151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
dashboard/dashboard/main_test.py
kind-john/catapult
29635376119833f172a58a48a3282d353ce55d2b
[ "BSD-3-Clause" ]
395
2020-04-18T08:22:18.000Z
2021-12-08T13:04:49.000Z
dashboard/dashboard/main_test.py
kind-john/catapult
29635376119833f172a58a48a3282d353ce55d2b
[ "BSD-3-Clause" ]
338
2020-04-18T08:03:10.000Z
2022-03-29T12:33:22.000Z
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest import webapp2 import webtest from dashboard import main from dashboard.common import testing_common from dashboard.common import utils from dashboard.models import anomaly class MainTest(testing_common.TestCase): def setUp(self): super(MainTest, self).setUp() app = webapp2.WSGIApplication([('/', main.MainHandler)]) self.testapp = webtest.TestApp(app) def testGet_PageIsShown(self): response = self.testapp.get('/') self.assertIn('<html>', response.body) def testGetColorClass(self): self.assertEqual('over-50', main._GetColorClass(95)) self.assertEqual('over-40', main._GetColorClass(45)) self.assertEqual('over-30', main._GetColorClass(31)) self.assertEqual('over-20', main._GetColorClass(30)) self.assertEqual('over-10', main._GetColorClass(12.0)) self.assertEqual('under-10', main._GetColorClass(0.1)) def testAnomalyInfoDicts(self): testing_common.AddTests(['M'], ['b'], {'t': {'foo': {}}}) foo_key = utils.TestKey('M/b/t/foo') foo_anomaly = anomaly.Anomaly( start_revision=14999, end_revision=15000, test=foo_key, bug_id=12345, median_before_anomaly=100, median_after_anomaly=200) anomaly_key = foo_anomaly.put() self.assertEqual( [ { 'master': 'M', 'bot': 'b', 'testsuite': 't', 'test': 'foo', 'bug_id': 12345, 'start_revision': 14999, 'end_revision': 15000, 'key': anomaly_key.urlsafe(), 'dashboard_link': ('https://chromeperf.appspot.com' '/group_report?keys=%s' % anomaly_key.urlsafe()), 'percent_changed': '100.0%', 'color_class': 'over-50', 'improvement': False, } ], main._AnomalyInfoDicts([foo_anomaly], {foo_key: foo_key.get()})) def testAnomalyInfoDicts_MissingTest_AnomalySkipped(self): testing_common.AddTests(['M'], ['b'], {'t': {'foo': {}}}) foo_key = utils.TestKey('M/b/t/foo') foo_anomaly = anomaly.Anomaly( start_revision=14999, end_revision=15000, test=foo_key, bug_id=12345, 
median_before_anomaly=100, median_after_anomaly=200) foo_anomaly.put() self.assertEqual([], main._AnomalyInfoDicts([foo_anomaly], {})) if __name__ == '__main__': unittest.main()
33.417722
72
0.614773
4a01553e3da74efcf0eb4a9ff4c0cc3d3b916b23
1,253
py
Python
jdcloud_sdk/services/dcap/apis/ModifyLevelRequest.py
Tanc009/jdcloud-sdk-python
8b045c99bc5b73ca7348e950b6f01e03a27982f5
[ "Apache-2.0" ]
14
2018-04-19T09:53:56.000Z
2022-01-27T06:05:48.000Z
jdcloud_sdk/services/dcap/apis/ModifyLevelRequest.py
Tanc009/jdcloud-sdk-python
8b045c99bc5b73ca7348e950b6f01e03a27982f5
[ "Apache-2.0" ]
15
2018-09-11T05:39:54.000Z
2021-07-02T12:38:02.000Z
jdcloud_sdk/services/dcap/apis/ModifyLevelRequest.py
Tanc009/jdcloud-sdk-python
8b045c99bc5b73ca7348e950b6f01e03a27982f5
[ "Apache-2.0" ]
33
2018-04-20T05:29:16.000Z
2022-02-17T09:10:05.000Z
# coding=utf8 # Copyright 2018 JDCLOUD.COM # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # NOTE: This class is auto generated by the jdcloud code generator program. from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest class ModifyLevelRequest(JDCloudRequest): """ 修改敏感数据分级策略 """ def __init__(self, parameters, header=None, version="v1"): super(ModifyLevelRequest, self).__init__( '/level/{levelId}', 'PATCH', header, version) self.parameters = parameters class ModifyLevelParameters(object): def __init__(self, levelId, levelSpec): """ :param levelId: 分级 ID :param levelSpec: 敏感数据分级描述 """ self.levelId = levelId self.levelSpec = levelSpec
28.477273
75
0.704709
4a01557121db2b5a99a8eaf9595a5663f7294f22
27,044
py
Python
botbowl/core/pathfinding/python_pathfinding.py
hadarshavit/botbowl
eec77bdda427d5e245de6f5e136e06886183b2c7
[ "Apache-2.0" ]
null
null
null
botbowl/core/pathfinding/python_pathfinding.py
hadarshavit/botbowl
eec77bdda427d5e245de6f5e136e06886183b2c7
[ "Apache-2.0" ]
null
null
null
botbowl/core/pathfinding/python_pathfinding.py
hadarshavit/botbowl
eec77bdda427d5e245de6f5e136e06886183b2c7
[ "Apache-2.0" ]
null
null
null
""" ========================== Author: Niels Justesen Year: 2020 ========================== This module contains pathfinding functionalities for botbowl. """ from typing import Tuple, List, Optional from botbowl.core.table import Rules from botbowl.core.model import Square from botbowl.core.forward_model import treat_as_immutable from botbowl.core.table import Skill, WeatherType import copy import numpy as np from queue import PriorityQueue @treat_as_immutable class Path: final_node: 'Node' _steps: Tuple[Square] _rolls: Tuple[List[int]] prob: float block_dice: Optional[int] handoff_roll: Optional[int] foul_roll: Optional[int] def __init__(self, node: 'Node'): super().__init__() self.final_node = node self._steps = None self.prob = node.prob self._rolls = None self.block_dice = node.block_dice self.handoff_roll = node.handoff_roll self.foul_roll = node.foul_roll @property def steps(self) -> Tuple[Square]: if self._steps is None: self.collect_path() return self._steps @property def rolls(self) -> Tuple[List[int]]: if self._rolls is None: self.collect_path() return self._rolls def __len__(self) -> int: return len(self.steps) def get_last_step(self) -> 'Square': return self.final_node.position def is_empty(self) -> bool: return len(self) == 0 def collect_path(self): steps: List[Square] = [] rolls: List[List[int]] = [] node = self.final_node while node.parent is not None: steps.append(node.position) rolls.append(node.rolls) node = node.parent self._steps = tuple(reversed(steps)) self._rolls = tuple(reversed(rolls)) def __eq__(self, other): return self.prob == other.prob and \ self.steps == other.steps and \ self.rolls == other.rolls and \ self.block_dice == other.block_dice and \ self.handoff_roll == other.handoff_roll and \ self.foul_roll == other.foul_roll def __repr__(self): block = f", block_dice={self.block_dice}" if self.block_dice is not None else "" handoff = f", handoff_roll={self.handoff_roll}" if self.handoff_roll is not None else "" foul = f", 
foul_roll={self.foul_roll}" if self.foul_roll is not None else "" return f"Path(target={self.steps[-1]}, prob={self.prob}, {block}{handoff}{foul})" class Node: TRR = 0 DODGE = 1 SURE_FEET = 2 SURE_HANDS = 3 def __lt__(self, other): return self.euclidean_distance < other.euclidean_distance def __init__(self, parent, position, moves_left, gfis_left, euclidean_distance, rr_states=None, block_dice=None, foul_roll=None, handoff_roll=None, can_foul=False, can_block=False, can_handoff=False): self.parent = parent self.position = position self.moves_left = moves_left self.gfis_left = gfis_left self.euclidean_distance = euclidean_distance self.prob = parent.prob if parent is not None else 1 self.foul_roll = foul_roll self.handoff_roll = handoff_roll self.rolls = [] self.block_dice = block_dice self.rr_states = rr_states if rr_states is not None else parent.rr_states if parent is not None: self.can_foul = parent.can_foul self.can_block = parent.can_block self.can_handoff = parent.can_handoff else: self.can_foul = can_foul self.can_block = can_block self.can_handoff = can_handoff def _apply_roll(self, p, skill_rr, team_rr): # Find new states new_states = {} for state, prev_p in self.rr_states.items(): p_success = prev_p * p if state in new_states: new_states[state] += p_success else: new_states[state] = prev_p * p if skill_rr is not None and state[skill_rr]: self._add_fail_state(new_states, state, prev_p, p, skill_rr) elif state[team_rr]: self._add_fail_state(new_states, state, prev_p, p, team_rr) ''' # Merge new states with previous states for rr_state, rr_state_p in new_rr_states.items(): if rr_state in self.rr_states: self.rr_states[rr_state] += rr_state_p else: self.rr_states[rr_state] = rr_state_p ''' # Merge with self.rr_state self.rr_states = new_states self.prob = sum(self.rr_states.values()) def _add_fail_state(self, new_states, prev_state, prev_state_p, p, index): fail_state = [rr for rr in prev_state] fail_state[index] = False fail_state_p = prev_state_p * (1 - p) 
* p fail_state = tuple(fail_state) if fail_state in new_states: new_states[fail_state] += fail_state_p else: new_states[fail_state] = fail_state_p def apply_gfi(self, target): self.rolls.append(target) self._apply_roll((7 - target) / 6, self.SURE_FEET, self.TRR) def apply_dodge(self, target): self.rolls.append(target) self._apply_roll((7 - target) / 6, self.DODGE, self.TRR) def apply_pickup(self, target): self.rolls.append(target) self._apply_roll((7 - target) / 6, self.SURE_HANDS, self.TRR) # TODO: should pickup be added to path prob if it's the last step? def apply_handoff(self, target): self.handoff_roll = target self.can_handoff = False def apply_foul(self, target): self.foul_roll = target self.can_foul = False def apply_stand_up(self, target): self.rolls.append(target) self._apply_roll((7 - target) / 6, None, self.TRR) class Pathfinder: DIRECTIONS = [Square(-1, -1), Square(-1, 0), Square(-1, 1), Square(0, -1), Square(0, 1), Square(1, -1), Square(1, 0), Square(1, 1)] def __init__(self, game, player, trr=False, directly_to_adjacent=False, can_block=False, can_handoff=False, can_foul=False): self.game = game self.player = player self.trr = trr self.directly_to_adjacent = directly_to_adjacent self.can_block = can_block self.can_handoff = can_handoff self.can_foul = can_foul self.carries_ball = None self.endzone_x = None self.ball_position = None self.ma = player.num_moves_left() self.gfis = player.num_gfis_left() self.locked_nodes = np.full((game.arena.height, game.arena.width), None) self.nodes = np.full((game.arena.height, game.arena.width), None) self.tzones = np.zeros((game.arena.height, game.arena.width), dtype=np.uint8) self.current_prob = 1 self.open_set = PriorityQueue() self.risky_sets = {} self.target_found = False for p in game.get_players_on_pitch(): if p.team != player.team and p.has_tackle_zone(): for square in game.get_adjacent_squares(p.position): self.tzones[square.y][square.x] += 1 self.gfi_target = None def get_path(self, target): paths = 
self.get_paths(target) if len(paths) > 0: return paths[0] return None def get_paths(self, target=None): self.gfis = self.player.num_gfis_left() self.ma = self.player.num_moves_left() self.carries_ball = self.player is self.game.get_ball_carrier() self.ball_position = self.game.get_ball_position() if self.game.get_ball().on_ground else None self.endzone_x = 1 if self.player.team is self.game.state.home_team else self.game.arena.width - 2 self.gfi_target = 3 if self.game.state.weather is WeatherType.BLIZZARD else 2 can_dodge = self.player.has_skill(Skill.DODGE) and Skill.DODGE not in self.player.state.used_skills can_sure_feet = self.player.has_skill(Skill.SURE_FEET) and Skill.SURE_FEET not in self.player.state.used_skills can_sure_hands = self.player.has_skill(Skill.SURE_HANDS) rr_states = {(self.trr, can_dodge, can_sure_feet, can_sure_hands): 1} # RRs left and probability of success node = Node(None, position=self.player.position, moves_left=self.ma, gfis_left=self.gfis, euclidean_distance=0, rr_states=rr_states, can_foul=self.can_foul, can_handoff=self.can_handoff, can_block=self.can_block) if not self.player.state.up: node = self._expand_stand_up(node) self.nodes[node.position.y][node.position.x] = node self.open_set.put((0, node)) self._expansion(target) self._clear() while not self.target_found and len(self.risky_sets) > 0: self._prepare_nodes() self._expansion(target) self._clear() return self._collect_paths(target) def _get_pickup_target(self, to_pos): zones_to = self.tzones[to_pos.y][to_pos.x] modifiers = 1 if not self.player.has_skill(Skill.BIG_HAND): modifiers -= int(zones_to) if self.game.state.weather == WeatherType.POURING_RAIN: if not self.player.has_skill(Skill.BIG_HAND): modifiers -= 1 if self.player.has_skill(Skill.EXTRA_ARMS): modifiers += 1 target = Rules.agility_table[self.player.get_ag()] - modifiers return min(6, max(2, target)) def _get_handoff_target(self, catcher): modifiers = self.game.get_catch_modifiers(catcher, handoff=True) target = 
Rules.agility_table[catcher.get_ag()] - modifiers return min(6, max(2, target)) def _get_dodge_target(self, from_pos, to_pos): zones_from = self.tzones[from_pos.y][from_pos.x] if zones_from == 0: return None zones_to = int(self.tzones[to_pos.y][to_pos.x]) modifiers = 1 ignore_opp_mods = False if self.player.has_skill(Skill.STUNTY): modifiers = 1 ignore_opp_mods = True if self.player.has_skill(Skill.TITCHY): modifiers += 1 ignore_opp_mods = True if self.player.has_skill(Skill.TWO_HEADS): modifiers += 1 if not ignore_opp_mods: modifiers -= zones_to target = Rules.agility_table[self.player.get_ag()] - modifiers return min(6, max(2, target)) def _expand(self, node: Node, target=None): if target is not None: # TODO: handoff? if type(target) == Square and target.distance(node.position) > node.moves_left + node.gfis_left: return if type(target) == int and abs(target - node.position.x) > node.moves_left + node.gfis_left: return if type(target) == Square and node.position == target: self.target_found = True return if type(target) == int and node.position.x == target: self.target_found = True return if node.block_dice is not None or node.handoff_roll is not None: return if self.carries_ball and node.position.x == self.endzone_x: return if (not self.carries_ball) and node.position == self.ball_position: return out_of_moves = False if node.moves_left + node.gfis_left <= 0: if not node.can_handoff and not node.can_foul: return out_of_moves = True for direction in self.DIRECTIONS: next_node = self._expand_node(node, direction, out_of_moves=out_of_moves) if next_node is None: continue rounded_p = round(next_node.prob, 6) if rounded_p < self.current_prob: self._add_risky_move(rounded_p, next_node) else: self.open_set.put((next_node.euclidean_distance, next_node)) self.nodes[next_node.position.y][next_node.position.x] = next_node def _expand_node(self, node, direction, out_of_moves=False): euclidean_distance = node.euclidean_distance + 1 if direction.x == 0 or direction.y == 0 else 
node.euclidean_distance + 1.41421 to_pos = self.game.state.pitch.squares[node.position.y + direction.y][node.position.x + direction.x] if not (1 <= to_pos.x < self.game.arena.width - 1 and 1 <= to_pos.y < self.game.arena.height - 1): return None player_at = self.game.get_player_at(to_pos) if player_at is not None: if player_at.team == self.player.team and node.can_handoff and player_at.can_catch(): return self._expand_handoff_node(node, to_pos) elif player_at.team != self.player.team and node.can_block and player_at.state.up: return self._expand_block_node(node, euclidean_distance, to_pos, player_at) elif player_at.team != self.player.team and node.can_foul and not player_at.state.up: return self._expand_foul_node(node, to_pos, player_at) return None if not out_of_moves: return self._expand_move_node(node, euclidean_distance, to_pos) return None def _expand_move_node(self, node, euclidean_distance, to_pos): best_node = self.nodes[to_pos.y][to_pos.x] best_before = self.locked_nodes[to_pos.y][to_pos.x] gfi = node.moves_left == 0 moves_left_next = max(0, node.moves_left - 1) gfis_left_next = node.gfis_left - 1 if gfi else node.gfis_left total_moves_left = moves_left_next + gfis_left_next if best_node is not None: best_total_moves_left = best_node.moves_left + best_node.gfis_left if total_moves_left < best_total_moves_left: return None if total_moves_left == best_total_moves_left and euclidean_distance >= best_node.euclidean_distance: return None next_node = Node(node, to_pos, moves_left_next, gfis_left_next, euclidean_distance) if gfi: next_node.apply_gfi(self.gfi_target) if self.tzones[node.position.y][node.position.x] > 0: target = self._get_dodge_target(node.position, to_pos) next_node.apply_dodge(target) if self.ball_position == to_pos: target = self._get_pickup_target(to_pos) next_node.apply_pickup(target) if best_before is not None and self._dominant(next_node, best_before) == best_before: return None return next_node def _expand_foul_node(self, node, to_pos, 
player_at): best_node = self.nodes[to_pos.y][to_pos.x] best_before = self.locked_nodes[to_pos.y][to_pos.x] assists_from, assists_to = self.game.num_assists_at(self.player, player_at, node.position, foul=True) target = min(12, max(2, player_at.get_av() + 1 - assists_from + assists_to)) next_node = Node(node, to_pos, 0, 0, node.euclidean_distance) next_node.apply_foul(target) if best_node is not None and self._best(next_node, best_node) == best_node: return None if best_before is not None and self._dominant(next_node, best_before) == best_before: return None return next_node def _expand_handoff_node(self, node, to_pos): best_node = self.nodes[to_pos.y][to_pos.x] best_before = self.locked_nodes[to_pos.y][to_pos.x] player_at = self.game.get_player_at(to_pos) next_node = Node(node, to_pos, 0, 0, node.euclidean_distance) target = self._get_handoff_target(player_at) next_node.apply_handoff(target) if best_node is not None and self._best(next_node, best_node) == best_node: return None if best_before is not None and self._dominant(next_node, best_before) == best_before: return None return next_node def _expand_block_node(self, node, euclidean_distance, to_pos, player_at): best_node = self.nodes[to_pos.y][to_pos.x] best_before = self.locked_nodes[to_pos.y][to_pos.x] block_dice = self.game.num_block_dice_at(attacker=self.player, defender=player_at, position=node.position, blitz=True) gfi = node.moves_left == 0 moves_left_next = node.moves_left - 1 if not gfi else node.moves_left gfis_left_next = node.gfis_left - 1 if gfi else node.gfis_left next_node = Node(node, to_pos, moves_left_next, gfis_left_next, euclidean_distance, block_dice=block_dice, can_block=False) if gfi: next_node.apply_gfi(self.gfi_target) if best_node is not None and self._best(next_node, best_node) == best_node: return None if best_before is not None and self._dominant(next_node, best_before) == best_before: return None return next_node def _add_risky_move(self, prob, node): if prob not in self.risky_sets: 
self.risky_sets[prob] = [] self.risky_sets[prob].append(node) def _expand_stand_up(self, node): if self.player.has_skill(Skill.JUMP_UP): return Node(node, self.player.position, self.ma, self.gfis, euclidean_distance=0) elif self.ma < 3: target = max(2, min(6, 4-self.game.get_stand_up_modifier(self.player))) next_node = Node(node, self.player.position, 0, self.gfis, euclidean_distance=0) next_node.apply_stand_up(target) return next_node next_node = Node(node, self.player.position, self.ma - 3, self.gfis, euclidean_distance=0) return next_node def _best(self, a: Node, b: Node): if self.directly_to_adjacent and a.position.distance(self.player.position) == 1 and a.moves_left > b.moves_left: return a if self.directly_to_adjacent and b.position.distance(self.player.position) == 1 and b.moves_left > a.moves_left: return b a_moves_left = a.moves_left + a.gfis_left b_moves_left = b.moves_left + b.gfis_left block = a.block_dice is not None foul = a.foul_roll is not None if a.prob > b.prob: return a if b.prob > a.prob: return b if foul and a.foul_roll < b.foul_roll: return a if foul and b.foul_roll < a.foul_roll: return b if block and a.block_dice > b.block_dice: return a if block and b.block_dice > a.block_dice: return b if a_moves_left > b_moves_left: return a if b_moves_left > a_moves_left: return b if a.euclidean_distance < b.euclidean_distance: return a if b.euclidean_distance < a.euclidean_distance: return b return None def _dominant(self, a: Node, b: Node): if self.directly_to_adjacent and a.position.distance(self.player.position) == 1 and a.moves_left > b.moves_left: return a if self.directly_to_adjacent and b.position.distance(self.player.position) == 1 and b.moves_left > a.moves_left: return b a_moves_left = a.moves_left + a.gfis_left b_moves_left = b.moves_left + b.gfis_left # TODO: Write out as above if a.prob > b.prob and (a.foul_roll is None or a.foul_roll <= b.foul_roll) and (a.block_dice is None or a.block_dice >= b.block_dice) and (a_moves_left > b_moves_left 
or (a_moves_left == b_moves_left and a.euclidean_distance < b.euclidean_distance)): return a if b.prob > a.prob and (b.foul_roll is None or b.foul_roll <= a.foul_roll) and (b.block_dice is None or b.block_dice >= a.block_dice) and (b_moves_left > a_moves_left or (b_moves_left == a_moves_left and b.euclidean_distance < a.euclidean_distance)): return b return None def _clear(self): for y in range(self.game.arena.height): for x in range(self.game.arena.width): node = self.nodes[y][x] if node is not None: before = self.locked_nodes[y][x] if before is None or self._best(node, before) == node: self.locked_nodes[y][x] = node self.nodes[y][x] = None self.open_set = PriorityQueue() def _prepare_nodes(self): if len(self.risky_sets) > 0: probs = sorted(self.risky_sets.keys()) self.current_prob = probs[-1] for node in self.risky_sets[probs[-1]]: best_before = self.locked_nodes[node.position.y][node.position.x] if best_before is not None and self._dominant(best_before, node) == best_before: continue existing_node = self.nodes[node.position.y][node.position.x] if existing_node is None or self._best(existing_node, node) == node: self.open_set.put((node.euclidean_distance, node)) self.nodes[node.position.y][node.position.x] = node del self.risky_sets[probs[-1]] def _expansion(self, target=None): while not self.open_set.empty(): _, best_node = self.open_set.get() self._expand(best_node, target) def _collect_paths(self, target=None): if type(target) == Square: node = self.locked_nodes[target.y][target.x] if node is not None: return [Path(node)] return [] paths = [] for y in range(self.game.arena.height): for x in range(self.game.arena.width): if self.player.position.x == x and self.player.position.y == y: continue if type(target) == int and not target == x: continue node = self.locked_nodes[y][x] if node is not None: paths.append(Path(node)) return paths def get_safest_path(game, player, position, from_position=None, allow_team_reroll=False, num_moves_used=0, blitz=False): """ 
:param game: :param player: the player to move :param position: the location to move to :param num_moves_used: the number of moves already used by the player. If None, it will use the player's current number of used moves. :param allow_team_reroll: allow team rerolls to be used. :return a path containing the list of squares that forms the safest (and thereafter shortest) path for the given player to the given position and the probability of success. """ if from_position is not None and num_moves_used != 0: orig_player, orig_ball = _alter_state(game, player, from_position, num_moves_used) can_handoff = game.is_handoff_available() and game.get_ball_carrier() == player finder = Pathfinder(game, player, trr=allow_team_reroll, can_block=blitz, can_handoff=can_handoff) path = finder.get_path(target=position) if from_position is not None and num_moves_used != 0: _reset_state(game, player, orig_player, orig_ball) return path def get_safest_path_to_endzone(game, player, from_position=None, allow_team_reroll=False, num_moves_used=None): """ :param game: :param player: :param from_position: position to start movement from. If None, it will start from the player's current position. :param num_moves_used: the number of moves already used by the player. If None, it will use the player's current number of used moves. :param allow_team_reroll: allow team rerolls to be used.´ :return: a path containing the list of squares that forms the safest (and thereafter shortest) path for the given player to a position in the opponent endzone. 
""" if from_position is not None and num_moves_used != 0: orig_player, orig_ball = _alter_state(game, player, from_position, num_moves_used) x = game.get_opp_endzone_x(player.team) finder = Pathfinder(game, player, trr=allow_team_reroll) path = finder.get_path(target=x) if from_position is not None and num_moves_used != 0: _reset_state(game, player, orig_player, orig_ball) return path def get_all_paths(game, player, from_position=None, allow_team_reroll=False, num_moves_used=None, blitz=False): """ :param game: :param player: the player to move :param from_position: position to start movement from. If None, it will start from the player's current position. :param num_moves_used: the number of moves already used by the player. If None, it will use the player's current number of used moves. :param allow_team_reroll: allow team rerolls to be used. :param blitz: only finds blitz moves if True. :return a path containing the list of squares that forms the safest (and thereafter shortest) path for the given player to a position that is adjacent to the other player and the probability of success. 
""" if from_position is not None and num_moves_used != 0: orig_player, orig_ball = _alter_state(game, player, from_position, num_moves_used) finder = Pathfinder(game, player, trr=allow_team_reroll, can_block=blitz) paths = finder.get_paths() if from_position is not None and num_moves_used != 0: _reset_state(game, player, orig_player, orig_ball) return paths def _alter_state(game, player, from_position, moves_used): orig_player, orig_ball = None, None if from_position is not None or moves_used is not None: orig_player = copy.deepcopy(player) orig_ball = copy.deepcopy(game.get_ball()) # Move player if another starting position is used if from_position is not None: assert game.get_player_at(from_position) is None or game.get_player_at(from_position) == player game.move(player, from_position) if from_position == game.get_ball_position() and game.get_ball().on_ground: game.get_ball().carried = True if moves_used != None: assert moves_used >= 0 player.state.moves = moves_used if moves_used > 0: player.state.up = True return orig_player, orig_ball def _reset_state(game, player, orig_player, orig_ball): if orig_player is not None: game.move(player, orig_player.position) player.state = orig_player.state if orig_ball is not None: game.ball = orig_ball
42.656151
258
0.629493
4a0155b0baa0426936aea465fea26b23d98f9b3e
818
py
Python
misc/parse_templates.py
hitoto/openshift-diy-php-hitoto
3fed311516b89e42380248488a595b1f83887375
[ "MIT" ]
1
2019-04-14T20:30:27.000Z
2019-04-14T20:30:27.000Z
misc/parse_templates.py
hitoto/openshift-diy-php-hitoto
3fed311516b89e42380248488a595b1f83887375
[ "MIT" ]
null
null
null
misc/parse_templates.py
hitoto/openshift-diy-php-hitoto
3fed311516b89e42380248488a595b1f83887375
[ "MIT" ]
null
null
null
import os, re, shutil internalIp = os.environ['OPENSHIFT_DIY_IP'] runtimeDir = os.environ['OPENSHIFT_HOMEDIR'] + "/app-root/runtime" repoDir = os.environ['OPENSHIFT_HOMEDIR'] + "/app-root/runtime/repo" f = open(repoDir + '/misc/templates/httpd.conf.tpl', 'r') conf = f.read().replace('{{OPENSHIFT_INTERNAL_IP}}', internalIp).replace('{{OPENSHIFT_REPO_DIR}}', repoDir).replace('{{OPENSHIFT_RUNTIME_DIR}}', runtimeDir) f.close() f = open(runtimeDir + '/srv/httpd/conf/httpd.conf', 'w') f.write(conf) f.close() f = open(repoDir + '/misc/templates/php.ini.tpl', 'r') conf = f.read().replace('{{OPENSHIFT_INTERNAL_IP}}', internalIp).replace('{{OPENSHIFT_REPO_DIR}}', repoDir).replace('{{OPENSHIFT_RUNTIME_DIR}}', runtimeDir) f.close() f = open(runtimeDir + '/srv/php/etc/apache2/php.ini', 'w') f.write(conf) f.close()
37.181818
156
0.706601
4a0155b241c1479ff6b9ce2bd68e8c0a0cd30f5f
132,522
py
Python
src/azure-cli/azure/cli/command_modules/storage/_params.py
GalGrinblat/azure-cli
b30b9cf9f90d01b9b6708cc56b82e32cd7182dae
[ "MIT" ]
1
2021-11-17T18:09:28.000Z
2021-11-17T18:09:28.000Z
src/azure-cli/azure/cli/command_modules/storage/_params.py
GalGrinblat/azure-cli
b30b9cf9f90d01b9b6708cc56b82e32cd7182dae
[ "MIT" ]
1
2021-09-14T14:15:25.000Z
2021-09-14T14:30:44.000Z
src/azure-cli/azure/cli/command_modules/storage/_params.py
GalGrinblat/azure-cli
b30b9cf9f90d01b9b6708cc56b82e32cd7182dae
[ "MIT" ]
2
2021-07-07T12:43:11.000Z
2021-07-09T19:30:53.000Z
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from azure.cli.core.profiles import ResourceType from azure.cli.core.commands.validators import get_default_location_from_resource_group from azure.cli.core.commands.parameters import (tags_type, file_type, get_location_type, get_enum_type, get_three_state_flag, edge_zone_type) from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL from ._validators import (get_datetime_type, validate_metadata, get_permission_validator, get_permission_help_string, resource_type_type, services_type, validate_entity, validate_select, validate_blob_type, validate_included_datasets_validator, validate_custom_domain, validate_container_public_access, validate_table_payload_format, add_progress_callback, process_resource_group, storage_account_key_options, process_file_download_namespace, process_metric_update_namespace, get_char_options_validator, validate_bypass, validate_encryption_source, validate_marker, validate_storage_data_plane_list, validate_azcopy_upload_destination_url, validate_azcopy_remove_arguments, as_user_validator, parse_storage_account, validate_delete_retention_days, validate_container_delete_retention_days, validate_file_delete_retention_days, validator_change_feed_retention_days, validate_fs_public_access, validate_logging_version, validate_or_policy, validate_policy, get_api_version_type, blob_download_file_path_validator, blob_tier_validator, validate_subnet) def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements, too-many-lines, too-many-branches from argcomplete.completers import FilesCompleter from six import u as unicode_string from knack.arguments import 
ignore_type, CLIArgumentType from azure.cli.core.commands.parameters import get_resource_name_completion_list from .sdkutil import get_table_data_type from .completers import get_storage_name_completion_list t_base_blob_service = self.get_sdk('blob.baseblobservice#BaseBlobService') t_file_service = self.get_sdk('file#FileService') t_queue_service = self.get_sdk('queue#QueueService') t_table_service = get_table_data_type(self.cli_ctx, 'table', 'TableService') storage_account_type = CLIArgumentType(options_list='--storage-account', help='The name or ID of the storage account.', validator=parse_storage_account, id_part='name') acct_name_type = CLIArgumentType(options_list=['--account-name', '-n'], help='The storage account name.', id_part='name', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'), local_context_attribute=LocalContextAttribute( name='storage_account_name', actions=[LocalContextAction.GET])) blob_name_type = CLIArgumentType(options_list=['--blob-name', '-b'], help='The blob name.', completer=get_storage_name_completion_list(t_base_blob_service, 'list_blobs', parent='container_name')) container_name_type = CLIArgumentType(options_list=['--container-name', '-c'], help='The container name.', completer=get_storage_name_completion_list(t_base_blob_service, 'list_containers')) directory_type = CLIArgumentType(options_list=['--directory-name', '-d'], help='The directory name.', completer=get_storage_name_completion_list(t_file_service, 'list_directories_and_files', parent='share_name')) file_name_type = CLIArgumentType(options_list=['--file-name', '-f'], completer=get_storage_name_completion_list(t_file_service, 'list_directories_and_files', parent='share_name')) share_name_type = CLIArgumentType(options_list=['--share-name', '-s'], help='The file share name.', completer=get_storage_name_completion_list(t_file_service, 'list_shares')) table_name_type = CLIArgumentType(options_list=['--table-name', '-t'], 
completer=get_storage_name_completion_list(t_table_service, 'list_tables')) queue_name_type = CLIArgumentType(options_list=['--queue-name', '-q'], help='The queue name.', completer=get_storage_name_completion_list(t_queue_service, 'list_queues')) progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.', action='store_true', validator=add_progress_callback) socket_timeout_type = CLIArgumentType(help='The socket timeout(secs), used by the service to regulate data flow.', type=int) large_file_share_type = CLIArgumentType( action='store_true', min_api='2019-04-01', help='Enable the capability to support large file shares with more than 5 TiB capacity for storage account.' 'Once the property is enabled, the feature cannot be disabled. Currently only supported for LRS and ' 'ZRS replication types, hence account conversions to geo-redundant accounts would not be possible. ' 'For more information, please refer to https://go.microsoft.com/fwlink/?linkid=2086047.') adds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2019-04-01', arg_group='Azure Files Identity Based Authentication', help='Enable Azure Files Active Directory Domain Service Authentication for ' 'storage account. When --enable-files-adds is set to true, Azure Active ' 'Directory Properties arguments must be provided.') aadds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2018-11-01', arg_group='Azure Files Identity Based Authentication', help='Enable Azure Active Directory Domain Services authentication for Azure Files') domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties", help="Specify the primary domain that the AD DNS server is authoritative for. " "Required when --enable-files-adds is set to True") net_bios_domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties", help="Specify the NetBIOS domain name. 
" "Required when --enable-files-adds is set to True") forest_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties", help="Specify the Active Directory forest to get. " "Required when --enable-files-adds is set to True") domain_guid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties", help="Specify the domain GUID. Required when --enable-files-adds is set to True") domain_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties", help="Specify the security identifier (SID). Required when --enable-files-adds " "is set to True") azure_storage_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties", help="Specify the security identifier (SID) for Azure Storage. " "Required when --enable-files-adds is set to True") exclude_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these files where the name ' 'matches the pattern list. For example: *.jpg;*.pdf;exactName. This ' 'option supports wildcard characters (*)') include_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these files where the name ' 'matches the pattern list. For example: *.jpg;*.pdf;exactName. This ' 'option supports wildcard characters (*)') exclude_path_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these paths. This option does not ' 'support wildcard characters (*). Checks relative path prefix. For example: ' 'myFolder;myFolder/subDirName/file.pdf.') include_path_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these paths. This option does ' 'not support wildcard characters (*). Checks relative path prefix. For example:' 'myFolder;myFolder/subDirName/file.pdf') recursive_type = CLIArgumentType(options_list=['--recursive', '-r'], action='store_true', help='Look into sub-directories recursively.') sas_help = 'The permissions the SAS grants. 
Allowed values: {}. Do not use if a stored access policy is ' \ 'referenced with --id that specifies this value. Can be combined.' t_routing_choice = self.get_models('RoutingChoice', resource_type=ResourceType.MGMT_STORAGE) routing_choice_type = CLIArgumentType( arg_group='Routing Preference', arg_type=get_enum_type(t_routing_choice), help='Routing Choice defines the kind of network routing opted by the user.', min_api='2019-06-01') publish_microsoft_endpoints_type = CLIArgumentType( arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01', help='A boolean flag which indicates whether microsoft routing storage endpoints are to be published.') publish_internet_endpoints_type = CLIArgumentType( arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01', help='A boolean flag which indicates whether internet routing storage endpoints are to be published.') umask_type = CLIArgumentType( help='When creating a file or directory and the parent folder does not have a default ACL, the umask restricts ' 'the permissions of the file or directory to be created. The resulting permission is given by p & ^u, ' 'where p is the permission and u is the umask. For more information, please refer to ' 'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#umask.') permissions_type = CLIArgumentType( help='POSIX access permissions for the file owner, the file owning group, and others. Each class may be ' 'granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) ' 'and 4-digit octal notation (e.g. 0766) are supported. For more information, please refer to https://' 'docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control#levels-of-permission.') timeout_type = CLIArgumentType( help='Request timeout in seconds. 
Applies to each call to the service.', type=int ) marker_type = CLIArgumentType( help='A string value that identifies the portion of the list of containers to be ' 'returned with the next listing operation. The operation returns the NextMarker value within ' 'the response body if the listing operation did not return all containers remaining to be listed ' 'with the current page. If specified, this generator will begin returning results from the point ' 'where the previous generator stopped.') num_results_type = CLIArgumentType( default=5000, validator=validate_storage_data_plane_list, help='Specify the maximum number to return. If the request does not specify ' 'num_results, or specifies a value greater than 5000, the server will return up to 5000 items. Note that ' 'if the listing operation crosses a partition boundary, then the service will return a continuation token ' 'for retrieving the remaining of the results. Provide "*" to return all.' ) if_modified_since_type = CLIArgumentType( help='Commence only if modified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')', type=get_datetime_type(False)) if_unmodified_since_type = CLIArgumentType( help='Commence only if unmodified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')', type=get_datetime_type(False)) allow_shared_key_access_type = CLIArgumentType( arg_type=get_three_state_flag(), options_list=['--allow-shared-key-access', '-k'], min_api='2019-04-01', help='Indicate whether the storage account permits requests to be authorized with the account access key via ' 'Shared Key. If false, then all requests, including shared access signatures, must be authorized with ' 'Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.') sas_expiration_period_type = CLIArgumentType( options_list=['--sas-expiration-period', '--sas-exp'], min_api='2021-02-01', help='Expiration period of the SAS Policy assigned to the storage account, DD.HH:MM:SS.' 
) key_expiration_period_in_days_type = CLIArgumentType( options_list=['--key-expiration-period-in-days', '--key-exp-days'], min_api='2021-02-01', type=int, help='Expiration period in days of the Key Policy assigned to the storage account' ) allow_cross_tenant_replication_type = CLIArgumentType( arg_type=get_three_state_flag(), options_list=['--allow-cross-tenant-replication', '-r'], min_api='2021-04-01', help='Allow or disallow cross AAD tenant object replication. The default interpretation is true for this ' 'property.') default_share_permission_type = CLIArgumentType( options_list=['--default-share-permission', '-d'], arg_type=get_enum_type(['None', 'StorageFileDataSmbShareContributor', 'StorageFileDataSmbShareElevatedContributor', 'StorageFileDataSmbShareReader']), min_api='2020-08-01-preview', arg_group='Azure Files Identity Based Authentication', help='Default share permission for users using Kerberos authentication if RBAC role is not assigned.') t_blob_tier = self.get_sdk('_generated.models._azure_blob_storage_enums#AccessTierOptional', resource_type=ResourceType.DATA_STORAGE_BLOB) t_rehydrate_priority = self.get_sdk('_generated.models._azure_blob_storage_enums#RehydratePriority', resource_type=ResourceType.DATA_STORAGE_BLOB) tier_type = CLIArgumentType( arg_type=get_enum_type(t_blob_tier), min_api='2019-02-02', help='The tier value to set the blob to. For page blob, the tier correlates to the size of the blob ' 'and number of allowed IOPS. Possible values are P10, P15, P20, P30, P4, P40, P50, P6, P60, P70, P80 ' 'and this is only applicable to page blobs on premium storage accounts; For block blob, possible ' 'values are Archive, Cool and Hot. This is only applicable to block blobs on standard storage accounts.' 
) rehydrate_priority_type = CLIArgumentType( arg_type=get_enum_type(t_rehydrate_priority), options_list=('--rehydrate-priority', '-r'), min_api='2019-02-02', help='Indicate the priority with which to rehydrate an archived blob.') action_type = CLIArgumentType( help='The action of virtual network rule. Possible value is Allow.' ) with self.argument_context('storage') as c: c.argument('container_name', container_name_type) c.argument('directory_name', directory_type) c.argument('share_name', share_name_type) c.argument('table_name', table_name_type) c.argument('retry_wait', options_list=('--retry-interval',)) c.ignore('progress_callback') c.argument('metadata', nargs='+', help='Metadata in space-separated key=value pairs. This overwrites any existing metadata.', validator=validate_metadata) c.argument('timeout', help='Request timeout in seconds. Applies to each call to the service.', type=int) with self.argument_context('storage', arg_group='Precondition') as c: c.argument('if_modified_since', if_modified_since_type) c.argument('if_unmodified_since', if_unmodified_since_type) c.argument('if_match') c.argument('if_none_match') for item in ['delete', 'show', 'update', 'show-connection-string', 'keys', 'network-rule', 'revoke-delegation-keys', 'failover']: # pylint: disable=line-too-long with self.argument_context('storage account {}'.format(item)) as c: c.argument('account_name', acct_name_type, options_list=['--name', '-n']) c.argument('resource_group_name', required=False, validator=process_resource_group) with self.argument_context('storage account blob-inventory-policy') as c: c.ignore('blob_inventory_policy_name') c.argument('resource_group_name', required=False, validator=process_resource_group) c.argument('account_name', help='The name of the storage account within the specified resource group. 
Storage account names ' 'must be between 3 and 24 characters in length and use numbers and lower-case letters only.') with self.argument_context('storage account blob-inventory-policy create') as c: c.argument('policy', type=file_type, completer=FilesCompleter(), help='The Storage Account Blob Inventory Policy, string in JSON format or json file path. See more ' 'details in https://docs.microsoft.com/azure/storage/blobs/blob-inventory#inventory-policy.') with self.argument_context('storage account check-name') as c: c.argument('name', options_list=['--name', '-n'], help='The name of the storage account within the specified resource group') with self.argument_context('storage account delete') as c: c.argument('account_name', acct_name_type, options_list=['--name', '-n'], local_context_attribute=None) with self.argument_context('storage account create', resource_type=ResourceType.MGMT_STORAGE) as c: t_account_type, t_sku_name, t_kind, t_tls_version = \ self.get_models('AccountType', 'SkuName', 'Kind', 'MinimumTlsVersion', resource_type=ResourceType.MGMT_STORAGE) t_identity_type = self.get_models('IdentityType', resource_type=ResourceType.MGMT_STORAGE) c.register_common_storage_account_options() c.argument('location', get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) c.argument('account_type', help='The storage account type', arg_type=get_enum_type(t_account_type)) c.argument('account_name', acct_name_type, options_list=['--name', '-n'], completer=None, local_context_attribute=LocalContextAttribute( name='storage_account_name', actions=[LocalContextAction.SET], scopes=[ALL])) c.argument('kind', help='Indicate the type of storage account.', arg_type=get_enum_type(t_kind), default='StorageV2' if self.cli_ctx.cloud.profile == 'latest' else 'Storage') c.argument('https_only', arg_type=get_three_state_flag(), min_api='2019-04-01', help='Allow https traffic only to storage service if set to true. 
The default value is true.') c.argument('https_only', arg_type=get_three_state_flag(), max_api='2018-11-01', help='Allow https traffic only to storage service if set to true. The default value is false.') c.argument('tags', tags_type) c.argument('custom_domain', help='User domain assigned to the storage account. Name is the CNAME source.') c.argument('sku', help='The storage account SKU.', arg_type=get_enum_type(t_sku_name, default='standard_ragrs')) c.argument('enable_files_aadds', aadds_type) c.argument('enable_files_adds', adds_type) c.argument('enable_large_file_share', arg_type=large_file_share_type) c.argument('domain_name', domain_name_type) c.argument('net_bios_domain_name', net_bios_domain_name_type) c.argument('forest_name', forest_name_type) c.argument('domain_guid', domain_guid_type) c.argument('domain_sid', domain_sid_type) c.argument('azure_storage_sid', azure_storage_sid_type) c.argument('enable_hierarchical_namespace', arg_type=get_three_state_flag(), options_list=['--enable-hierarchical-namespace', '--hns', c.deprecate(target='--hierarchical-namespace', redirect='--hns', hide=True)], help=" Allow the blob service to exhibit filesystem semantics. This property can be enabled only " "when storage account kind is StorageV2.", min_api='2018-02-01') c.argument('encryption_key_type_for_table', arg_type=get_enum_type(['Account', 'Service']), help='Set the encryption key type for Table service. "Account": Table will be encrypted ' 'with account-scoped encryption key. "Service": Table will always be encrypted with ' 'service-scoped keys. Currently the default encryption key type is "Service".', min_api='2019-06-01', options_list=['--encryption-key-type-for-table', '-t']) c.argument('encryption_key_type_for_queue', arg_type=get_enum_type(['Account', 'Service']), help='Set the encryption key type for Queue service. "Account": Queue will be encrypted ' 'with account-scoped encryption key. "Service": Queue will always be encrypted with ' 'service-scoped keys. 
Currently the default encryption key type is "Service".', min_api='2019-06-01', options_list=['--encryption-key-type-for-queue', '-q']) c.argument('routing_choice', routing_choice_type) c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type) c.argument('publish_internet_endpoints', publish_internet_endpoints_type) c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'], arg_type=get_three_state_flag(), help='A boolean indicating whether or not the service applies a secondary layer of encryption with ' 'platform managed keys for data at rest.') c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01', help='Allow or disallow public access to all blobs or containers in the storage account. ' 'The default value for this property is null, which is equivalent to true. When true, containers ' 'in the account may be configured for public access. Note that setting this property to true does ' 'not enable anonymous access to any data in the account. The additional step of configuring the ' 'public access setting for a container is required to enable anonymous access.') c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version), help='The minimum TLS version to be permitted on requests to storage. ' 'The default interpretation is TLS 1.0 for this property') c.argument('allow_shared_key_access', allow_shared_key_access_type) c.argument('edge_zone', edge_zone_type, min_api='2020-08-01-preview') c.argument('identity_type', arg_type=get_enum_type(t_identity_type), arg_group='Identity', help='The identity type.') c.argument('user_identity_id', arg_group='Identity', help='The key is the ARM resource identifier of the identity. 
Only 1 User Assigned identity is ' 'permitted here.') c.argument('key_expiration_period_in_days', key_expiration_period_in_days_type, is_preview=True) c.argument('sas_expiration_period', sas_expiration_period_type, is_preview=True) c.argument('allow_cross_tenant_replication', allow_cross_tenant_replication_type) c.argument('default_share_permission', default_share_permission_type) c.argument('enable_nfs_v3', arg_type=get_three_state_flag(), is_preview=True, min_api='2021-01-01', help='NFS 3.0 protocol support enabled if sets to true.') with self.argument_context('storage account private-endpoint-connection', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], help='The name of the private endpoint connection associated with the Storage Account.') for item in ['approve', 'reject', 'show', 'delete']: with self.argument_context('storage account private-endpoint-connection {}'.format(item), resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], required=False, help='The name of the private endpoint connection associated with the Storage Account.') c.extra('connection_id', options_list=['--id'], help='The ID of the private endpoint connection associated with the Storage Account. 
You can get ' 'it using `az storage account show`.') c.argument('account_name', help='The storage account name.', required=False) c.argument('resource_group_name', help='The resource group name of specified storage account.', required=False) c.argument('description', help='Comments for {} operation.'.format(item)) with self.argument_context('storage account update', resource_type=ResourceType.MGMT_STORAGE) as c: t_tls_version = self.get_models('MinimumTlsVersion', resource_type=ResourceType.MGMT_STORAGE) t_identity_type = self.get_models('IdentityType', resource_type=ResourceType.MGMT_STORAGE) c.register_common_storage_account_options() c.argument('sku', arg_type=get_enum_type(t_sku_name), help='Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, ' 'nor can accounts of those SKU names be updated to any other value') c.argument('custom_domain', help='User domain assigned to the storage account. Name is the CNAME source. Use "" to clear ' 'existing value.', validator=validate_custom_domain) c.argument('use_subdomain', help='Specify whether to use indirect CNAME validation.', arg_type=get_enum_type(['true', 'false'])) c.argument('tags', tags_type, default=None) c.argument('enable_files_aadds', aadds_type) c.argument('enable_files_adds', adds_type) c.argument('enable_large_file_share', arg_type=large_file_share_type) c.argument('domain_name', domain_name_type) c.argument('net_bios_domain_name', net_bios_domain_name_type) c.argument('forest_name', forest_name_type) c.argument('domain_guid', domain_guid_type) c.argument('domain_sid', domain_sid_type) c.argument('azure_storage_sid', azure_storage_sid_type) c.argument('routing_choice', routing_choice_type) c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type) c.argument('publish_internet_endpoints', publish_internet_endpoints_type) c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01', help='Allow or disallow public access to all 
blobs or containers in the storage account. ' 'The default value for this property is null, which is equivalent to true. When true, containers ' 'in the account may be configured for public access. Note that setting this property to true does ' 'not enable anonymous access to any data in the account. The additional step of configuring the ' 'public access setting for a container is required to enable anonymous access.') c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version), help='The minimum TLS version to be permitted on requests to storage. ' 'The default interpretation is TLS 1.0 for this property') c.argument('allow_shared_key_access', allow_shared_key_access_type) c.argument('identity_type', arg_type=get_enum_type(t_identity_type), arg_group='Identity', help='The identity type.') c.argument('user_identity_id', arg_group='Identity', help='The key is the ARM resource identifier of the identity. Only 1 User Assigned identity is ' 'permitted here.') c.argument('key_expiration_period_in_days', key_expiration_period_in_days_type, is_preview=True) c.argument('sas_expiration_period', sas_expiration_period_type, is_preview=True) c.argument('allow_cross_tenant_replication', allow_cross_tenant_replication_type) c.argument('default_share_permission', default_share_permission_type) for scope in ['storage account create', 'storage account update']: with self.argument_context(scope, arg_group='Customer managed key', min_api='2017-06-01', resource_type=ResourceType.MGMT_STORAGE) as c: t_key_source = self.get_models('KeySource', resource_type=ResourceType.MGMT_STORAGE) c.argument('encryption_key_name', help='The name of the KeyVault key.', ) c.argument('encryption_key_vault', help='The Uri of the KeyVault.') c.argument('encryption_key_version', help='The version of the KeyVault key to use, which will opt out of implicit key rotation. 
' 'Please use "" to opt in key auto-rotation again.') c.argument('encryption_key_source', arg_type=get_enum_type(t_key_source), help='The default encryption key source', validator=validate_encryption_source) c.argument('key_vault_user_identity_id', options_list=['--key-vault-user-identity-id', '-u'], min_api='2021-01-01', help='Resource identifier of the UserAssigned identity to be associated with server-side ' 'encryption on the storage account.') for scope in ['storage account create', 'storage account update']: with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE, min_api='2017-06-01', arg_group='Network Rule') as c: t_bypass, t_default_action = self.get_models('Bypass', 'DefaultAction', resource_type=ResourceType.MGMT_STORAGE) c.argument('bypass', nargs='+', validator=validate_bypass, arg_type=get_enum_type(t_bypass), help='Bypass traffic for space-separated uses.') c.argument('default_action', arg_type=get_enum_type(t_default_action), help='Default action to apply when no rule matches.') c.argument('subnet', help='Name or ID of subnet. 
If name is supplied, `--vnet-name` must be supplied.') c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet) c.argument('action', action_type) with self.argument_context('storage account show-connection-string') as c: c.argument('protocol', help='The default endpoint protocol.', arg_type=get_enum_type(['http', 'https'])) c.argument('sas_token', help='The SAS token to be used in the connection-string.') c.argument('key_name', options_list=['--key'], help='The key to use.', arg_type=get_enum_type(list(storage_account_key_options.keys()))) for item in ['blob', 'file', 'queue', 'table']: c.argument('{}_endpoint'.format(item), help='Custom endpoint for {}s.'.format(item)) with self.argument_context('storage account encryption-scope') as c: c.argument('account_name', help='The storage account name.') c.argument('resource_group_name', validator=process_resource_group, required=False) c.argument('encryption_scope_name', options_list=['--name', '-n'], help='The name of the encryption scope within the specified storage account.') for scope in ['storage account encryption-scope create', 'storage account encryption-scope update']: with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE) as c: from ._validators import validate_encryption_key t_encryption_key_source = self.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE) c.argument('key_source', options_list=['-s', '--key-source'], arg_type=get_enum_type(t_encryption_key_source, default="Microsoft.Storage"), help='The provider for the encryption scope.', validator=validate_encryption_key) c.argument('key_uri', options_list=['-u', '--key-uri'], help='The object identifier for a key vault key object. 
When applied, the encryption scope will ' 'use the key referenced by the identifier to enable customer-managed key support on this ' 'encryption scope.') c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'], arg_type=get_three_state_flag(), min_api='2021-01-01', help='A boolean indicating whether or not the service applies a secondary layer of encryption ' 'with platform managed keys for data at rest.') with self.argument_context('storage account encryption-scope update') as c: t_state = self.get_models("EncryptionScopeState", resource_type=ResourceType.MGMT_STORAGE) c.argument('key_source', options_list=['-s', '--key-source'], arg_type=get_enum_type(t_encryption_key_source), help='The provider for the encryption scope.', validator=validate_encryption_key) c.argument('state', arg_type=get_enum_type(t_state), help='Change the state the encryption scope. When disabled, ' 'all blob read/write operations using this encryption scope will fail.') with self.argument_context('storage account keys list', resource_type=ResourceType.MGMT_STORAGE) as c: t_expand_key_type = self.get_models('ListKeyExpand', resource_type=ResourceType.MGMT_STORAGE) c.argument("expand", options_list=['--expand-key-type'], help='Specify the expanded key types to be listed.', arg_type=get_enum_type(t_expand_key_type), min_api='2019-04-01', is_preview=True) with self.argument_context('storage account keys renew', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('key_name', options_list=['--key'], help='The key options to regenerate.', arg_type=get_enum_type(list(storage_account_key_options.keys()))) c.extra('key_type', help='The key type to regenerate. 
If --key-type is not specified, one of access keys will ' 'be regenerated by default.', arg_type=get_enum_type(['kerb']), min_api='2019-04-01') c.argument('account_name', acct_name_type, id_part=None) with self.argument_context('storage account management-policy create') as c: c.argument('policy', type=file_type, completer=FilesCompleter(), help='The Storage Account ManagementPolicies Rules, in JSON format. See more details in: ' 'https://docs.microsoft.com/azure/storage/common/storage-lifecycle-managment-concepts.') for item in ['create', 'update', 'show', 'delete']: with self.argument_context('storage account management-policy {}'.format(item)) as c: c.argument('account_name', help='The name of the storage account within the specified resource group.') with self.argument_context('storage account keys list') as c: c.argument('account_name', acct_name_type, id_part=None) with self.argument_context('storage account network-rule', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('account_name', acct_name_type, id_part=None) c.argument('ip_address', help='IPv4 address or CIDR range.') c.argument('subnet', help='Name or ID of subnet. 
If name is supplied, `--vnet-name` must be supplied.') c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet) c.argument('action', action_type) c.argument('resource_id', help='The resource id to add in network rule.', arg_group='Resource Access Rule', min_api='2020-08-01-preview') c.argument('tenant_id', help='The tenant id to add in network rule.', arg_group='Resource Access Rule', min_api='2020-08-01-preview') with self.argument_context('storage account blob-service-properties show', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('account_name', acct_name_type, id_part=None) c.argument('resource_group_name', required=False, validator=process_resource_group) with self.argument_context('storage account blob-service-properties update', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('account_name', acct_name_type, id_part=None) c.argument('resource_group_name', required=False, validator=process_resource_group) c.argument('enable_change_feed', arg_type=get_three_state_flag(), min_api='2019-04-01', arg_group='Change Feed Policy') c.argument('change_feed_retention_days', is_preview=True, options_list=['--change-feed-retention-days', '--change-feed-days'], type=int, min_api='2019-06-01', arg_group='Change Feed Policy', validator=validator_change_feed_retention_days, help='Indicate the duration of changeFeed retention in days. ' 'Minimum value is 1 day and maximum value is 146000 days (400 years). ' 'A null value indicates an infinite retention of the change feed.' '(Use `--enable-change-feed` without `--change-feed-days` to indicate null)') c.argument('enable_container_delete_retention', arg_type=get_three_state_flag(), options_list=['--enable-container-delete-retention', '--container-retention'], arg_group='Container Delete Retention Policy', min_api='2019-06-01', help='Enable container delete retention policy for container soft delete when set to true. 
' 'Disable container delete retention policy when set to false.') c.argument('container_delete_retention_days', options_list=['--container-delete-retention-days', '--container-days'], type=int, arg_group='Container Delete Retention Policy', min_api='2019-06-01', validator=validate_container_delete_retention_days, help='Indicate the number of days that the deleted container should be retained. The minimum ' 'specified value can be 1 and the maximum value can be 365.') c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy', min_api='2018-07-01') c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy', validator=validate_delete_retention_days, min_api='2018-07-01') c.argument('enable_restore_policy', arg_type=get_three_state_flag(), arg_group='Restore Policy', min_api='2019-06-01', help="Enable blob restore policy when it set to true.") c.argument('restore_days', type=int, arg_group='Restore Policy', min_api='2019-06-01', help="The number of days for the blob can be restored. 
It should be greater " "than zero and less than Delete Retention Days.") c.argument('enable_versioning', arg_type=get_three_state_flag(), help='Versioning is enabled if set to true.', min_api='2019-06-01') c.argument('default_service_version', options_list=['--default-service-version', '-d'], type=get_api_version_type(), min_api='2018-07-01', help="Indicate the default version to use for requests to the Blob service if an incoming request's " "version is not specified.") c.argument('enable_last_access_tracking', arg_type=get_three_state_flag(), min_api='2019-06-01', options_list=['--enable-last-access-tracking', '-t'], help='When set to true last access time based tracking policy is enabled.') with self.argument_context('storage account file-service-properties show', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('account_name', acct_name_type, id_part=None) c.argument('resource_group_name', required=False, validator=process_resource_group) with self.argument_context('storage account file-service-properties update', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('account_name', acct_name_type, id_part=None) c.argument('resource_group_name', required=False, validator=process_resource_group) c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy', min_api='2019-06-01', help='Enable file service properties for share soft delete.') c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy', validator=validate_file_delete_retention_days, min_api='2019-06-01', help='Indicate the number of days that the deleted item should be retained. The minimum specified ' 'value can be 1 and the maximum value can be 365.') c.argument('enable_smb_multichannel', options_list=['--enable-smb-multichannel', '--mc'], arg_type=get_three_state_flag(), min_api='2020-08-01-preview', arg_group='SMB Setting', help='Set SMB Multichannel setting for file service. 
Applies to Premium FileStorage only.') c.argument('versions', arg_group='SMB Setting', min_api='2020-08-01-preview', help="SMB protocol versions supported by server. Valid values are SMB2.1, SMB3.0, " "SMB3.1.1. Should be passed as a string with delimiter ';'.") c.argument('authentication_methods', options_list='--auth-methods', arg_group='SMB Setting', min_api='2020-08-01-preview', help="SMB authentication methods supported by server. Valid values are NTLMv2, Kerberos. " "Should be passed as a string with delimiter ';'.") c.argument('kerberos_ticket_encryption', options_list=['--kerb-ticket-encryption', '-k'], arg_group='SMB Setting', min_api='2020-08-01-preview', help="Kerberos ticket encryption supported by server. Valid values are RC4-HMAC, AES-256. " "Should be passed as a string with delimiter ';'.") c.argument('channel_encryption', arg_group='SMB Setting', min_api='2020-08-01-preview', help="SMB channel encryption supported by server. Valid values are AES-128-CCM, AES-128-GCM, " "AES-256-GCM. Should be passed as a string with delimiter ';' ") with self.argument_context('storage account generate-sas') as c: t_account_permissions = self.get_sdk('common.models#AccountPermissions') c.register_sas_arguments() c.argument('services', type=services_type(self)) c.argument('resource_types', type=resource_type_type(self)) c.argument('expiry', type=get_datetime_type(True)) c.argument('start', type=get_datetime_type(True)) c.argument('account_name', acct_name_type, options_list=['--account-name']) c.argument('permission', options_list=('--permissions',), help='The permissions the SAS grants. Allowed values: {}. Can be combined.'.format( get_permission_help_string(t_account_permissions)), validator=get_permission_validator(t_account_permissions)) c.ignore('sas_token') or_policy_type = CLIArgumentType( options_list=['--policy', '-p'], help='The object replication policy definition between two storage accounts, in JSON format. 
' 'Multiple rules can be defined in one policy.' ) policy_id_type = CLIArgumentType( options_list=['--policy-id'], help='The ID of object replication policy or "default" if the policy ID is unknown. Policy Id will be ' 'auto-generated when setting on destination account. Required when setting on source account.' ) rule_id_type = CLIArgumentType( options_list=['--rule-id', '-r'], help='Rule Id is auto-generated for each new rule on destination account. It is required ' 'for put policy on source account.' ) prefix_math_type = CLIArgumentType( nargs='+', arg_group='Filters', options_list=['--prefix-match', '--prefix'], help='Optional. Filter the results to replicate only blobs whose names begin with the specified ' 'prefix.' ) min_creation_time_type = CLIArgumentType( options_list=['--min-creation-time', '-t'], arg_group='Filters', type=get_datetime_type(True), help="Blobs created after the time will be replicated to the destination. It must be in datetime format " "'yyyy-MM-ddTHH:mm:ssZ'. Example: 2020-02-19T16:05:00Z") with self.argument_context('storage account or-policy') as c: c.argument('account_name', acct_name_type, id_part=None) c.argument('resource_group_name', required=False, validator=process_resource_group) c.argument('object_replication_policy_id', policy_id_type) c.argument('policy_id', policy_id_type) c.argument('source_account', options_list=['--source-account', '-s'], help='The source storage account name or resource Id. Required when no --policy provided.') c.argument('destination_account', options_list=['--destination-account', '-d'], help='The destination storage account name or resource Id. 
Apply --account-name value as ' 'destination account when there is no destination account provided in --policy and ' '--destination-account.') c.argument('properties', or_policy_type) c.argument('prefix_match', prefix_math_type) c.argument('min_creation_time', min_creation_time_type) for item in ['create', 'update']: with self.argument_context('storage account or-policy {}'.format(item), arg_group="Object Replication Policy Rule") as c: c.argument('rule_id', help='Rule Id is auto-generated for each new rule on destination account. It is ' 'required for put policy on source account.') c.argument('source_container', options_list=['--source-container', '--scont'], help='The source storage container name. Required when no --policy provided.') c.argument('destination_container', options_list=['--destination-container', '--dcont'], help='The destination storage container name. Required when no --policy provided.') with self.argument_context('storage account or-policy create') as c: c.argument('properties', or_policy_type, validator=validate_or_policy) with self.argument_context('storage account or-policy rule') as c: c.argument('policy_id', policy_id_type) c.argument('source_container', options_list=['--source-container', '-s'], help='The source storage container name.') c.argument('destination_container', options_list=['--destination-container', '-d'], help='The destination storage container name.') c.argument('rule_id', rule_id_type) for item in ['show', 'off']: with self.argument_context('storage logging {}'.format(item)) as c: c.extra('services', validator=get_char_options_validator('bqt', 'services'), default='bqt') with self.argument_context('storage logging update') as c: c.extra('services', validator=get_char_options_validator('bqt', 'services'), options_list='--services', required=True) c.argument('log', validator=get_char_options_validator('rwd', 'log')) c.argument('retention', type=int) c.argument('version', type=float, validator=validate_logging_version) with 
self.argument_context('storage metrics show') as c: c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bfqt') c.argument('interval', arg_type=get_enum_type(['hour', 'minute', 'both'])) with self.argument_context('storage metrics update') as c: c.extra('services', validator=get_char_options_validator('bfqt', 'services'), options_list='--services', required=True) c.argument('hour', validator=process_metric_update_namespace, arg_type=get_enum_type(['true', 'false'])) c.argument('minute', arg_type=get_enum_type(['true', 'false'])) c.argument('api', arg_type=get_enum_type(['true', 'false'])) c.argument('retention', type=int) with self.argument_context('storage blob') as c: c.argument('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type) c.argument('destination_path', help='The destination path that will be prepended to the blob name.') with self.argument_context('storage blob list') as c: from ._validators import get_include_help_string t_blob_include = self.get_sdk('_generated.models._azure_blob_storage_enums#ListBlobsIncludeItem', resource_type=ResourceType.DATA_STORAGE_BLOB) c.register_container_arguments() c.argument('delimiter', help='When the request includes this parameter, the operation returns a BlobPrefix element in the ' 'result list that acts as a placeholder for all blobs whose names begin with the same substring ' 'up to the appearance of the delimiter character. The delimiter may be a single character or a ' 'string.') c.argument('include', help="Specify one or more additional datasets to include in the response. " "Options include: {}. 
Can be combined.".format(get_include_help_string(t_blob_include)), validator=validate_included_datasets_validator(include_class=t_blob_include)) c.argument('marker', arg_type=marker_type) c.argument('num_results', arg_type=num_results_type) c.argument('prefix', help='Filter the results to return only blobs whose name begins with the specified prefix.') c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') with self.argument_context('storage blob generate-sas') as c: from .completers import get_storage_acl_name_completion_list t_blob_permissions = self.get_sdk('blob.models#BlobPermissions') c.register_sas_arguments() c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed ' 'using this shared access signature.') c.argument('content_disposition', help='Response header value for Content-Disposition when resource is ' 'accessed using this shared access signature.') c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed ' 'using this shared access signature.') c.argument('content_language', help='Response header value for Content-Language when resource is accessed ' 'using this shared access signature.') c.argument('content_type', help='Response header value for Content-Type when resource is accessed ' 'using this shared access signature.') c.argument('full_uri', action='store_true', help='Indicates that this command return the full blob URI and the shared access signature token.') c.argument('as_user', min_api='2018-11-09', action='store_true', validator=as_user_validator, help="Indicates that this command return the SAS signed with the user delegation key. " "The expiry parameter and '--auth-mode login' are required if this argument is specified. 
") c.argument('id', options_list='--policy-name', validator=validate_policy, help='The name of a stored access policy within the container\'s ACL.', completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name', 'get_container_acl')) c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_blob_permissions)), validator=get_permission_validator(t_blob_permissions)) c.ignore('sas_token') with self.argument_context('storage blob restore', resource_type=ResourceType.MGMT_STORAGE) as c: from ._validators import BlobRangeAddAction c.argument('blob_ranges', options_list=['--blob-range', '-r'], action=BlobRangeAddAction, nargs='+', help='Blob ranges to restore. You need to two values to specify start_range and end_range for each ' 'blob range, e.g. -r blob1 blob2. Note: Empty means account start as start range value, and ' 'means account end for end range.') c.argument('account_name', acct_name_type, id_part=None) c.argument('resource_group_name', required=False, validator=process_resource_group) c.argument('time_to_restore', type=get_datetime_type(True), options_list=['--time-to-restore', '-t'], help='Restore blob to the specified time, which should be UTC datetime in (Y-m-d\'T\'H:M:S\'Z\').') with self.argument_context('storage blob rewrite', resource_type=ResourceType.DATA_STORAGE_BLOB, min_api='2020-04-08') as c: c.register_blob_arguments() c.register_precondition_options() c.argument('source_url', options_list=['--source-uri', '-u'], help='A URL of up to 2 KB in length that specifies a file or blob. The value should be URL-encoded ' 'as it would appear in a request URI. If the source is in another account, the source must either ' 'be public or must be authenticated via a shared access signature. If the source is public, no ' 'authentication is required.') c.extra('lease', options_list='--lease-id', help='Required if the blob has an active lease. 
Value can be a BlobLeaseClient object ' 'or the lease ID as a string.') c.extra('standard_blob_tier', arg_type=get_enum_type(t_blob_tier), options_list='--tier', help='A standard blob tier value to set the blob to. For this version of the library, ' 'this is only applicable to block blobs on standard storage accounts.') c.extra('encryption_scope', help='A predefined encryption scope used to encrypt the data on the service. An encryption scope ' 'can be created using the Management API and referenced here by name. If a default encryption scope ' 'has been defined at the container, this value will override it if the container-level scope is ' 'configured to allow overrides. Otherwise an error will be raised.') with self.argument_context('storage blob update') as c: t_blob_content_settings = self.get_sdk('blob.models#ContentSettings') c.register_content_settings_argument(t_blob_content_settings, update=True) with self.argument_context('storage blob exists') as c: c.argument('blob_name', required=True) with self.argument_context('storage blob url') as c: c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.') c.argument('snapshot', help='An string value that uniquely identifies the snapshot. The value of ' 'this query parameter indicates the snapshot version.') with self.argument_context('storage blob set-tier') as c: from azure.cli.command_modules.storage._validators import (blob_rehydrate_priority_validator) c.register_blob_arguments() c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(('block', 'page'))) c.argument('tier', validator=blob_tier_validator) c.argument('rehydrate_priority', options_list=('--rehydrate-priority', '-r'), arg_type=get_enum_type(('High', 'Standard')), validator=blob_rehydrate_priority_validator, is_preview=True, help="Indicate the priority with which to rehydrate an archived blob. 
" "The priority can be set on a blob only once, default value is Standard.") with self.argument_context('storage blob service-properties delete-policy update') as c: c.argument('enable', arg_type=get_enum_type(['true', 'false']), help='Enables/disables soft-delete.') c.argument('days_retained', type=int, help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].') with self.argument_context('storage blob service-properties update', min_api='2018-03-28') as c: c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete', help='Enables soft-delete.') c.argument('delete_retention_period', type=int, arg_group='Soft Delete', help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].') c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(), help='Enables static-website.') c.argument('index_document', help='Represents the name of the index document. This is commonly "index.html".', arg_group='Static Website') c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website', help='Represents the path to the error document that should be shown when an error 404 is issued,' ' in other words, when a browser requests a page that does not exist.') with self.argument_context('storage blob show') as c: c.register_blob_arguments() c.register_precondition_options() c.extra('snapshot', help='The snapshot parameter is an opaque DateTime value that, when present, ' 'specifies the blob snapshot to retrieve.') c.argument('lease_id', help='Required if the blob has an active lease.') with self.argument_context('storage blob upload') as c: from ._validators import page_blob_tier_validator, validate_encryption_scope_client_params from .sdkutil import get_blob_types, get_blob_tier_names t_blob_content_settings = self.get_sdk('blob.models#ContentSettings') c.register_content_settings_argument(t_blob_content_settings, update=False) 
c.register_blob_arguments() c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter()) c.argument('max_connections', type=int) c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type, arg_type=get_enum_type(get_blob_types())) c.argument('validate_content', action='store_true', min_api='2016-05-31') c.extra('no_progress', progress_type) c.extra('socket_timeout', socket_timeout_type) # TODO: Remove once #807 is complete. Smart Create Generation requires this parameter. # register_extra_cli_argument('storage blob upload', '_subscription_id', options_list=('--subscription',), # help=argparse.SUPPRESS) c.argument('tier', validator=page_blob_tier_validator, arg_type=get_enum_type(get_blob_tier_names(self.cli_ctx, 'PremiumPageBlobTier')), min_api='2017-04-17') c.argument('encryption_scope', validator=validate_encryption_scope_client_params, help='A predefined encryption scope used to encrypt the data on the service.') with self.argument_context('storage blob upload-batch') as c: from .sdkutil import get_blob_types t_blob_content_settings = self.get_sdk('blob.models#ContentSettings') c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group='Content Control') c.ignore('source_files', 'destination_container_name') c.argument('source', options_list=('--source', '-s')) c.argument('destination', options_list=('--destination', '-d')) c.argument('max_connections', type=int, help='Maximum number of parallel connections to use when the blob size exceeds 64MB.') c.argument('maxsize_condition', arg_group='Content Control') c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group='Content Control') c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(get_blob_types())) c.extra('no_progress', progress_type) c.extra('socket_timeout', socket_timeout_type) with self.argument_context('storage blob download') as c: c.argument('file_path', 
options_list=('--file', '-f'), type=file_type, completer=FilesCompleter(), validator=blob_download_file_path_validator) c.argument('max_connections', type=int) c.argument('start_range', type=int) c.argument('end_range', type=int) c.argument('validate_content', action='store_true', min_api='2016-05-31') c.extra('no_progress', progress_type) c.extra('socket_timeout', socket_timeout_type) with self.argument_context('storage blob download-batch') as c: c.ignore('source_container_name') c.argument('destination', options_list=('--destination', '-d')) c.argument('source', options_list=('--source', '-s')) c.extra('no_progress', progress_type) c.extra('socket_timeout', socket_timeout_type) c.argument('max_connections', type=int, help='Maximum number of parallel connections to use when the blob size exceeds 64MB.') with self.argument_context('storage blob delete') as c: from .sdkutil import get_delete_blob_snapshot_type_names c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names())) with self.argument_context('storage blob delete-batch') as c: c.ignore('source_container_name') c.argument('source', options_list=('--source', '-s')) c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()), help='Required if the blob has associated snapshots.') c.argument('lease_id', help='The active lease id for the blob.') with self.argument_context('storage blob lease') as c: c.argument('blob_name', arg_type=blob_name_type) with self.argument_context('storage blob lease acquire') as c: c.register_precondition_options() c.register_blob_arguments() c.extra('lease_id', options_list='--proposed-lease-id', help='Proposed lease ID, in a GUID string format. ' 'The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format.') c.argument('lease_duration', help='Specify the duration of the lease, in seconds, or negative one (-1) for ' 'a lease that never expires. 
A non-infinite lease can be between 15 and 60 seconds. A lease ' 'duration cannot be changed using renew or change. Default is -1 (infinite lease)', type=int) with self.argument_context('storage blob lease break') as c: c.register_precondition_options() c.register_blob_arguments() c.argument('lease_break_period', type=int, help="This is the proposed duration of seconds that the lease should continue before it is broken, " "between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining " "on the lease. If longer, the time remaining on the lease is used. A new lease will not be " "available before the break period has expired, but the lease may be held for longer than the break " "period. If this header does not appear with a break operation, a fixed-duration lease breaks after " "the remaining lease period elapses, and an infinite lease breaks immediately.") with self.argument_context('storage blob lease change') as c: c.register_precondition_options() c.register_blob_arguments() c.extra('proposed_lease_id', help='Proposed lease ID, in a GUID string format. The Blob service returns 400 ' '(Invalid request) if the proposed lease ID is not in the correct format.', required=True) c.extra('lease_id', help='Required if the blob has an active lease.', required=True) for item in ['release', 'renew']: with self.argument_context('storage blob lease {}'.format(item)) as c: c.register_precondition_options() c.register_blob_arguments() c.extra('lease_id', help='Required if the blob has an active lease.', required=True) with self.argument_context('storage copy') as c: c.argument('destination', options_list=['--destination', '-d', c.deprecate(target='--destination-local-path', redirect='--destination')], help="The path/url of copy destination. " "It can be a local path, an url to azure storage server. 
If you provide destination parameter " "here, you do not need to provide arguments in copy destination arguments group and copy " "destination arguments will be deprecated in future.", required=False) c.argument('source', options_list=['--source', '-s', c.deprecate(target='--source-local-path', redirect='--source')], help="The path/url of copy source. It can be a local" " path, an url to azure storage server or AWS S3 buckets. If you provide source parameter here," " you do not need to provide arguments in copy source arguments group and copy source arguments" " will be deprecated in future.", required=False) for item in ['destination', 'source']: c.extra('{}_container'.format(item), arg_group='Copy {}'.format(item), help='Container name of copy {} storage account'.format(item)) c.extra('{}_blob'.format(item), arg_group='Copy {}'.format(item), help='Blob name in blob container of copy {} storage account'.format(item)) c.extra('{}_share'.format(item), arg_group='Copy {}'.format(item), help='File share name of copy {} storage account'.format(item)) c.extra('{}_file_path'.format(item), arg_group='Copy {}'.format(item), help='File path in file share of copy {} storage account'.format(item)) c.argument('account_name', acct_name_type, arg_group='Storage Account', id_part=None, options_list=['--account-name', c.deprecate(target='--destination-account-name', redirect='--account-name')], help='Storage account name of copy destination') c.extra('source_account_name', arg_group='Copy source', help='Account name of copy source storage account.') c.extra('source_account_key', arg_group='Copy source', help='Account key of copy source storage account. 
Must be used in conjunction with source storage ' 'account name.') c.extra('source_connection_string', arg_group='Copy source', options_list=['--source-connection-string', '--src-conn'], help='Connection string of source storage account.') c.extra('source_sas', arg_group='Copy source', help='Shared Access Signature (SAS) token of copy source. Must be used in conjunction with source ' 'storage account name.') c.argument('put_md5', arg_group='Additional Flags', action='store_true', help='Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the ' 'destination blob/file.Only available when uploading.') c.argument('blob_type', arg_group='Additional Flags', arg_type=get_enum_type(["BlockBlob", "PageBlob", "AppendBlob"]), help='The type of blob at the destination.') c.argument('preserve_s2s_access_tier', arg_group='Additional Flags', arg_type=get_three_state_flag(), help='Preserve access tier during service to service copy. ' 'Please refer to https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers ' 'to ensure destination storage account support setting access tier. In the cases that setting ' 'access tier is not supported, please use `--preserve-s2s-access-tier false` to bypass copying ' 'access tier. (Default true)') c.argument('exclude_pattern', exclude_pattern_type) c.argument('include_pattern', include_pattern_type) c.argument('exclude_path', exclude_path_type) c.argument('include_path', include_path_type) c.argument('recursive', recursive_type) c.argument('content_type', arg_group='Additional Flags', help="Specify content type of the file. ") c.argument('follow_symlinks', arg_group='Additional Flags', action='store_true', help='Follow symbolic links when uploading from local file system.') c.argument('cap_mbps', arg_group='Additional Flags', help="Caps the transfer rate, in megabits per second. " "Moment-by-moment throughput might vary slightly from the cap. 
" "If this option is set to zero, or it is omitted, the throughput isn't capped. ") with self.argument_context('storage blob copy') as c: for item in ['destination', 'source']: c.argument('{}_if_modified_since'.format(item), arg_group='Pre-condition', arg_type=if_modified_since_type) c.argument('{}_if_unmodified_since'.format(item), arg_group='Pre-condition', arg_type=if_unmodified_since_type) c.argument('{}_if_match'.format(item), arg_group='Pre-condition') c.argument('{}_if_none_match'.format(item), arg_group='Pre-condition') c.argument('container_name', container_name_type, options_list=('--destination-container', '-c')) c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'), help='Name of the destination blob. If the exists, it will be overwritten.') c.argument('source_lease_id', arg_group='Copy Source') with self.argument_context('storage blob copy start', resource_type=ResourceType.DATA_STORAGE_BLOB) as c: from ._validators import validate_source_url c.register_blob_arguments() c.register_precondition_options() c.register_precondition_options(prefix='source_') c.register_source_uri_arguments(validator=validate_source_url) c.ignore('incremental_copy') c.argument('if_match', options_list=['--destination-if-match']) c.argument('if_modified_since', options_list=['--destination-if-modified-since']) c.argument('if_none_match', options_list=['--destination-if-none-match']) c.argument('if_unmodified_since', options_list=['--destination-if-unmodified-since']) c.argument('if_tags_match_condition', options_list=['--destination-tags-condition']) c.argument('blob_name', options_list=['--destination-blob', '-b'], required=True, help='Name of the destination blob. 
If the exists, it will be overwritten.') c.argument('container_name', options_list=['--destination-container', '-c'], required=True, help='The container name.') c.extra('destination_lease', options_list='--destination-lease-id', help='The lease ID specified for this header must match the lease ID of the estination blob. ' 'If the request does not include the lease ID or it is not valid, the operation fails with status ' 'code 412 (Precondition Failed).') c.extra('source_lease', options_list='--source-lease-id', arg_group='Copy Source', help='Specify this to perform the Copy Blob operation only if the lease ID given matches the ' 'active lease ID of the source blob.') c.extra('rehydrate_priority', rehydrate_priority_type) c.extra('requires_sync', arg_type=get_three_state_flag(), help='Enforce that the service will not return a response until the copy is complete.') c.extra('tier', tier_type) c.extra('tags', tags_type) with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c: from azure.cli.command_modules.storage._validators import get_source_file_or_blob_service_client c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client) c.extra('source_account_name') c.extra('source_account_key') c.extra('source_uri') c.argument('source_sas') c.argument('source_container') c.argument('source_share') with self.argument_context('storage blob incremental-copy start') as c: from azure.cli.command_modules.storage._validators import process_blob_source_uri c.register_source_uri_arguments(validator=process_blob_source_uri, blob_only=True) c.argument('destination_if_modified_since', arg_group='Pre-condition', arg_type=if_modified_since_type) c.argument('destination_if_unmodified_since', arg_group='Pre-condition', arg_type=if_unmodified_since_type) c.argument('destination_if_match', arg_group='Pre-condition') c.argument('destination_if_none_match', arg_group='Pre-condition') c.argument('container_name', 
container_name_type, options_list=('--destination-container', '-c')) c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'), help='Name of the destination blob. If the exists, it will be overwritten.') c.argument('source_lease_id', arg_group='Copy Source') with self.argument_context('storage blob query') as c: from ._validators import validate_text_configuration c.register_blob_arguments() c.register_precondition_options() line_separator = CLIArgumentType(help="The string used to separate records.", default='\n') column_separator = CLIArgumentType(help="The string used to separate columns.", default=',') quote_char = CLIArgumentType(help="The string used to quote a specific field.", default='"') record_separator = CLIArgumentType(help="The string used to separate records.", default='\n') escape_char = CLIArgumentType(help="The string used as an escape character. Default to empty.", default="") has_header = CLIArgumentType( arg_type=get_three_state_flag(), help="Whether the blob data includes headers in the first line. " "The default value is False, meaning that the data will be returned inclusive of the first line. " "If set to True, the data will be returned exclusive of the first line.", default=False) c.extra('lease', options_list='--lease-id', help='Required if the blob has an active lease.') c.argument('query_expression', help='The query expression in SQL. The maximum size of the query expression ' 'is 256KiB. For more information about the expression syntax, please see ' 'https://docs.microsoft.com/azure/storage/blobs/query-acceleration-sql-reference') c.extra('input_format', arg_type=get_enum_type(['csv', 'json']), validator=validate_text_configuration, help='Serialization type of the data currently stored in the blob. ' 'The default is to treat the blob data as CSV data formatted in the default dialect.' 'The blob data will be reformatted according to that profile when blob format is specified. 
' 'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; ' 'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.') c.extra('output_format', arg_type=get_enum_type(['csv', 'json']), help='Output serialization type for the data stream. ' 'By default the data will be returned as it is represented in the blob. ' 'By providing an output format, the blob data will be reformatted according to that profile. ' 'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; ' 'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.') c.extra('in_line_separator', arg_group='Input Json Text Configuration', arg_type=line_separator) c.extra('in_column_separator', arg_group='Input Delimited Text Configuration', arg_type=column_separator) c.extra('in_quote_char', arg_group='Input Delimited Text Configuration', arg_type=quote_char) c.extra('in_record_separator', arg_group='Input Delimited Text Configuration', arg_type=record_separator) c.extra('in_escape_char', arg_group='Input Delimited Text Configuration', arg_type=escape_char) c.extra('in_has_header', arg_group='Input Delimited Text Configuration', arg_type=has_header) c.extra('out_line_separator', arg_group='Output Json Text Configuration', arg_type=line_separator) c.extra('out_column_separator', arg_group='Output Delimited Text Configuration', arg_type=column_separator) c.extra('out_quote_char', arg_group='Output Delimited Text Configuration', arg_type=quote_char) c.extra('out_record_separator', arg_group='Output Delimited Text Configuration', arg_type=record_separator) c.extra('out_escape_char', arg_group='Output Delimited Text Configuration', arg_type=escape_char) c.extra('out_has_header', arg_group='Output Delimited Text Configuration', arg_type=has_header) c.extra('result_file', help='Specify the file path to save result.') c.ignore('input_config') c.ignore('output_config') with 
self.argument_context('storage blob sync') as c: c.extra('destination_container', options_list=['--container', '-c'], required=True, help='The sync destination container.') c.extra('destination_path', options_list=['--destination', '-d'], validator=validate_azcopy_upload_destination_url, help='The sync destination path.') c.argument('source', options_list=['--source', '-s'], help='The source file path to sync from.') c.ignore('destination') c.argument('exclude_pattern', exclude_pattern_type) c.argument('include_pattern', include_pattern_type) c.argument('exclude_path', exclude_path_type) with self.argument_context('storage container') as c: from .sdkutil import get_container_access_type_names c.argument('container_name', container_name_type, options_list=('--name', '-n')) c.argument('public_access', validator=validate_container_public_access, arg_type=get_enum_type(get_container_access_type_names()), help='Specifies whether data in the container may be accessed publicly.') with self.argument_context('storage container create') as c: c.argument('container_name', container_name_type, options_list=('--name', '-n'), completer=None) c.argument('fail_on_exist', help='Throw an exception if the container already exists.') c.argument('account_name', help='Storage account name. 
Related environment variable: AZURE_STORAGE_ACCOUNT.') c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'], arg_group='Encryption Policy', is_preview=True, help='Default the container to use specified encryption scope for all writes.') c.argument('prevent_encryption_scope_override', options_list=['--prevent-encryption-scope-override', '-p'], arg_type=get_three_state_flag(), arg_group='Encryption Policy', is_preview=True, help='Block override of encryption scope from the container default.') with self.argument_context('storage container delete') as c: c.argument('fail_not_exist', help='Throw an exception if the container does not exist.') c.argument('bypass_immutability_policy', action='store_true', help='Bypasses upcoming service behavior that ' 'will block a container from being deleted if it has a immutability-policy. Specifying this will ' 'ignore arguments aside from those used to identify the container ("--name", "--account-name").') c.argument('lease_id', help="If specified, delete_container only succeeds if the container's lease is active " "and matches this ID. Required if the container has an active lease.") c.ignore('processed_resource_group') c.ignore('processed_account_name') c.ignore('mgmt_client') with self.argument_context('storage container exists') as c: c.ignore('blob_name', 'snapshot') for item in ['create', 'extend']: with self.argument_context('storage container immutability-policy {}'.format(item)) as c: c.argument('account_name', help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.') c.argument('if_match', help="An ETag value, or the wildcard character (*). Specify this header to perform " "the operation only if the resource's ETag matches the value specified.") c.extra('allow_protected_append_writes', options_list=['--allow-protected-append-writes', '-w'], arg_type=get_three_state_flag(), help='This property can only be changed for unlocked time-based ' 'retention policies. 
When enabled, new blocks can be ' 'written to an append blob while maintaining immutability ' 'protection and compliance. Only new blocks can be added ' 'and any existing blocks cannot be modified or deleted. ' 'This property cannot be changed with ' 'ExtendImmutabilityPolicy API.') c.extra('period', type=int, help='The immutability period for the blobs in the container since the policy ' 'creation, in days.') c.ignore('parameters') with self.argument_context('storage container list') as c: c.argument('num_results', arg_type=num_results_type) with self.argument_context('storage container set-permission') as c: c.ignore('signed_identifiers') with self.argument_context('storage container lease') as c: c.argument('container_name', container_name_type) with self.argument_context('storage container') as c: c.argument('account_name', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) c.argument('resource_group_name', required=False, validator=process_resource_group) with self.argument_context('storage container immutability-policy') as c: c.argument('immutability_period_since_creation_in_days', options_list='--period') c.argument('container_name', container_name_type) with self.argument_context('storage container legal-hold') as c: c.argument('container_name', container_name_type) c.argument('account_name', help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.') c.argument('tags', nargs='+', help='Space-separated tags. 
Each tag should be 3 to 23 alphanumeric characters and is normalized ' 'to lower case') with self.argument_context('storage container policy') as c: from .completers import get_storage_acl_name_completion_list t_container_permissions = self.get_sdk('blob.models#ContainerPermissions') c.argument('container_name', container_name_type) c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.', completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name', 'get_container_acl')) help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_container_permissions)) c.argument('permission', options_list='--permissions', help=help_str, validator=get_permission_validator(t_container_permissions)) c.argument('start', type=get_datetime_type(True), help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.') c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')') for item in ['create', 'delete', 'list', 'show', 'update']: with self.argument_context('storage container policy {}'.format(item)) as c: c.extra('lease_id', options_list='--lease-id', help='The container lease ID.') with self.argument_context('storage container generate-sas') as c: from .completers import get_storage_acl_name_completion_list t_container_permissions = self.get_sdk('blob.models#ContainerPermissions') c.register_sas_arguments() c.argument('id', options_list='--policy-name', validator=validate_policy, help='The name of a stored access policy within the container\'s ACL.', completer=get_storage_acl_name_completion_list(t_container_permissions, 'container_name', 'get_container_acl')) c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_container_permissions)), validator=get_permission_validator(t_container_permissions)) c.argument('cache_control', help='Response header value for Cache-Control when 
resource is accessed ' 'using this shared access signature.') c.argument('content_disposition', help='Response header value for Content-Disposition when resource is ' 'accessed using this shared access signature.') c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed ' 'using this shared access signature.') c.argument('content_language', help='Response header value for Content-Language when resource is accessed ' 'using this shared access signature.') c.argument('content_type', help='Response header value for Content-Type when resource is accessed ' 'using this shared access signature.') c.argument('as_user', min_api='2018-11-09', action='store_true', validator=as_user_validator, help="Indicates that this command return the SAS signed with the user delegation key. " "The expiry parameter and '--auth-mode login' are required if this argument is specified. ") c.ignore('sas_token') with self.argument_context('storage container lease') as c: c.argument('lease_duration', type=int) c.argument('lease_break_period', type=int) with self.argument_context('storage container list', resource_type=ResourceType.DATA_STORAGE_BLOB) as c: c.extra('timeout', timeout_type) c.argument('marker', arg_type=marker_type) c.argument('num_results', arg_type=num_results_type) c.argument('prefix', help='Filter the results to return only blobs whose name begins with the specified prefix.') c.argument('include_metadata', arg_type=get_three_state_flag(), help='Specify that container metadata to be returned in the response.') c.argument('show_next_marker', action='store_true', is_preview=True, help='Show nextMarker in result when specified.') c.argument('include_deleted', arg_type=get_three_state_flag(), min_api='2020-02-10', help='Specify that deleted containers to be returned in the response. This is for container restore ' 'enabled account. 
The default value is `False`') with self.argument_context('storage container restore') as c: c.argument('deleted_container_name', options_list=['--name', '-n'], help='Specify the name of the deleted container to restore.') c.argument('deleted_container_version', options_list=['--deleted-version'], help='Specify the version of the deleted container to restore.') c.extra('timeout', timeout_type) with self.argument_context('storage container-rm', resource_type=ResourceType.MGMT_STORAGE) as c: from .sdkutil import get_container_access_type_names c.argument('container_name', container_name_type, options_list=('--name', '-n'), id_part='child_name_2') c.argument('account_name', storage_account_type) c.argument('resource_group_name', required=False) c.argument('public_access', validator=validate_container_public_access, arg_type=get_enum_type(get_container_access_type_names()), help='Specify whether data in the container may be accessed publicly.') c.ignore('filter', 'maxpagesize') with self.argument_context('storage container-rm create', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('fail_on_exist', help='Throw an exception if the container already exists.') c.argument('enable_vlw', arg_type=get_three_state_flag(), min_api='2021-01-01', is_preview=True, help='The object level immutability property of the container. The property is immutable and can ' 'only be set to true at the container creation time. 
Existing containers must undergo a migration ' 'process.') for item in ['create', 'update']: with self.argument_context('storage container-rm {}'.format(item), resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'], arg_group='Encryption Policy', min_api='2019-06-01', help='Default the container to use specified encryption scope for all writes.') c.argument('deny_encryption_scope_override', options_list=['--deny-encryption-scope-override', '--deny-override'], arg_type=get_three_state_flag(), arg_group='Encryption Policy', min_api='2019-06-01', help='Block override of encryption scope from the container default.') with self.argument_context('storage container-rm list', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('account_name', storage_account_type, id_part=None) c.argument('include_deleted', action='store_true', help='Include soft deleted containers when specified.') with self.argument_context('storage share') as c: c.argument('share_name', share_name_type, options_list=('--name', '-n')) with self.argument_context('storage share-rm', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('resource_group_name', required=False) c.argument('account_name', storage_account_type) c.argument('share_name', share_name_type, options_list=('--name', '-n'), id_part='child_name_2') c.argument('expand', default=None) c.argument('x_ms_snapshot', options_list=['--snapshot'], is_preview=True, help='The DateTime value that specifies the share snapshot to retrieve.') c.ignore('filter', 'maxpagesize') with self.argument_context('storage share-rm delete', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('include', default='none') with self.argument_context('storage share-rm update', resource_type=ResourceType.MGMT_STORAGE) as c: c.ignore('x_ms_snapshot') for item in ['create', 'update', 'snapshot']: with self.argument_context('storage share-rm {}'.format(item), 
resource_type=ResourceType.MGMT_STORAGE) as c: t_enabled_protocols, t_root_squash, t_access_tier = \ self.get_models('EnabledProtocols', 'RootSquashType', 'ShareAccessTier', resource_type=ResourceType.MGMT_STORAGE) c.argument('share_quota', type=int, options_list=['--quota', '-q'], help='The maximum size of the share in gigabytes. Must be greater than 0, and less than or ' 'equal to 5TB (5120). For Large File Shares, the maximum size is 102400.') c.argument('metadata', nargs='+', help='Metadata in space-separated key=value pairs that is associated with the share. ' 'This overwrites any existing metadata', validator=validate_metadata) c.argument('enabled_protocols', arg_type=get_enum_type(t_enabled_protocols), min_api='2019-06-01', help='Immutable property for file shares protocol. NFS protocol will be ' 'only available for premium file shares (file shares in the FileStorage account type).') c.argument('root_squash', arg_type=get_enum_type(t_root_squash), min_api='2019-06-01', help='Reduction of the access rights for the remote superuser.') c.argument('access_tier', arg_type=get_enum_type(t_access_tier), min_api='2019-06-01', help='Access tier for specific share. GpV2 account can choose between TransactionOptimized ' '(default), Hot, and Cool. FileStorage account can choose Premium.') with self.argument_context('storage share-rm list', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('account_name', storage_account_type, id_part=None) c.argument('include_deleted', action='store_true', help='Include soft deleted file shares when specified.') c.argument('include_snapshot', action='store_true', help='Include file share snapshots when specified.') with self.argument_context('storage share-rm restore', resource_type=ResourceType.MGMT_STORAGE) as c: c.argument('deleted_version', help='Identify the version of the deleted share that will be restored.') c.argument('share_name', help='The file share name. 
Identify the name of the deleted share that will be restored.') c.argument('restored_name', help='A new file share name to be restored. If not specified, deleted share name will be used.') with self.argument_context('storage share url') as c: c.argument('unc', action='store_true', help='Output UNC network path.') c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.') with self.argument_context('storage share list') as c: c.argument('num_results', arg_type=num_results_type) with self.argument_context('storage share exists') as c: c.ignore('directory_name', 'file_name') with self.argument_context('storage share policy') as c: from .completers import get_storage_acl_name_completion_list t_file_svc = self.get_sdk('file#FileService') t_share_permissions = self.get_sdk('file.models#SharePermissions') c.argument('container_name', share_name_type) c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.', completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_share_acl')) help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_share_permissions)) c.argument('permission', options_list='--permissions', help=help_str, validator=get_permission_validator(t_share_permissions)) c.argument('start', type=get_datetime_type(True), help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). 
Defaults to time of request.') c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')') with self.argument_context('storage share delete') as c: from .sdkutil import get_delete_file_snapshot_type_names c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_file_snapshot_type_names()), help='Specify the deletion strategy when the share has snapshots.') with self.argument_context('storage share generate-sas') as c: from .completers import get_storage_acl_name_completion_list t_share_permissions = self.get_sdk('file.models#SharePermissions') c.register_sas_arguments() c.argument('id', options_list='--policy-name', help='The name of a stored access policy within the share\'s ACL.', completer=get_storage_acl_name_completion_list(t_share_permissions, 'share_name', 'get_share_acl')) c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_share_permissions)), validator=get_permission_validator(t_share_permissions)) c.ignore('sas_token') with self.argument_context('storage directory') as c: c.argument('directory_name', directory_type, options_list=('--name', '-n')) with self.argument_context('storage directory exists') as c: c.ignore('file_name') c.argument('directory_name', required=True) with self.argument_context('storage file') as c: c.argument('file_name', file_name_type, options_list=('--name', '-n')) c.argument('directory_name', directory_type, required=False) with self.argument_context('storage file copy') as c: c.argument('share_name', share_name_type, options_list=('--destination-share', '-s'), help='Name of the destination share. 
The share must exist.') with self.argument_context('storage file copy cancel') as c: c.register_path_argument(options_list=('--destination-path', '-p')) with self.argument_context('storage file delete') as c: c.register_path_argument() with self.argument_context('storage file download') as c: c.register_path_argument() c.argument('file_path', options_list=('--dest',), type=file_type, required=False, help='Path of the file to write to. The source filename will be used if not specified.', validator=process_file_download_namespace, completer=FilesCompleter()) c.argument('path', validator=None) # validator called manually from process_file_download_namespace c.extra('no_progress', progress_type) c.argument('max_connections', type=int) c.argument('start_range', type=int) c.argument('end_range', type=int) with self.argument_context('storage file exists') as c: c.register_path_argument() with self.argument_context('storage file generate-sas') as c: from .completers import get_storage_acl_name_completion_list c.register_path_argument() c.register_sas_arguments() t_file_svc = self.get_sdk('file.fileservice#FileService') t_file_permissions = self.get_sdk('file.models#FilePermissions') c.argument('id', options_list='--policy-name', help='The name of a stored access policy within the container\'s ACL.', completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_container_acl')) c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_file_permissions)), validator=get_permission_validator(t_file_permissions)) c.ignore('sas_token') with self.argument_context('storage file list') as c: from .completers import dir_path_completer c.argument('directory_name', options_list=('--path', '-p'), help='The directory path within the file share.', completer=dir_path_completer) c.argument('num_results', arg_type=num_results_type) with self.argument_context('storage file metadata show') as c: c.register_path_argument() with 
self.argument_context('storage file metadata update') as c: c.register_path_argument() with self.argument_context('storage file resize') as c: c.register_path_argument() c.argument('content_length', options_list='--size') with self.argument_context('storage file show') as c: c.register_path_argument() with self.argument_context('storage file update') as c: t_file_content_settings = self.get_sdk('file.models#ContentSettings') c.register_path_argument() c.register_content_settings_argument(t_file_content_settings, update=True) with self.argument_context('storage file upload') as c: t_file_content_settings = self.get_sdk('file.models#ContentSettings') c.register_path_argument(default_file_param='local_file_path') c.register_content_settings_argument(t_file_content_settings, update=False, guess_from_file='local_file_path') c.argument('local_file_path', options_list='--source', type=file_type, completer=FilesCompleter()) c.extra('no_progress', progress_type) c.argument('max_connections', type=int) with self.argument_context('storage file url') as c: c.register_path_argument() c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.') with self.argument_context('storage file upload-batch') as c: from ._validators import process_file_upload_batch_parameters c.argument('source', options_list=('--source', '-s'), validator=process_file_upload_batch_parameters) c.argument('destination', options_list=('--destination', '-d')) c.argument('max_connections', arg_group='Download Control', type=int) c.argument('validate_content', action='store_true', min_api='2016-05-31') c.register_content_settings_argument(t_file_content_settings, update=False, arg_group='Content Settings') c.extra('no_progress', progress_type) with self.argument_context('storage file download-batch') as c: from ._validators import process_file_download_batch_parameters c.argument('source', options_list=('--source', '-s'), validator=process_file_download_batch_parameters) 
c.argument('destination', options_list=('--destination', '-d')) c.argument('max_connections', arg_group='Download Control', type=int) c.argument('validate_content', action='store_true', min_api='2016-05-31') c.extra('no_progress', progress_type) with self.argument_context('storage file delete-batch') as c: from ._validators import process_file_batch_source_parameters c.argument('source', options_list=('--source', '-s'), validator=process_file_batch_source_parameters) with self.argument_context('storage file copy start') as c: from azure.cli.command_modules.storage._validators import validate_source_uri c.register_path_argument(options_list=('--destination-path', '-p')) c.register_source_uri_arguments(validator=validate_source_uri) c.extra('file_snapshot', default=None, arg_group='Copy Source', help='The file snapshot for the source storage account.') with self.argument_context('storage file copy start-batch', arg_group='Copy Source') as c: from ._validators import get_source_file_or_blob_service_client c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client) c.extra('source_account_name') c.extra('source_account_key') c.extra('source_uri') c.argument('source_sas') c.argument('source_container') c.argument('source_share') with self.argument_context('storage cors list') as c: c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bqft', options_list='--services', required=False) with self.argument_context('storage cors add') as c: c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True, options_list='--services') c.argument('max_age') c.argument('origins', nargs='+') c.argument('methods', nargs='+', arg_type=get_enum_type(['DELETE', 'GET', 'HEAD', 'MERGE', 'POST', 'OPTIONS', 'PUT'])) c.argument('allowed_headers', nargs='+') c.argument('exposed_headers', nargs='+') with self.argument_context('storage cors clear') as c: c.extra('services', 
validator=get_char_options_validator('bfqt', 'services'), required=True, options_list='--services') with self.argument_context('storage queue generate-sas') as c: from .completers import get_storage_acl_name_completion_list t_queue_permissions = self.get_sdk('queue.models#QueuePermissions') c.register_sas_arguments() c.argument('id', options_list='--policy-name', help='The name of a stored access policy within the share\'s ACL.', completer=get_storage_acl_name_completion_list(t_queue_permissions, 'queue_name', 'get_queue_acl')) c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_queue_permissions)), validator=get_permission_validator(t_queue_permissions)) c.ignore('sas_token') c.ignore('auth_mode') with self.argument_context('storage queue') as c: c.argument('queue_name', queue_name_type, options_list=('--name', '-n')) with self.argument_context('storage queue list') as c: c.argument('include_metadata', help='Specify that queue metadata be returned in the response.') c.argument('marker', arg_type=marker_type) c.argument('num_results', arg_type=num_results_type) c.argument('prefix', help='Filter the results to return only queues whose names ' 'begin with the specified prefix.') c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') c.extra('timeout', help='Request timeout in seconds. 
Apply to each call to the service.', type=int) with self.argument_context('storage queue create') as c: c.argument('queue_name', queue_name_type, options_list=('--name', '-n'), completer=None) with self.argument_context('storage queue policy') as c: from .completers import get_storage_acl_name_completion_list t_queue_permissions = self.get_sdk('queue.models#QueuePermissions') c.argument('container_name', queue_name_type) c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.', completer=get_storage_acl_name_completion_list(t_queue_service, 'container_name', 'get_queue_acl')) help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_queue_permissions)) c.argument('permission', options_list='--permissions', help=help_str, validator=get_permission_validator(t_queue_permissions)) c.argument('start', type=get_datetime_type(True), help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.') c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')') c.ignore('auth_mode') with self.argument_context('storage message') as c: c.argument('queue_name', queue_name_type) c.argument('message_id', options_list='--id') c.argument('content', type=unicode_string, help='Message content, up to 64KB in size.') with self.argument_context('storage remove') as c: from .completers import file_path_completer c.extra('container_name', container_name_type, validator=validate_azcopy_remove_arguments) c.extra('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type) c.extra('share_name', share_name_type, help='The file share name.') c.extra('path', options_list=('--path', '-p'), help='The path to the file within the file share.', completer=file_path_completer) c.argument('exclude_pattern', exclude_pattern_type) c.argument('include_pattern', include_pattern_type) c.argument('exclude_path', exclude_path_type) c.argument('include_path', include_path_type) 
c.argument('recursive', recursive_type) c.ignore('destination') c.ignore('service') c.ignore('target') with self.argument_context('storage table') as c: c.argument('table_name', table_name_type, options_list=('--name', '-n')) with self.argument_context('storage table create') as c: c.argument('table_name', table_name_type, options_list=('--name', '-n'), completer=None) c.argument('fail_on_exist', help='Throw an exception if the table already exists.') with self.argument_context('storage table policy') as c: from ._validators import table_permission_validator from .completers import get_storage_acl_name_completion_list c.argument('container_name', table_name_type) c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.', completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl')) help_str = 'Allowed values: (r)ead/query (a)dd (u)pdate (d)elete. Can be combined.' c.argument('permission', options_list='--permissions', help=help_str, validator=table_permission_validator) c.argument('start', type=get_datetime_type(True), help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). 
Defaults to time of request.') c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')') with self.argument_context('storage table generate-sas') as c: from .completers import get_storage_acl_name_completion_list c.register_sas_arguments() c.argument('id', options_list='--policy-name', help='The name of a stored access policy within the table\'s ACL.', completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl')) c.argument('permission', options_list='--permissions', help=sas_help.format('(r)ead/query (a)dd (u)pdate (d)elete'), validator=table_permission_validator) c.ignore('sas_token') with self.argument_context('storage entity') as c: c.ignore('property_resolver') c.argument('entity', options_list=('--entity', '-e'), validator=validate_entity, nargs='+') c.argument('select', nargs='+', validator=validate_select, help='Space-separated list of properties to return for each entity.') with self.argument_context('storage entity insert') as c: c.argument('if_exists', arg_type=get_enum_type(['fail', 'merge', 'replace'])) with self.argument_context('storage entity query') as c: c.argument('accept', default='minimal', validator=validate_table_payload_format, arg_type=get_enum_type(['none', 'minimal', 'full']), help='Specifies how much metadata to include in the response payload.') c.argument('marker', validator=validate_marker, nargs='+') for item in ['create', 'show', 'delete', 'exists', 'metadata update', 'metadata show']: with self.argument_context('storage fs {}'.format(item)) as c: c.extra('file_system_name', options_list=['--name', '-n'], help="File system name (i.e. 
container name).", required=True) c.extra('timeout', timeout_type) with self.argument_context('storage fs create') as c: from .sdkutil import get_fs_access_type_names c.argument('public_access', arg_type=get_enum_type(get_fs_access_type_names()), validator=validate_fs_public_access, help="Specify whether data in the file system may be accessed publicly and the level of access.") with self.argument_context('storage fs generate-sas') as c: t_file_system_permissions = self.get_sdk('_models#FileSystemSasPermissions', resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE) c.register_sas_arguments() c.argument('file_system', options_list=['--name', '-n'], help="File system name (i.e. container name).") c.argument('id', options_list='--policy-name', help='The name of a stored access policy.') c.argument('permission', options_list='--permissions', help=sas_help.format(get_permission_help_string(t_file_system_permissions)), validator=get_permission_validator(t_file_system_permissions)) c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed' 'using this shared access signature.') c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed' 'using this shared access signature.') c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed' 'using this shared access signature.') c.argument('content_language', help='Response header value for Content-Language when resource is accessed' 'using this shared access signature.') c.argument('content_type', help='Response header value for Content-Type when resource is accessed' 'using this shared access signature.') c.argument('as_user', min_api='2018-11-09', action='store_true', validator=as_user_validator, help="Indicates that this command return the SAS signed with the user delegation key. " "The expiry parameter and '--auth-mode login' are required if this argument is specified. 
") c.ignore('sas_token') c.argument('full_uri', action='store_true', help='Indicate that this command return the full blob URI and the shared access signature token.') with self.argument_context('storage fs list') as c: c.argument('include_metadata', arg_type=get_three_state_flag(), help='Specify that file system metadata be returned in the response. The default value is "False".') c.argument('name_starts_with', options_list=['--prefix'], help='Filter the results to return only file systems whose names begin with the specified prefix.') for item in ['create', 'show', 'delete', 'exists', 'move', 'metadata update', 'metadata show']: with self.argument_context('storage fs directory {}'.format(item)) as c: c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name (i.e. container name).", required=True) c.extra('directory_path', options_list=['--name', '-n'], help="The name of directory.", required=True) c.extra('timeout', timeout_type) with self.argument_context('storage fs directory create') as c: c.extra('permissions', permissions_type) c.extra('umask', umask_type) with self.argument_context('storage fs directory list') as c: c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name (i.e. container name).", required=True) c.argument('recursive', arg_type=get_three_state_flag(), default=True, help='Look into sub-directories recursively when set to true.') c.argument('path', help="Filter the results to return only paths under the specified path.") c.argument('num_results', type=int, help='Specify the maximum number of results to return.') with self.argument_context('storage fs directory move') as c: c.argument('new_name', options_list=['--new-directory', '-d'], help='The new directory name the users want to move to. 
The value must have the following format: ' '"{filesystem}/{directory}/{subdirectory}".') with self.argument_context('storage fs directory upload') as c: from ._validators import validate_fs_directory_upload_destination_url c.extra('destination_fs', options_list=['--file-system', '-f'], required=True, help='The upload destination file system.') c.extra('destination_path', options_list=['--destination-path', '-d'], validator=validate_fs_directory_upload_destination_url, help='The upload destination directory path. It should be an absolute path to file system. ' 'If the specified destination path does not exist, a new directory path will be created.') c.argument('source', options_list=['--source', '-s'], help='The source file path to upload from.') c.argument('recursive', recursive_type, help='Recursively upload files. If enabled, all the files ' 'including the files in subdirectories will be uploaded.') c.ignore('destination') with self.argument_context('storage fs directory download') as c: from ._validators import validate_fs_directory_download_source_url c.extra('source_fs', options_list=['--file-system', '-f'], required=True, help='The download source file system.') c.extra('source_path', options_list=['--source-path', '-s'], validator=validate_fs_directory_download_source_url, help='The download source directory path. It should be an absolute path to file system.') c.argument('destination', options_list=['--destination-path', '-d'], help='The destination local directory path to download.') c.argument('recursive', recursive_type, help='Recursively download files. If enabled, all the files ' 'including the files in subdirectories will be downloaded.') c.ignore('source') with self.argument_context('storage fs file list') as c: c.extra('file_system_name', options_list=['-f', '--file-system'], help="File system name (i.e. 
container name).", required=True) c.argument('recursive', arg_type=get_three_state_flag(), default=True, help='Look into sub-directories recursively when set to true.') c.argument('exclude_dir', action='store_true', help='List only files in the given file system.') c.argument('path', help='Filter the results to return only paths under the specified path.') c.argument('num_results', type=int, default=5000, help='Specify the maximum number of results to return. If the request does not specify num_results ' 'or specifies a value greater than 5,000, the server will return up to 5,000 items.') c.argument('marker', help='An opaque continuation token. This value can be retrieved from the next_marker field of a ' 'previous generator object. If specified, this generator will begin returning results from this ' 'point.') c.argument('show_next_marker', action='store_true', is_preview=True, help='Show nextMarker in result when specified.') for item in ['create', 'show', 'delete', 'exists', 'upload', 'append', 'download', 'show', 'metadata update', 'metadata show']: with self.argument_context('storage fs file {}'.format(item)) as c: c.extra('file_system_name', options_list=['-f', '--file-system'], help='File system name (i.e. 
container name).', required=True) c.extra('path', options_list=['-p', '--path'], help="The file path in a file system.", required=True) c.extra('timeout', timeout_type) c.argument('content', help='Content to be appended to file.') with self.argument_context('storage fs file create') as c: t_file_content_settings = self.get_sdk('_models#ContentSettings', resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE) c.register_content_settings_argument(t_file_content_settings, update=False) c.extra('permissions', permissions_type) c.extra('umask', umask_type) c.extra('timeout', timeout_type) with self.argument_context('storage fs file download') as c: c.argument('destination_path', options_list=['--destination', '-d'], type=file_type, help='The local file where the file or folder will be downloaded to. The source filename will be ' 'used if not specified.') c.argument('overwrite', arg_type=get_three_state_flag(), help="Overwrite an existing file when specified. Default value is false.") with self.argument_context('storage fs file move') as c: t_file_content_settings = self.get_sdk('_models#ContentSettings', resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE) c.register_content_settings_argument(t_file_content_settings, update=False) c.extra('file_system_name', options_list=['-f', '--file-system'], help='File system name (i.e. container name).', required=True) c.extra('path', options_list=['-p', '--path'], required=True, help="The original file path users want to move in a file system.") c.argument('new_name', options_list=['--new-path'], help='The new path the users want to move to. 
The value must have the following format: ' '"{filesystem}/{directory}/{subdirectory}/{file}".') with self.argument_context('storage fs file upload') as c: t_file_content_settings = self.get_sdk('_models#ContentSettings', resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE) c.register_content_settings_argument(t_file_content_settings, update=False) c.argument('local_path', options_list=['--source', '-s'], help='Path of the local file to upload as the file content.') c.argument('overwrite', arg_type=get_three_state_flag(), help="Overwrite an existing file when specified.") c.argument('if_match', arg_group='Precondition', help="An ETag value, or the wildcard character (*). Specify this header to perform the operation " "only if the resource's ETag matches the value specified.") c.argument('if_none_match', arg_group='Precondition', help="An ETag value, or the wildcard character (*). Specify this header to perform the operation " "only if the resource's ETag does not match the value specified.") c.argument('if_modified_since', arg_group='Precondition', help="A Commence only if modified since supplied UTC datetime (Y-m-d'T'H:M'Z').") c.argument('if_unmodified_since', arg_group='Precondition', help="A Commence only if unmodified since supplied UTC datetime (Y-m-d'T'H:M'Z').") c.argument('permissions', permissions_type) c.argument('umask', umask_type) for item in ['set', 'show']: with self.argument_context('storage fs access {}'.format(item)) as c: from ._validators import validate_access_control c.extra('file_system_name', options_list=['-f', '--file-system'], help='File system name (i.e. 
container name).', required=True) c.extra('directory_path', options_list=['-p', '--path'], help='The path to a file or directory in the specified file system.', required=True) c.argument('permissions', validator=validate_access_control) c.ignore('upn') for item in ['set-recursive', 'update-recursive', 'remove-recursive']: with self.argument_context('storage fs access {}'.format(item)) as c: c.register_fs_directory_arguments() c.argument('acl', help='The value is a comma-separated list of access control entries. Each access control ' 'entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the ' 'format "[scope:][type]:[id]:[permissions]". For more information, please refer to ' 'https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-access-control.') c.extra('continuation', help='Optional continuation token that can be used to resume previously stopped operation.') c.extra('batch_size', type=int, help='Optional. If data set size exceeds batch size then operation will ' 'be split into multiple requests so that progress can be tracked. Batch size should be between 1 ' 'and 2000. The default when unspecified is 2000.') c.extra('max_batches', type=int, help='Optional. Define maximum number of batches that single change ' 'Access Control operation can execute. If maximum is reached before all sub-paths are processed, ' 'then continuation token can be used to resume operation. Empty value indicates that maximum ' 'number of batches in unbound and operation continues till end.') c.extra('continue_on_failure', arg_type=get_three_state_flag(), help='If set to False, the operation will terminate quickly on encountering user errors (4XX). ' 'If True, the operation will ignore user errors and proceed with the operation on other ' 'sub-entities of the directory. Continuation token will only be returned when ' '--continue-on-failure is True in case of user errors. If not set the default value is False ' 'for this.')
70.04334
166
0.655061
4a0155ff2262c0768d45920e712b06aeed6ea390
61,397
py
Python
qiskit/_quantumprogram.py
drmandaarbpande/qiskit-sdk-py
04065ed68be2dbe25aefa1d153057f6f3986ae7b
[ "Apache-2.0" ]
3
2021-11-08T11:46:42.000Z
2021-12-27T10:13:38.000Z
qiskit/_quantumprogram.py
drmandaarbpande/qiskit-sdk-py
04065ed68be2dbe25aefa1d153057f6f3986ae7b
[ "Apache-2.0" ]
2
2021-11-09T14:57:09.000Z
2022-01-12T12:35:58.000Z
artifacts/old_dataset_versions/original_commits_v02/qiskit-terra/qiskit-terra#342/after/_quantumprogram.py
MattePalte/Bugs-Quantum-Computing-Platforms
0c1c805fd5dfce465a8955ee3faf81037023a23e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2017 IBM RESEARCH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """ Qasm Program Class """ import random import json import logging import os import string import re from threading import Event import copy # use the external IBMQuantumExperience Library import itertools from IBMQuantumExperience import IBMQuantumExperience # Local Simulator Modules import qiskit.backends # Stable Modules from . import QuantumRegister from . import ClassicalRegister from . import QuantumCircuit from . import QISKitError from . import JobProcessor from . import QuantumJob from . import Measure from . import Gate from .extensions.standard.barrier import Barrier from ._logging import set_qiskit_logger, unset_qiskit_logger # Beta Modules from . import unroll from . import qasm from . import mapper from . import _openquantumcompiler as openquantumcompiler FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)') ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])') logger = logging.getLogger(__name__) def convert(name): """Return a snake case string from a camelcase string.""" string_1 = FIRST_CAP_RE.sub(r'\1_\2', name) return ALL_CAP_RE.sub(r'\1_\2', string_1).lower() class QuantumProgram(object): """Quantum Program Class. Class internal properties. Elements that are not python identifiers or string constants are denoted by "--description (type)--". 
For example, a circuit's name is denoted by "--circuit name (string)--" and might have the value "teleport". Internal:: __quantum_registers (list[dic]): An dictionary of quantum registers used in the quantum program. __quantum_registers = { --register name (string)--: QuantumRegister, } __classical_registers (list[dic]): An ordered list of classical registers used in the quantum program. __classical_registers = { --register name (string)--: ClassicalRegister, } __quantum_program (dic): An dictionary of quantum circuits __quantum_program = { --circuit name (string)--: --circuit object --, } __init_circuit (obj): A quantum circuit object for the initial quantum circuit __ONLINE_BACKENDS (list[str]): A list of online backends __LOCAL_BACKENDS (list[str]): A list of local backends """ # -- FUTURE IMPROVEMENTS -- # TODO: for status results make ALL_CAPS (check) or some unified method # TODO: Jay: coupling_map, basis_gates will move into a config object # only exists once you set the api to use the online backends __api = {} __api_config = {} def __init__(self, specs=None): self.__quantum_registers = {} self.__classical_registers = {} self.__quantum_program = {} # stores all the quantum programs self.__init_circuit = None # stores the intial quantum circuit of the program self.__ONLINE_BACKENDS = [] # pylint: disable=invalid-name self.__LOCAL_BACKENDS = qiskit.backends.local_backends() # pylint: disable=invalid-name self.__counter = itertools.count() self.mapper = mapper if specs: self.__init_specs(specs) def enable_logs(self, level=logging.INFO): """Enable the console output of the logging messages. Enable the output of logging messages (above level `level`) to the console, by configuring the `qiskit` logger accordingly. Params: level (int): minimum severity of the messages that are displayed. Note: This is a convenience method over the standard Python logging facilities, and modifies the configuration of the 'qiskit.*' loggers. 
If finer control over the logging configuration is needed, it is encouraged to bypass this method. """ # Update the handlers and formatters. set_qiskit_logger() # Set the logger level. logging.getLogger('qiskit').setLevel(level) def disable_logs(self): """Disable the console output of the logging messages. Disable the output of logging messages (above level `level`) to the console, by removing the handlers from the `qiskit` logger. Note: This is a convenience method over the standard Python logging facilities, and modifies the configuration of the 'qiskit.*' loggers. If finer control over the logging configuration is needed, it is encouraged to bypass this method. """ unset_qiskit_logger() ############################################################### # methods to initiate an build a quantum program ############################################################### def __init_specs(self, specs): """Populate the Quantum Program Object with initial Specs. Args: specs (dict): Q_SPECS = { "circuits": [{ "name": "Circuit", "quantum_registers": [{ "name": "qr", "size": 4 }], "classical_registers": [{ "name": "cr", "size": 4 }] }], """ quantumr = [] classicalr = [] if "circuits" in specs: for circuit in specs["circuits"]: quantumr = self.create_quantum_registers( circuit["quantum_registers"]) classicalr = self.create_classical_registers( circuit["classical_registers"]) self.create_circuit(name=circuit.get("name"), qregisters=quantumr, cregisters=classicalr) # TODO: Jay: I think we should return function handles for the # registers and circuit. So that we dont need to get them after we # create them with get_quantum_register etc def create_quantum_register(self, name=None, size=1): """Create a new Quantum Register. Args: name (hashable or None): the name of the quantum register. If None, an automatically generated identifier will be assigned. 
size (int): the size of the quantum register Returns: QuantumRegister: internal reference to a quantum register in __quantum_registers Raises: QISKitError: if the register already exists in the program. """ if name is not None and name in self.__quantum_registers: if size != len(self.__quantum_registers[name]): raise QISKitError("Can't make this register: Already in" " program with different size") logger.info(">> quantum_register exists: %s %s", name, size) return self.__quantum_registers[name] if name is None: name = self._create_id('q', self.__quantum_registers) self.__quantum_registers[name] = QuantumRegister(name, size) logger.info(">> new quantum_register created: %s %s", name, size) return self.__quantum_registers[name] def destroy_quantum_register(self, name): """Destroy an existing Quantum Register. Args: name (hashable): the name of the quantum register Raises: QISKitError: if the register does not exist in the program. """ if name not in self.__quantum_registers: raise QISKitError("Can't destroy this register: Not present") else: logger.info(">> quantum_register destroyed: %s", name) del self.__quantum_registers[name] def create_quantum_registers(self, register_array): """Create a new set of Quantum Registers based on a array of them. Args: register_array (list[dict]): An array of quantum registers in dictionary format. For example: [{"name": "qr", "size": 4}, ... ] Any other key in the dictionary will be ignored. If "name" is not defined (or None) a random name wil be assigned. Returns: list(QuantumRegister): Array of quantum registers objects """ new_registers = [] for register in register_array: register = self.create_quantum_register( register.get('name'), register["size"]) new_registers.append(register) return new_registers def destroy_quantum_registers(self, register_array): """Destroy a set of Quantum Registers based on a array of them. Args: register_array (list[dict]): An array of quantum registers in dictionary format. 
For example: [{"name": "qr"}, ... ] Any other key in the dictionary will be ignored. """ for register in register_array: self.destroy_quantum_register(register["name"]) def create_classical_register(self, name=None, size=1): """Create a new Classical Register. Args: name (hashable or None): the name of the classical register. If None, an automatically generated identifier will be assigned. size (int): the size of the classical register Returns: ClassicalRegister: internal reference to a classical register in __classical_registers Raises: QISKitError: if the register already exists in the program. """ if name is not None and name in self.__classical_registers: if size != len(self.__classical_registers[name]): raise QISKitError("Can't make this register: Already in" " program with different size") logger.info(">> classical register exists: %s %s", name, size) return self.__classical_registers[name] if name is None: name = self._create_id('c', self.__classical_registers) self.__classical_registers[name] = ClassicalRegister(name, size) logger.info(">> new classical register created: %s %s", name, size) return self.__classical_registers[name] def create_classical_registers(self, registers_array): """Create a new set of Classical Registers based on a array of them. Args: registers_array (list[dict]): An array of classical registers in dictionary format. For example: [{"name": "cr", "size": 4}, ... ] Any other key in the dictionary will be ignored. If "name" is not defined (or None) a random name wil be assigned. Returns: list(ClassicalRegister): Array of clasical registers objects """ new_registers = [] for register in registers_array: new_registers.append(self.create_classical_register( register.get("name"), register["size"])) return new_registers def destroy_classical_register(self, name): """Destroy an existing Classical Register. Args: name (hashable): the name of the classical register Raises: QISKitError: if the register does not exist in the program. 
""" if name not in self.__classical_registers: raise QISKitError("Can't destroy this register: Not present") else: logger.info(">> classical register destroyed: %s", name) del self.__classical_registers[name] def destroy_classical_registers(self, registers_array): """Destroy a set of Classical Registers based on a array of them. Args: registers_array (list[dict]): An array of classical registers in dictionary format. For example: [{"name": "cr"}, ... ] Any other key in the dictionary will be ignored. """ for register in registers_array: self.destroy_classical_register(register["name"]) def create_circuit(self, name=None, qregisters=None, cregisters=None): """Create a empty Quantum Circuit in the Quantum Program. Args: name (hashable or None): the name of the circuit. If None, an automatically generated identifier will be assigned. qregisters (list(QuantumRegister)): is an Array of Quantum Registers by object reference cregisters (list(ClassicalRegister)): is an Array of Classical Registers by object reference Returns: QuantumCircuit: A quantum circuit is created and added to the Quantum Program """ if name is None: name = self._create_id('qc', self.__quantum_program.keys()) if not qregisters: qregisters = [] if not cregisters: cregisters = [] quantum_circuit = QuantumCircuit(name=name) if not self.__init_circuit: self.__init_circuit = quantum_circuit for register in qregisters: quantum_circuit.add(register) for register in cregisters: quantum_circuit.add(register) self.add_circuit(name, quantum_circuit) return self.__quantum_program[name] def destroy_circuit(self, name): """Destroy a Quantum Circuit in the Quantum Program. This will not destroy any registers associated with the circuit. Args: name (hashable): the name of the circuit Raises: QISKitError: if the register does not exist in the program. 
""" if name not in self.__quantum_program: raise QISKitError("Can't destroy this circuit: Not present") del self.__quantum_program[name] def add_circuit(self, name=None, quantum_circuit=None): """Add a new circuit based on an Object representation. Args: name (hashable or None): the name of the circuit to add. If None, an automatically generated identifier will be assigned to the circuit. quantum_circuit (QuantumCircuit): a quantum circuit to add to the program-name Raises: QISKitError: if `quantum_circuit` is None, as the attribute is optional only for not breaking backwards compatibility (as it is placed after an optional argument). """ if quantum_circuit is None: raise QISKitError('quantum_circuit is required when invoking ' 'add_circuit') if name is None: if quantum_circuit.name: name = quantum_circuit.name else: name = self._create_id('qc', self.__quantum_program.keys()) quantum_circuit.name = name for qname, qreg in quantum_circuit.get_qregs().items(): self.create_quantum_register(qname, len(qreg)) for cname, creg in quantum_circuit.get_cregs().items(): self.create_classical_register(cname, len(creg)) self.__quantum_program[name] = quantum_circuit def load_qasm_file(self, qasm_file, name=None, basis_gates='u1,u2,u3,cx,id'): """ Load qasm file into the quantum program. Args: qasm_file (str): a string for the filename including its location. name (str or None): the name of the quantum circuit after loading qasm text into it. If no name is give the name is of the text file. basis_gates (str): basis gates for the quantum circuit. Returns: str: Adds a quantum circuit with the gates given in the qasm file to the quantum program and returns the name to be used to get this circuit Raises: QISKitError: if the file cannot be read. 
""" if not os.path.exists(qasm_file): raise QISKitError('qasm file "{0}" not found'.format(qasm_file)) if not name: name = os.path.splitext(os.path.basename(qasm_file))[0] node_circuit = qasm.Qasm(filename=qasm_file).parse() # Node (AST) logger.info("circuit name: %s", name) logger.info("******************************") logger.info(node_circuit.qasm()) # current method to turn it a DAG quantum circuit. unrolled_circuit = unroll.Unroller(node_circuit, unroll.CircuitBackend(basis_gates.split(","))) circuit_unrolled = unrolled_circuit.execute() self.add_circuit(name, circuit_unrolled) return name def load_qasm_text(self, qasm_string, name=None, basis_gates='u1,u2,u3,cx,id'): """ Load qasm string in the quantum program. Args: qasm_string (str): a string for the file name. name (str or None): the name of the quantum circuit after loading qasm text into it. If no name is give the name is of the text file. basis_gates (str): basis gates for the quantum circuit. Returns: str: Adds a quantum circuit with the gates given in the qasm string to the quantum program. """ node_circuit = qasm.Qasm(data=qasm_string).parse() # Node (AST) if not name: # Get a random name if none is given name = "".join([random.choice(string.ascii_letters + string.digits) for n in range(10)]) logger.info("circuit name: %s", name) logger.info("******************************") logger.info(node_circuit.qasm()) # current method to turn it a DAG quantum circuit. unrolled_circuit = unroll.Unroller(node_circuit, unroll.CircuitBackend(basis_gates.split(","))) circuit_unrolled = unrolled_circuit.execute() self.add_circuit(name, circuit_unrolled) return name ############################################################### # methods to get elements from a QuantumProgram ############################################################### def get_quantum_register(self, name=None): """Return a Quantum Register by name. Args: name (hashable or None): the name of the quantum register. 
If None and there is only one quantum register available, returns that one. Returns: QuantumRegister: The quantum register with this name. Raises: KeyError: if the quantum register is not on the quantum program. QISKitError: if the register does not exist in the program. """ if name is None: name = self._get_single_item(self.get_quantum_register_names(), "a quantum register") try: return self.__quantum_registers[name] except KeyError: raise KeyError('No quantum register "{0}"'.format(name)) def get_classical_register(self, name=None): """Return a Classical Register by name. Args: name (hashable or None): the name of the classical register. If None and there is only one classical register available, returns that one. Returns: ClassicalRegister: The classical register with this name. Raises: KeyError: if the classical register is not on the quantum program. QISKitError: if the register does not exist in the program. """ if name is None: name = self._get_single_item(self.get_classical_register_names(), "a classical register") try: return self.__classical_registers[name] except KeyError: raise KeyError('No classical register "{0}"'.format(name)) def get_quantum_register_names(self): """Return all the names of the quantum Registers.""" return list(self.__quantum_registers.keys()) def get_classical_register_names(self): """Return all the names of the classical Registers.""" return list(self.__classical_registers.keys()) def get_circuit(self, name=None): """Return a Circuit Object by name Args: name (hashable or None): the name of the quantum circuit. If None and there is only one circuit available, returns that one. Returns: QuantumCircuit: The quantum circuit with this name Raises: KeyError: if the circuit is not on the quantum program. QISKitError: if the register does not exist in the program. 
""" if name is None: name = self._get_single_item(self.get_circuit_names(), "a circuit") try: return self.__quantum_program[name] except KeyError: raise KeyError('No quantum circuit "{0}"'.format(name)) def get_circuit_names(self): """Return all the names of the quantum circuits.""" return list(self.__quantum_program.keys()) def get_qasm(self, name=None): """Get qasm format of circuit by name. Args: name (hashable or None): name of the circuit. If None and only one circuit is available, that one is selected. Returns: str: The quantum circuit in qasm format Raises: QISKitError: if the register does not exist in the program. """ if name is None: name = self._get_single_item(self.get_circuit_names(), "a circuit") quantum_circuit = self.get_circuit(name) return quantum_circuit.qasm() def get_qasms(self, list_circuit_name=None): """Get qasm format of circuit by list of names. Args: list_circuit_name (list[hashable] or None): names of the circuit. If None, it gets all the circuits in the program. Returns: list(QuantumCircuit): List of quantum circuit in qasm format Raises: QISKitError: if the register does not exist in the program. """ qasm_source = [] if list_circuit_name is None: list_circuit_name = self.get_circuit_names() for name in list_circuit_name: qasm_source.append(self.get_qasm(name)) return qasm_source def get_initial_circuit(self): """Return the initialization Circuit.""" return self.__init_circuit ############################################################### # methods for working with backends ############################################################### def set_api(self, token, url, hub=None, group=None, project=None, proxies=None, verify=True): """ Setup the API. Fills the __ONLINE_BACKENDS, __api, and __api_config variables. Does not catch exceptions from IBMQuantumExperience. Args: token (str): The token used to register on the online backend such as the quantum experience. url (str): The url used for online backend such as the quantum experience. 
hub (str): The hub used for online backend. group (str): The group used for online backend. project (str): The project used for online backend. proxies (dict): Proxy configuration for the API, as a dict with 'urls' and credential keys. verify (bool): If False, ignores SSL certificates errors. Raises: ConnectionError: if the API instantiation failed. QISKitError: if no hub, group or project were specified. """ try: config_dict = { 'url': url, 'hub': hub, 'group': group, 'project': project } if proxies: config_dict['proxies'] = proxies self.__api = IBMQuantumExperience(token, config_dict, verify) except Exception as ex: root_exception = ex if 'License required' in str(ex): # For the 401 License required exception from the API, be # less verbose with the exceptions. root_exception = None raise ConnectionError("Couldn't connect to IBMQuantumExperience server: {0}" .format(ex)) from root_exception qiskit.backends.discover_remote_backends(self.__api) self.__ONLINE_BACKENDS = self.online_backends() self.__api_config["token"] = token self.__api_config["config"] = config_dict.copy() def set_api_hubs_config(self, hub, group, project): """Update the API hubs configuration, replacing the previous one. hub (str): The hub used for online backend. group (str): The group used for online backend. project (str): The project used for online backend. """ config_dict = { 'hub': hub, 'group': group, 'project': project } for key, value in config_dict.items(): self.__api.config[key] = value self.__api_config['config'][key] = value def get_api_config(self): """Return the program specs.""" return self.__api_config def get_api(self): """Returns a function handle to the API.""" return self.__api def save(self, file_name=None, beauty=False): """ Save Quantum Program in a Json file. Args: file_name (str): file name and path. beauty (boolean): save the text with indent 4 to make it readable. 
Returns: dict: The dictionary with the status and result of the operation Raises: LookupError: if the file_name is not correct, or writing to the file resulted in an error. """ if file_name is None: error = {"status": "Error", "result": "Not filename provided"} raise LookupError(error['result']) if beauty: indent = 4 else: indent = 0 elemements_to_save = self.__quantum_program elements_saved = {} for circuit in elemements_to_save: elements_saved[circuit] = {} elements_saved[circuit]["qasm"] = elemements_to_save[circuit].qasm() try: with open(file_name, 'w') as save_file: json.dump(elements_saved, save_file, indent=indent) return {'status': 'Done', 'result': elemements_to_save} except ValueError: error = {'status': 'Error', 'result': 'Some Problem happened to save the file'} raise LookupError(error['result']) def load(self, file_name=None): """ Load Quantum Program Json file into the Quantum Program object. Args: file_name (str): file name and path. Returns: dict: The dictionary with the status and result of the operation Raises: LookupError: if the file_name is not correct, or reading from the file resulted in an error. """ if file_name is None: error = {"status": "Error", "result": "Not filename provided"} raise LookupError(error['result']) try: with open(file_name, 'r') as load_file: elemements_loaded = json.load(load_file) for circuit in elemements_loaded: circuit_qasm = elemements_loaded[circuit]["qasm"] elemements_loaded[circuit] = qasm.Qasm(data=circuit_qasm).parse() self.__quantum_program = elemements_loaded return {"status": 'Done', 'result': self.__quantum_program} except ValueError: error = {'status': 'Error', 'result': 'Some Problem happened to load the file'} raise LookupError(error['result']) def available_backends(self): """All the backends that are seen by QISKIT.""" return self.__ONLINE_BACKENDS + self.__LOCAL_BACKENDS def online_backends(self): """Get the online backends. Queries network API if it exists and gets the backends that are online. 
Returns: list(str): List of online backends names if the online api has been set or an empty list if it has not been set. Raises: ConnectionError: if the API call failed. """ if self.get_api(): try: backends = self.__api.available_backends() except Exception as ex: raise ConnectionError("Couldn't get available backend list: {0}" .format(ex)) return [backend['name'] for backend in backends] return [] def online_simulators(self): """Gets online simulators via QX API calls. Returns: list(str): List of online simulator names. Raises: ConnectionError: if the API call failed. """ online_simulators_list = [] if self.get_api(): try: backends = self.__api.available_backends() except Exception as ex: raise ConnectionError("Couldn't get available backend list: {0}" .format(ex)) for backend in backends: if backend['simulator']: online_simulators_list.append(backend['name']) return online_simulators_list def online_devices(self): """Gets online devices via QX API calls. Returns: list(str): List of online devices names. Raises: ConnectionError: if the API call failed. """ devices = [] if self.get_api(): try: backends = self.__api.available_backends() except Exception as ex: raise ConnectionError("Couldn't get available backend list: {0}" .format(ex)) for backend in backends: if not backend['simulator']: devices.append(backend['name']) return devices def get_backend_status(self, backend): """Return the online backend status. It uses QX API call or by local backend is the name of the local or online simulator or experiment. Args: backend (str): The backend to check Returns: dict: {'available': True} Raises: ConnectionError: if the API call failed. ValueError: if the backend is not available. 
""" if backend in self.__ONLINE_BACKENDS: try: return self.__api.backend_status(backend) except Exception as ex: raise ConnectionError("Couldn't get backend status: {0}" .format(ex)) elif backend in self.__LOCAL_BACKENDS: return {'available': True} else: raise ValueError('the backend "{0}" is not available'.format(backend)) def get_backend_configuration(self, backend, list_format=False): """Return the configuration of the backend. The return is via QX API call. Args: backend (str): Name of the backend. list_format (bool): Struct used for the configuration coupling map: dict (if False) or list (if True). Returns: dict: The configuration of the named backend. Raises: ConnectionError: if the API call failed. LookupError: if a configuration for the named backend can't be found. """ if self.get_api(): configuration_edit = {} try: backends = self.__api.available_backends() except Exception as ex: raise ConnectionError("Couldn't get available backend list: {0}" .format(ex)) for configuration in backends: if configuration['name'] == backend: for key in configuration: new_key = convert(key) # TODO: removed these from the API code if new_key not in ['id', 'serial_number', 'topology_id', 'status', 'coupling_map']: configuration_edit[new_key] = configuration[key] if new_key == 'coupling_map': if configuration[key] == 'all-to-all': configuration_edit[new_key] = \ configuration[key] else: if not list_format: cmap = mapper.coupling_list2dict(configuration[key]) else: cmap = configuration[key] configuration_edit[new_key] = cmap return configuration_edit raise LookupError('Configuration for %s could not be found.' % backend) else: return qiskit.backends.get_backend_configuration(backend) def get_backend_calibration(self, backend): """Return the online backend calibrations. The return is via QX API call. Args: backend (str): Name of the backend. Returns: dict: The calibration of the named backend. Raises: ConnectionError: if the API call failed. 
LookupError: If a configuration for the named backend can't be found. """ if backend in self.__ONLINE_BACKENDS: try: calibrations = self.__api.backend_calibration(backend) except Exception as ex: raise ConnectionError("Couldn't get backend calibration: {0}" .format(ex)) calibrations_edit = {} for key, vals in calibrations.items(): new_key = convert(key) calibrations_edit[new_key] = vals return calibrations_edit elif backend in self.__LOCAL_BACKENDS: return {'backend': backend, 'calibrations': None} else: raise LookupError( 'backend calibration for "{0}" not found'.format(backend)) def get_backend_parameters(self, backend): """Return the online backend parameters. The return is via QX API call. Args: backend (str): Name of the backend. Returns: dict: The configuration of the named backend. Raises: ConnectionError: if the API call failed. LookupError: If a configuration for the named backend can't be found. """ if backend in self.__ONLINE_BACKENDS: try: parameters = self.__api.backend_parameters(backend) except Exception as ex: raise ConnectionError("Couldn't get backend parameters: {0}" .format(ex)) parameters_edit = {} for key, vals in parameters.items(): new_key = convert(key) parameters_edit[new_key] = vals return parameters_edit elif backend in self.__LOCAL_BACKENDS: return {'backend': backend, 'parameters': None} else: raise LookupError( 'backend parameters for "{0}" not found'.format(backend)) ############################################################### # methods to compile quantum programs into qobj ############################################################### def compile(self, name_of_circuits=None, backend="local_qasm_simulator", config=None, basis_gates=None, coupling_map=None, initial_layout=None, shots=1024, max_credits=10, seed=None, qobj_id=None, hpc=None): """Compile the circuits into the execution list. This builds the internal "to execute" list which is list of quantum circuits to run on different backends. 
Args: name_of_circuits (list[hashable] or None): circuit names to be compiled. If None, all the circuits will be compiled. backend (str): a string representing the backend to compile to. config (dict): a dictionary of configurations parameters for the compiler. basis_gates (str): a comma separated string and are the base gates, which by default are provided by the backend. coupling_map (dict): A directed graph of coupling:: { control(int): [ target1(int), target2(int), , ... ], ... } eg. {0: [2], 1: [2], 3: [2]} initial_layout (dict): A mapping of qubit to qubit:: { ("q", strart(int)): ("q", final(int)), ... } eg. { ("q", 0): ("q", 0), ("q", 1): ("q", 1), ("q", 2): ("q", 2), ("q", 3): ("q", 3) } shots (int): the number of shots max_credits (int): the max credits to use 3, or 5 seed (int): the initial seed the simulators use qobj_id (str): identifier of the qobj. hpc (dict): This will setup some parameter for ibmqx_hpc_qasm_simulator, using a JSON-like format like:: { 'multi_shot_optimization': Boolean, 'omp_num_threads': Numeric } This parameter MUST be used only with ibmqx_hpc_qasm_simulator, otherwise the SDK will warn the user via logging, and set the value to None. Returns: dict: the job id and populates the qobj:: qobj = { id: --job id (string), config: -- dictionary of config settings (dict)--, { "max_credits" (online only): -- credits (int) --, "shots": -- number of shots (int) --. "backend": -- backend name (str) -- } circuits: [ { "name": --circuit name (string)--, "compiled_circuit": --compiled quantum circuit (JSON format)--, "compiled_circuit_qasm": --compiled quantum circuit (QASM format)--, "config": --dictionary of additional config settings (dict)--, { "coupling_map": --adjacency list (dict)--, "basis_gates": --comma separated gate names (string)--, "layout": --layout computed by mapper (dict)--, "seed": (simulator only)--initial seed for the simulator (int)--, } }, ... ] } Raises: ValueError: if no names of the circuits have been specified. 
QISKitError: if any of the circuit names cannot be found on the Quantum Program. """ # TODO: Jay: currently basis_gates, coupling_map, initial_layout, # shots, max_credits and seed are extra inputs but I would like # them to go into the config. qobj = {} if not qobj_id: qobj_id = "".join([random.choice(string.ascii_letters + string.digits) for n in range(30)]) qobj['id'] = qobj_id qobj["config"] = {"max_credits": max_credits, 'backend': backend, "shots": shots} # TODO This backend needs HPC parameters to be passed in order to work if backend == 'ibmqx_hpc_qasm_simulator': if hpc is None: logger.info('ibmqx_hpc_qasm_simulator backend needs HPC ' 'parameter. Setting defaults to hpc.multi_shot_optimization ' '= true and hpc.omp_num_threads = 16') hpc = {'multi_shot_optimization': True, 'omp_num_threads': 16} if not all(key in hpc for key in ('multi_shot_optimization', 'omp_num_threads')): raise QISKitError('Unknown HPC parameter format!') qobj['config']['hpc'] = hpc elif hpc is not None: logger.info('HPC parameter is only available for ' 'ibmqx_hpc_qasm_simulator. 
You are passing an HPC parameter ' 'but you are not using ibmqx_hpc_qasm_simulator, so we will ' 'ignore it.') hpc = None qobj['circuits'] = [] backend_conf = qiskit.backends.get_backend_configuration(backend) if not basis_gates: if 'basis_gates' in backend_conf: basis_gates = backend_conf['basis_gates'] elif len(basis_gates.split(',')) < 2: # catches deprecated basis specification like 'SU2+CNOT' logger.warning('encountered deprecated basis specification: ' '"%s" substituting u1,u2,u3,cx,id', str(basis_gates)) basis_gates = 'u1,u2,u3,cx,id' if not coupling_map: coupling_map = backend_conf['coupling_map'] if not name_of_circuits: logger.info('Since not circuits was specified, all the circuits will be compiled.') name_of_circuits = self.get_circuit_names() if isinstance(name_of_circuits, str): name_of_circuits = [name_of_circuits] for name in name_of_circuits: if name not in self.__quantum_program: raise QISKitError('circuit "{0}" not found in program'.format(name)) circuit = self.__quantum_program[name] num_qubits = sum((len(qreg) for qreg in circuit.get_qregs().values())) # TODO: A better solution is to have options to enable/disable optimizations if num_qubits == 1: coupling_map = None if coupling_map == 'all-to-all': coupling_map = None # if the backend is a real chip, insert barrier before measurements if not backend_conf['simulator']: measured_qubits = [] qasm_idx = [] for i, instruction in enumerate(circuit.data): if isinstance(instruction, Measure): measured_qubits.append(instruction.arg[0]) qasm_idx.append(i) elif isinstance(instruction, Gate) and bool(set(instruction.arg) & set(measured_qubits)): raise QISKitError('backend "{0}" rejects gate after ' 'measurement in circuit "{1}"'.format(backend, name)) for i, qubit in zip(qasm_idx, measured_qubits): circuit.data.insert(i, Barrier([qubit], circuit)) dag_circuit, final_layout = openquantumcompiler.compile( circuit.qasm(), basis_gates=basis_gates, coupling_map=coupling_map, initial_layout=initial_layout, 
get_layout=True) # making the job to be added to qobj job = {} job["name"] = name # config parameters used by the runner if config is None: config = {} # default to empty config dict job["config"] = copy.deepcopy(config) job["config"]["coupling_map"] = mapper.coupling_dict2list(coupling_map) # TODO: Jay: make config options optional for different backends # Map the layout to a format that can be json encoded list_layout = None if final_layout: list_layout = [[k, v] for k, v in final_layout.items()] job["config"]["layout"] = list_layout job["config"]["basis_gates"] = basis_gates if seed is None: job["config"]["seed"] = None else: job["config"]["seed"] = seed # the compiled circuit to be run saved as a dag job["compiled_circuit"] = openquantumcompiler.dag2json(dag_circuit, basis_gates=basis_gates) # set eval_symbols=True to evaluate each symbolic expression # TODO after transition to qobj, we can drop this job["compiled_circuit_qasm"] = dag_circuit.qasm(qeflag=True, eval_symbols=True) # add job to the qobj qobj["circuits"].append(job) return qobj def reconfig(self, qobj, backend=None, config=None, shots=None, max_credits=None, seed=None): """Change configuration parameters for a compile qobj. Only parameters which don't affect the circuit compilation can change, e.g., the coupling_map cannot be changed here! 
Notes: If the inputs are left as None then the qobj is not updated Args: qobj (dict): already compile qobj backend (str): see .compile config (dict): see .compile shots (int): see .compile max_credits (int): see .compile seed (int): see .compile Returns: qobj: updated qobj """ if backend is not None: qobj['config']['backend'] = backend if shots is not None: qobj['config']['shots'] = shots if max_credits is not None: qobj['config']['max_credits'] = max_credits for circuits in qobj['circuits']: if seed is not None: circuits['seed'] = seed if config is not None: circuits['config'].update(config) return qobj def get_execution_list(self, qobj, print_func=print): """Print the compiled circuits that are ready to run. Note: This method is intended to be used during interactive sessions, and prints directly to stdout instead of using the logger by default. If you set print_func with a log function (eg. log.info) it will be used instead of the stdout. Returns: list(hashable): names of the circuits in `qobj` """ if not qobj: print_func("no executions to run") execution_list = [] print_func("id: %s" % qobj['id']) print_func("backend: %s" % qobj['config']['backend']) print_func("qobj config:") for key in qobj['config']: if key != 'backend': print_func(' ' + key + ': ' + str(qobj['config'][key])) for circuit in qobj['circuits']: execution_list.append(circuit["name"]) print_func(' circuit name: ' + str(circuit["name"])) print_func(' circuit config:') for key in circuit['config']: print_func(' ' + key + ': ' + str(circuit['config'][key])) return execution_list def get_compiled_configuration(self, qobj, name): """Get the compiled layout for the named circuit and backend. Args: name (str): the circuit name qobj (dict): the qobj Returns: dict: the config of the circuit. 
Raises: QISKitError: if the circuit has no configurations """ try: for index in range(len(qobj["circuits"])): if qobj["circuits"][index]['name'] == name: return qobj["circuits"][index]["config"] except KeyError: pass raise QISKitError('No compiled configurations for circuit "{0}"'.format(name)) def get_compiled_qasm(self, qobj, name): """Return the compiled cricuit in qasm format. Args: qobj (dict): the qobj name (str): name of the quantum circuit Returns: str: the QASM of the compiled circuit. Raises: QISKitError: if the circuit has no configurations """ try: for index in range(len(qobj["circuits"])): if qobj["circuits"][index]['name'] == name: return qobj["circuits"][index]["compiled_circuit_qasm"] except KeyError: pass raise QISKitError('No compiled qasm for circuit "{0}"'.format(name)) ############################################################### # methods to run quantum programs ############################################################### def run(self, qobj, wait=5, timeout=60): """Run a program (a pre-compiled quantum program). This function will block until the Job is processed. The program to run is extracted from the qobj parameter. Args: qobj (dict): the dictionary of the quantum object to run. wait (int): Time interval to wait between requests for results timeout (int): Total time to wait until the execution stops Returns: Result: A Result (class). """ job_blocker_event = Event() job_result = None def job_done_callback(results): """Callback called when the job is done. It basically transforms the results to what the user expects and pass it to the main thread """ nonlocal job_result job_result = results[0] job_blocker_event.set() self._run_internal([qobj], wait=wait, timeout=timeout, callback=job_done_callback) # Do not set a timeout, as the timeout is being managed by the job job_blocker_event.wait() return job_result def run_batch(self, qobj_list, wait=5, timeout=120): """Run various programs (a list of pre-compiled quantum programs). 
This function will block until all programs are processed. The programs to run are extracted from qobj elements of the list. Args: qobj_list (list(dict)): The list of quantum objects to run. wait (int): Time interval to wait between requests for results timeout (int): Total time to wait until the execution stops Returns: list(Result): A list of Result (class). The list will contain one Result object per qobj in the input list. """ job_blocker_event = Event() job_results = [] def job_done_callback(results): """Callback called when the job is done. It basically transforms the results to what the user expects and pass it to the main thread. """ nonlocal job_results job_results = results job_blocker_event.set() self._run_internal(qobj_list, wait=wait, timeout=timeout, callback=job_done_callback) job_blocker_event.wait() return job_results def run_async(self, qobj, wait=5, timeout=60, callback=None): """Run a program (a pre-compiled quantum program) asynchronously. This is a non-blocking function, so it will return immediately. All input for run comes from qobj. Args: qobj(dict): the dictionary of the quantum object to run or list of qobj. wait (int): Time interval to wait between requests for results timeout (int): Total time to wait until the execution stops callback (fn(result)): A function with signature: fn(result): The result param will be a Result object. """ def job_done_callback(results): """Callback called when the job is done. It basically transforms the results to what the user expects and pass it to the main thread. """ callback(results[0]) # The user is expecting a single Result self._run_internal([qobj], wait=wait, timeout=timeout, callback=job_done_callback) def run_batch_async(self, qobj_list, wait=5, timeout=120, callback=None): """Run various programs (a list of pre-compiled quantum program) asynchronously. This is a non-blocking function, so it will return immediately. All input for run comes from qobj. 
Args: qobj_list (list(dict)): The list of quantum objects to run. wait (int): Time interval to wait between requests for results timeout (int): Total time to wait until the execution stops callback (fn(results)): A function with signature: fn(results): The results param will be a list of Result objects, one Result per qobj in the input list. """ self._run_internal(qobj_list, wait=wait, timeout=timeout, callback=callback) def _run_internal(self, qobj_list, wait=5, timeout=60, callback=None): q_job_list = [] for qobj in qobj_list: q_job = QuantumJob(qobj, preformatted=True, resources={ 'max_credits': qobj['config']['max_credits'], 'wait': wait, 'timeout': timeout}) q_job_list.append(q_job) job_processor = JobProcessor(q_job_list, max_workers=5, callback=callback) job_processor.submit() def execute(self, name_of_circuits=None, backend="local_qasm_simulator", config=None, wait=5, timeout=60, basis_gates=None, coupling_map=None, initial_layout=None, shots=1024, max_credits=3, seed=None, hpc=None): """Execute, compile, and run an array of quantum circuits). This builds the internal "to execute" list which is list of quantum circuits to run on different backends. Args: name_of_circuits (list[hashable] or None): circuit names to be executed. If None, all the circuits will be executed. backend (str): a string representing the backend to compile to. config (dict): a dictionary of configurations parameters for the compiler. wait (int): Time interval to wait between requests for results timeout (int): Total time to wait until the execution stops basis_gates (str): a comma separated string and are the base gates, which by default are: u1,u2,u3,cx,id. coupling_map (dict): A directed graph of coupling:: { control(int): [ target1(int), target2(int), , ... ], ... } eg. {0: [2], 1: [2], 3: [2]} initial_layout (dict): A mapping of qubit to qubit { ("q", start(int)): ("q", final(int)), ... } eg. 
{ ("q", 0): ("q", 0), ("q", 1): ("q", 1), ("q", 2): ("q", 2), ("q", 3): ("q", 3) } shots (int): the number of shots max_credits (int): the max credits to use 3, or 5 seed (int): the initial seed the simulators use hpc (dict): This will setup some parameter for ibmqx_hpc_qasm_simulator, using a JSON-like format like:: { 'multi_shot_optimization': Boolean, 'omp_num_threads': Numeric } This parameter MUST be used only with ibmqx_hpc_qasm_simulator, otherwise the SDK will warn the user via logging, and set the value to None. Returns: Result: status done and populates the internal __quantum_program with the data """ # TODO: Jay: currently basis_gates, coupling_map, intial_layout, shots, # max_credits, and seed are extra inputs but I would like them to go # into the config qobj = self.compile(name_of_circuits=name_of_circuits, backend=backend, config=config, basis_gates=basis_gates, coupling_map=coupling_map, initial_layout=initial_layout, shots=shots, max_credits=max_credits, seed=seed, hpc=hpc) result = self.run(qobj, wait=wait, timeout=timeout) return result ############################################################### # utility methods ############################################################### @staticmethod def _get_single_item(items, item_description="an item"): """ Return the first and only element of `items`, raising an error otherwise. Args: items (list): list of items. item_description (string): text description of the item type. Returns: object: the first and only element of `items`. Raises: QISKitError: if the list does not have exactly one item. """ if len(items) == 1: return items[0] else: raise QISKitError( "The name of %s needs to be explicitly indicated, as there is " "more than one available" % item_description) def _create_id(self, prefix, existing_ids): """ Return an automatically generated identifier, increased sequentially based on the internal `_counter` generator, with the form "[prefix][numeric_id]" (ie. "q2", where the prefix is "q"). 
Args: prefix (str): string to be prepended to the numeric id. existing_ids (iterable): list of ids that should be checked for duplicates. Returns: str: the new identifier. Raises: QISKitError: if the identifier is already in `existing_ids`. """ i = next(self.__counter) identifier = "%s%i" % (prefix, i) if identifier not in existing_ids: return identifier raise QISKitError("The automatically generated identifier '%s' already " "exists" % identifier)
40.207597
98
0.551558
4a0156a4e27cf2115de4d61bddfc4351db4e7446
1,167
py
Python
tests/test_HomoscedasticGPRegression.py
Sclare87/UQ360
2378bfa4a8d61f813afbf6854341888434c9eb11
[ "Apache-2.0" ]
148
2021-05-27T20:52:51.000Z
2022-03-16T22:49:48.000Z
tests/test_HomoscedasticGPRegression.py
Sclare87/UQ360
2378bfa4a8d61f813afbf6854341888434c9eb11
[ "Apache-2.0" ]
9
2021-06-21T18:45:07.000Z
2021-11-08T14:42:30.000Z
tests/test_HomoscedasticGPRegression.py
Sclare87/UQ360
2378bfa4a8d61f813afbf6854341888434c9eb11
[ "Apache-2.0" ]
27
2021-06-01T18:29:02.000Z
2022-03-02T06:56:03.000Z
import unittest import numpy as np import torch np.random.seed(42) torch.manual_seed(42) class TestHomoscedasticGPRegression(unittest.TestCase): def _generate_mock_data(self, n_samples, n_features): from sklearn.datasets import make_regression return make_regression(n_samples, n_features, random_state=42) def test_fit_predict_and_metrics(self): from uq360.algorithms.homoscedastic_gaussian_process_regression import HomoscedasticGPRegression from uq360.metrics import compute_regression_metrics X, y = self._generate_mock_data(200, 3) y = y.reshape(-1, 1) uq_model = HomoscedasticGPRegression() uq_model.fit(X, y) yhat, yhat_lb, yhat_ub, yhat_lb_epi, yhat_ub_epi, yhat_dists = uq_model.predict(X, return_dists=True, return_epistemic=True) results = compute_regression_metrics(y.ravel(), yhat, yhat_lb, yhat_ub) coverage = results["picp"] avg_width = results["mpiw"] rmse = results["rmse"] nll = results["nll"] auucc_gain = results["auucc_gain"] assert (coverage > 0.0) if __name__ == '__main__': unittest.main()
29.175
132
0.702656
4a0156dfd55bc96fff6d0ebedac0683aa12d39af
988
py
Python
magicicon.py
armaan115/magicicon
ac71dc823c9121fa0b6dd504668551f3a97e62ff
[ "MIT" ]
1
2019-05-23T00:41:44.000Z
2019-05-23T00:41:44.000Z
magicicon.py
armaan115/magicicon
ac71dc823c9121fa0b6dd504668551f3a97e62ff
[ "MIT" ]
null
null
null
magicicon.py
armaan115/magicicon
ac71dc823c9121fa0b6dd504668551f3a97e62ff
[ "MIT" ]
null
null
null
from __future__ import print_function import subprocess import sys def convert(filename,outputfile): import cloudconvert as c # to write output to output.txt o=open('output.txt', 'w') def get_extension(filename): import os.path return os.path.splitext(filename)[1] filen_extension=get_extension(filename) print('Uploading file to server...',file=o) api=c.Api('MJ9qM1Eu2PhM7yegfHBQiAjxrcUmGQCo3uC1yymNyPoiUGFhXIUpbtIHXkQjiBJP') #api key for cloudconvert process = api.convert({ 'inputformat': filen_extension.replace('.',''), 'outputformat': 'ico', 'input': 'upload', 'file': open(filename, 'rb') }) print('Uploaded file to server. Converting....',file=o) process.wait() download_file_name=filename.replace(filen_extension,'.icns') print('Downloading....',file=o) process.download(outputfile) print('Done!',file=o)
24.7
109
0.635628
4a0156e0fa422d89bed2632f807715d6424a027d
1,689
py
Python
src/robot/reporting/stringcache.py
yahman72/robotframework
9f82d9a2bf088073859eb23a33d275c6a8c0b975
[ "ECL-2.0", "Apache-2.0" ]
1
2015-03-11T14:59:20.000Z
2015-03-11T14:59:20.000Z
src/robot/reporting/stringcache.py
yahman72/robotframework
9f82d9a2bf088073859eb23a33d275c6a8c0b975
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/robot/reporting/stringcache.py
yahman72/robotframework
9f82d9a2bf088073859eb23a33d275c6a8c0b975
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Copyright 2008-2015 Nokia Solutions and Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from operator import itemgetter from robot.utils import compress_text class StringIndex(long): pass class StringCache(object): _compress_threshold = 80 _use_compressed_threshold = 1.1 _zero_index = StringIndex(0) def __init__(self): self._cache = {'*': self._zero_index} def add(self, text): if not text: return self._zero_index text = self._encode(text) if text not in self._cache: self._cache[text] = StringIndex(len(self._cache)) return self._cache[text] def _encode(self, text): raw = self._raw(text) if raw in self._cache or len(raw) < self._compress_threshold: return raw compressed = compress_text(text) if len(compressed) * self._use_compressed_threshold < len(raw): return compressed return raw def _raw(self, text): return '*'+text def dump(self): return tuple(item[0] for item in sorted(self._cache.iteritems(), key=itemgetter(1)))
30.709091
75
0.657194
4a015772aa9a111c73d0a4f620f09e5f602d331b
1,340
py
Python
Scripts/Test/MotorController.py
AwesomeTac0/TacoBot--LitterBox
83a1628b616236b8a4af5444f61180d164f20c1b
[ "MIT" ]
null
null
null
Scripts/Test/MotorController.py
AwesomeTac0/TacoBot--LitterBox
83a1628b616236b8a4af5444f61180d164f20c1b
[ "MIT" ]
null
null
null
Scripts/Test/MotorController.py
AwesomeTac0/TacoBot--LitterBox
83a1628b616236b8a4af5444f61180d164f20c1b
[ "MIT" ]
null
null
null
import time from adafruit_motor import stepper from adafruit_motorkit import MotorKit kit = MotorKit() # Motor motorSwitcher = { 1: kit.stepper1.onestep, 2: kit.stepper2.onestep } # Style styleSwitcher = { "S": stepper.SINGLE, "D": stepper.DOUBLE, "I": stepper.INTERLEAVE, "M": stepper.MICROSTEP } # Direction directionSwitcher = { "F": "stepper.FORWARD", "B": "stepper.BACKWARD" } # Release # 0 = All Motors | 1 = Motor1 | 2 = Motor2 def ReleaseMotors(motor=0): if (motor == 0): kit.stepper1.release() kit.stepper2.release() if (motor == 1): kit.stepper1.release() if (motor == 2): kit.stepper2.release() # Let's GOOOOOO! def StepperController(Motor=1, Style="D", Direction="F", StepCount = 200): tempMotor = Motor Motor = motorSwitcher.get(Motor) Style = styleSwitcher.get(Style) Direction = directionSwitcher.get(Direction) print(f"Motor: {tempMotor} | Style: {Style} | Direction: {Direction} | StepCount: {StepCount}") for i in range(StepCount): Motor(direction=Direction, style=Style) StepperController(StepCount=1000) StepperController(Style="S", StepCount=1000) StepperController(Style="I", StepCount=1000) StepperController(Style="M", StepCount=1000) ReleaseMotors()
23.103448
99
0.648507
4a01583f62cdbbf67116cab75245290a9e7fed08
9,696
py
Python
path.py
chrisbloecker/as1-final
ff260a8511e0c22ebb58662ec188251b3cc47311
[ "MIT" ]
null
null
null
path.py
chrisbloecker/as1-final
ff260a8511e0c22ebb58662ec188251b3cc47311
[ "MIT" ]
null
null
null
path.py
chrisbloecker/as1-final
ff260a8511e0c22ebb58662ec188251b3cc47311
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # This module provides modelling of obstacles in a scene and path planning to # avoid those obstacles. Currently, Obstacles can be boxes that are described # by scaling and translating unit cubes. # # Author: Christopher Blöcker import numpy as np from abc import ABC, abstractmethod from redblack import * # functions for tuple projections fst = lambda p: p[0] snd = lambda p: p[1] # A 3D point. class Point(): def __init__(self, x, y , z): self.x = x self.y = y self.z = z def __eq__(self, other): return np.abs(self.x - other.x) <= np.finfo(float).eps \ and np.abs(self.y - other.y) <= np.finfo(float).eps \ and np.abs(self.z - other.z) <= np.finfo(float).eps def __repr__(self): return "({:.2f}, {:.2f}, {:.2f})".format(self.x, self.y, self.z) # measures the distance to a given point def distanceTo(self, point): return np.linalg.norm([self.x - point.x, self.y - point.y, self.z - point.z]) def sub(self, point): return Point( self.x - point.x , self.y - point.y , self.z - point.z ) # An abstract object. class Object(ABC): def __init__(self): pass # Checks whether the given point lies within the object. @abstractmethod def contains(self, point): raise Exception("contains not implemented.") # Scales an object according to the given scaling factors in x-, y-, and z-direction. class Scale(Object): def __init__(self, scaledObject, scaleX, scaleY, scaleZ): self.scaledObject = scaledObject self.scaleX = scaleX self.scaleY = scaleY self.scaleZ = scaleZ # Transforms the given point into the coordinate frame of the object and # checks whether the point lies within the object. def contains(self, point): scaledPoint = Point( point.x / self.scaleX , point.y / self.scaleY , point.z / self.scaleZ ) return self.scaledObject.contains(scaledPoint) # Translates an object according to the given translations in x-, y-, and z-direction. 
class Translate(Object): def __init__(self, translatedObject, translateX, translateY, translateZ): self.translatedObject = translatedObject self.translateX = translateX self.translateY = translateY self.translateZ = translateZ # Translates the given point into the coordinate frame of the object and # checks whether the point lies within the object. def contains(self, point): translatedPoint = Point( point.x - self.translateX , point.y - self.translateY , point.z - self.translateZ ) return self.translatedObject.contains(translatedPoint) # A unit cube. class Cube(Object): def __init(self): Object.__init__(self) # The unit cube contains those points which have coordinates between 0 and 1 # in all dimenstions. def contains(self, point): return 0 <= point.x <= 1 \ and 0 <= point.y <= 1 \ and 0 <= point.z <= 1 # A scene is represented as a cuboid and contains a set of obstacles that should # be avoided in path planning. When constructed, the scene is represented as a # regular cartesian grid according to the given resolution. The space is sampled # and occupied grid cells are marked. class Scene(): def __init__(self, dimX, dimY, dimZ, resolution, obstacles): self.resolution = resolution self.bounds = Scale(Cube(), dimX, dimY, dimZ) # number of grid cells in each direction x = int(dimX / resolution) y = int(dimY / resolution) z = int(dimZ / resolution) # to store which cells are occupied self.space = np.zeros((x, y, z)) # Build the scene according to the given resolution and sample the space to # represent it as a 3D array with boolean values where True means that the # respective spot is occupied by an obstacle for i in range(x): for j in range(y): for k in range(z): p = Point((i + 0.5) * resolution, (j + 0.5) * resolution, (k + 0.5) * resolution) self.space[i, j, k] = any([obstacle.contains(p) for obstacle in obstacles]) # Get the middle of a given grid cell (gx, gy, gz). 
def getPoint(self, xyz): return Point( (xyz[0] + 0.5) * self.resolution , (xyz[1] + 0.5) * self.resolution , (xyz[2] + 0.5) * self.resolution ) # Get the coordinate in the grid of a point. def getCoordinate(self, point): return ( int(point.x / self.resolution) , int(point.y / self.resolution) , int(point.z / self.resolution) ) # Use A* to plan a path from start to target and avoids the obstacles in the scene. def planPath(self, start, target): # the start point must be within the scene if not self.bounds.contains(start): raise Exception("Start ({:.2f}, {:.2f}, {:.2f}) is out of bounds!".format(start.x, start.y, start.z)) # the target point must be within the scene if not self.bounds.contains(target): raise Exception("Target ({:.2f}, {:.2f}, {:.2f}) is out of bounds!".format(target.x, target.y, target.z)) # the grid cells corresponding to start and target startCell = self.getCoordinate(start) targetCell = self.getCoordinate(target) # the start point must not lie within an obstacle if self.space[startCell[0], startCell[1], startCell[2]]: raise Exception("Start {} point lies within an obstacle!".format(self.getPoint(startCell))) # the target point must not lie within an obstacle if self.space[targetCell[0], targetCell[1], targetCell[2]]: raise Exception("Target {} point lies within an obstacle!".format(self.getPoint(startCell))) # explored and unexplored cells explored = set() unexplored = { startCell : start.distanceTo(target) } # a queue of cells to retrieve that cell with expected lowest cost queue = Empty(key = snd).insert(((startCell), start.distanceTo(target))) # for reconstructing the path, stores the predecessor of cells along the cheapest path cameFrom = { startCell : startCell } # costs to get to the grid cells costs = { startCell : 0 } # continue planning as long as we have unexplored grid cells left while len(unexplored) > 0: (current, currentCost), queue = queue.popMin() while current not in unexplored: (current, currentCost), queue = queue.popMin() 
unexplored.pop(current) explored.add(current) # we found a path! if current == targetCell: return self.postprocessPath(self.reconstructPath(cameFrom, current, target)) p = self.getPoint(current) for dx in [-1, 0, 1]: for dy in [-1, 0, 1]: for dz in [-1, 0, 1]: x = current[0] + dx y = current[1] + dy z = current[2] + dz q = self.getPoint((x, y, z)) if (x, y, z) not in explored and self.bounds.contains(q) and not self.space[x, y, z]: cost = currentCost + p.distanceTo(q) if (x, y, z) not in unexplored or cost < unexplored[(x, y, z)]: queue = queue.insert(((x, y, z), cost + q.distanceTo(target))) unexplored[(x, y, z)] = cost cameFrom[(x, y, z)] = current costs[(x, y, z)] = cost raise Exception("Cannot find a path to target!") def reconstructPath(self, cameFrom, endpoint, target): path = [ target , self.getPoint(endpoint) ] while endpoint != cameFrom[endpoint]: endpoint = cameFrom[endpoint] path.append(self.getPoint(endpoint)) return path[::-1] def postprocessPath(self, path): reducedPath = [path[0]] for i in range(1, len(path) - 1): if path[i].sub(path[i-1]) != path[i+1].sub(path[i]): reducedPath.append(path[i]) reducedPath.append(path[-1]) print("[DEBUG] Original path: {}".format(path)) print("[DEBUG] Reduced path: {}".format(reducedPath)) return reducedPath if __name__ == '__main__': table1 = Translate(Scale(Cube(), 1.30, 0.65, 0.75), 1.67, 0.00, 0.00) table2 = Translate(Scale(Cube(), 1.30, 0.85, 0.60), 1.67, 3.75, 0.00) obstacle1 = Translate(Scale(Cube(), 1.5, 0.85, 2.0), 1.55, 1.05, 0.00) obstacle2 = Translate(Scale(Cube(), 2.0, 1.15, 2.0), 0.00, 2.10, 0.00) obstacle3 = Translate(Scale(Cube(), 1.5, 1.15, 2.0), 2.50, 2.10, 0.00) obstacle4 = Translate(Scale(Cube(), 4.0, 1.15, 1.3), 0.00, 2.10, 0.70) scene = Scene(4.0, 5.0, 2.0, 0.1, [table1, table2, obstacle1, obstacle2, obstacle3, obstacle4]) start = Point(2.0, 0.5, 0.9) target = Point(2.0, 4.3, 0.9) path = scene.planPath(start, target) print(path) pathLength = 0 i = 1 while i < len(path): pathLength += 
path[0].distanceTo(path[1]) i += 1 print(pathLength)
36.86692
117
0.57632
4a01584f114390f68f7400eb7c45c44e158b8862
2,313
py
Python
tests/test_provider_hashicorp_hcp.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
507
2017-07-26T02:58:38.000Z
2022-01-21T12:35:13.000Z
tests/test_provider_hashicorp_hcp.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
135
2017-07-20T12:01:59.000Z
2021-10-04T22:25:40.000Z
tests/test_provider_hashicorp_hcp.py
mjuenema/python-terrascript
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
[ "BSD-2-Clause" ]
81
2018-02-20T17:55:28.000Z
2022-01-31T07:08:40.000Z
# tests/test_provider_hashicorp_hcp.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:18:10 UTC) def test_provider_import(): import terrascript.provider.hashicorp.hcp def test_resource_import(): from terrascript.resource.hashicorp.hcp import hcp_aws_network_peering from terrascript.resource.hashicorp.hcp import hcp_aws_transit_gateway_attachment from terrascript.resource.hashicorp.hcp import hcp_consul_cluster from terrascript.resource.hashicorp.hcp import hcp_consul_cluster_root_token from terrascript.resource.hashicorp.hcp import hcp_consul_snapshot from terrascript.resource.hashicorp.hcp import hcp_hvn from terrascript.resource.hashicorp.hcp import hcp_hvn_peering_connection from terrascript.resource.hashicorp.hcp import hcp_hvn_route from terrascript.resource.hashicorp.hcp import hcp_vault_cluster from terrascript.resource.hashicorp.hcp import hcp_vault_cluster_admin_token def test_datasource_import(): from terrascript.data.hashicorp.hcp import hcp_aws_network_peering from terrascript.data.hashicorp.hcp import hcp_aws_transit_gateway_attachment from terrascript.data.hashicorp.hcp import hcp_consul_agent_helm_config from terrascript.data.hashicorp.hcp import hcp_consul_agent_kubernetes_secret from terrascript.data.hashicorp.hcp import hcp_consul_cluster from terrascript.data.hashicorp.hcp import hcp_consul_versions from terrascript.data.hashicorp.hcp import hcp_hvn from terrascript.data.hashicorp.hcp import hcp_hvn_peering_connection from terrascript.data.hashicorp.hcp import hcp_hvn_route from terrascript.data.hashicorp.hcp import hcp_packer_image from terrascript.data.hashicorp.hcp import hcp_packer_image_iteration from terrascript.data.hashicorp.hcp import hcp_packer_iteration from terrascript.data.hashicorp.hcp import hcp_vault_cluster # TODO: Shortcut imports without namespace for official and supported providers. # TODO: This has to be moved into a required_providers block. 
# def test_version_source(): # # import terrascript.provider.hashicorp.hcp # # t = terrascript.provider.hashicorp.hcp.hcp() # s = str(t) # # assert 'https://github.com/hashicorp/terraform-provider-hcp' in s # assert '0.17.0' in s
32.577465
85
0.806312
4a015876d1862f011a93594adc2e4a5b6f7994cd
2,870
py
Python
mercurial/hgweb/wsgicgi.py
EnjoyLifeFund/Debian_py36_packages
1985d4c73fabd5f08f54b922e73a9306e09c77a5
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
1
2022-01-25T22:52:58.000Z
2022-01-25T22:52:58.000Z
mercurial/hgweb/wsgicgi.py
EnjoyLifeFund/Debian_py36_packages
1985d4c73fabd5f08f54b922e73a9306e09c77a5
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
mercurial/hgweb/wsgicgi.py
EnjoyLifeFund/Debian_py36_packages
1985d4c73fabd5f08f54b922e73a9306e09c77a5
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
# hgweb/wsgicgi.py - CGI->WSGI translator # # Copyright 2006 Eric Hopper <hopper@omnifarious.org> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. # # This was originally copied from the public domain code at # http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side from __future__ import absolute_import from .. import ( encoding, util, ) from . import ( common, ) def launch(application): util.setbinary(util.stdin) util.setbinary(util.stdout) environ = dict(encoding.environ.iteritems()) environ.setdefault(r'PATH_INFO', '') if environ.get(r'SERVER_SOFTWARE', r'').startswith(r'Microsoft-IIS'): # IIS includes script_name in PATH_INFO scriptname = environ[r'SCRIPT_NAME'] if environ[r'PATH_INFO'].startswith(scriptname): environ[r'PATH_INFO'] = environ[r'PATH_INFO'][len(scriptname):] stdin = util.stdin if environ.get(r'HTTP_EXPECT', r'').lower() == r'100-continue': stdin = common.continuereader(stdin, util.stdout.write) environ[r'wsgi.input'] = stdin environ[r'wsgi.errors'] = util.stderr environ[r'wsgi.version'] = (1, 0) environ[r'wsgi.multithread'] = False environ[r'wsgi.multiprocess'] = True environ[r'wsgi.run_once'] = True if environ.get(r'HTTPS', r'off').lower() in (r'on', r'1', r'yes'): environ[r'wsgi.url_scheme'] = r'https' else: environ[r'wsgi.url_scheme'] = r'http' headers_set = [] headers_sent = [] out = util.stdout def write(data): if not headers_set: raise AssertionError("write() before start_response()") elif not headers_sent: # Before the first output, send the stored headers status, response_headers = headers_sent[:] = headers_set out.write('Status: %s\r\n' % status) for header in response_headers: out.write('%s: %s\r\n' % header) out.write('\r\n') out.write(data) out.flush() def start_response(status, response_headers, exc_info=None): if exc_info: try: if headers_sent: # Re-raise original exception if headers sent raise exc_info[0](exc_info[1], exc_info[2]) 
finally: exc_info = None # avoid dangling circular ref elif headers_set: raise AssertionError("Headers already set!") headers_set[:] = [status, response_headers] return write content = application(environ, start_response) try: for chunk in content: write(chunk) if not headers_sent: write('') # send headers now if body was empty finally: getattr(content, 'close', lambda: None)()
31.538462
75
0.619512
4a01588dbc9cf0beb44b28ce2a9d49b6e988bc17
36,560
py
Python
test/functional/p2p_compactblocks.py
CodeIsTheKey/raptoreum
8a44d39f985c503f08969f91e0c946042c173496
[ "MIT" ]
1
2021-12-18T04:44:10.000Z
2021-12-18T04:44:10.000Z
test/functional/p2p_compactblocks.py
CodeIsTheKey/raptoreum
8a44d39f985c503f08969f91e0c946042c173496
[ "MIT" ]
null
null
null
test/functional/p2p_compactblocks.py
CodeIsTheKey/raptoreum
8a44d39f985c503f08969f91e0c946042c173496
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """CompactBlocksTest -- test compact blocks (BIP 152, without segwit support, version 1) """ from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.blocktools import create_block, create_coinbase from test_framework.script import CScript, OP_TRUE # TestP2PConn: A peer we use to send messages to raptoreumd, and store responses. class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.last_sendcmpct = [] self.block_announced = False # Store the hashes of blocks we've seen announced. # This is for synchronizing the p2p message traffic, # so we can eg wait until a particular block is announced. self.announced_blockhashes = set() def on_sendcmpct(self, message): self.last_sendcmpct.append(message) def on_cmpctblock(self, message): self.block_announced = True self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256) def on_headers(self, message): self.block_announced = True for x in self.last_message["headers"].headers: x.calc_sha256() self.announced_blockhashes.add(x.sha256) def on_inv(self, message): for x in self.last_message["inv"].inv: if x.type == 2: self.block_announced = True self.announced_blockhashes.add(x.hash) # Requires caller to hold mininode_lock def received_block_announcement(self): return self.block_announced def clear_block_announcement(self): with mininode_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) self.last_message.pop("cmpctblock", None) def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = 
hashstop self.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock) self.clear_block_announcement() # Block until a block announcement for a particular block hash is # received. def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): return (block_hash in self.announced_blockhashes) wait_until(received_hash, timeout=timeout, lock=mininode_lock) def send_await_disconnect(self, message, timeout=30): """Sends a message to the node and wait for disconnect. This is used when we want to send a message into the node that we expect will get us disconnected, eg an invalid block.""" self.send_message(message) wait_until(lambda: not self.is_connected, timeout=timeout, lock=mininode_lock) class CompactBlocksTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True # both nodes has the same version self.num_nodes = 2 self.extra_args = [["-txindex"]] * 2 self.utxos = [] def build_block_on_tip(self, node): height = node.getblockcount() tip = node.getbestblockhash() mtp = node.getblockheader(tip)['mediantime'] block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1) block.solve() return block # Create 10 more anyone-can-spend utxo's for testing. def make_utxos(self): # Doesn't matter which node we use, just use node0. 
block = self.build_block_on_tip(self.nodes[0]) self.test_node.send_and_ping(msg_block(block)) assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256) self.nodes[0].generate(100) total_value = block.vtx[0].vout[0].nValue out_value = total_value // 10 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) for i in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() block2 = self.build_block_on_tip(self.nodes[0]) block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() self.test_node.send_and_ping(msg_block(block2)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) return # Test "sendcmpct" (between peers with the same version): # - No compact block announcements unless sendcmpct is sent. # - If sendcmpct is sent with boolean 0, then block announcements are not # made with compact blocks. # - If sendcmpct is then sent with boolean 1, then new block announcements # are made with compact blocks. # If old_node is passed in, request compact blocks with version=preferred-1 # and verify that it receives block announcements via compact block. 
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): return (len(test_node.last_sendcmpct) > 0) wait_until(received_sendcmpct, timeout=30, lock=mininode_lock) with mininode_lock: # Check that the first version received is the preferred one assert_equal(test_node.last_sendcmpct[0].version, preferred_version) test_node.last_sendcmpct = [] tip = int(node.getbestblockhash(), 16) def check_announcement_of_new_block(node, peer, predicate): peer.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) peer.wait_for_block_announcement(block_hash, timeout=30) assert(peer.block_announced) with mininode_lock: assert predicate(peer), ( "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None))) # We shouldn't get any block announcements via cmpctblock yet. check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) # Try one more time, this time after requesting headers. test_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. # Before each test, sync the headers chain. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version sendcmpct = msg_sendcmpct() sendcmpct.version = preferred_version+1 sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. 
test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with valid version, but announce=False sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Finally, try a SENDCMPCT message with announce=True sendcmpct.version = preferred_version sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time (no headers sync should be needed!) check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after turning on sendheaders test_node.send_and_ping(msg_sendheaders()) check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message) # Now turn off announcements sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) # This code should be enabled after increasing cmctblk version #if old_node is not None: # Verify that a peer using an older protocol version can receive # announcements from this node. # sendcmpct.version = preferred_version-1 # sendcmpct.announce = True # old_node.send_and_ping(sendcmpct) # Header sync # old_node.request_headers_and_sync(locator=[tip]) # check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message) # This test actually causes raptoreumd to (reasonably!) disconnect us, so do this last. 
def test_invalid_cmpctblock_message(self): self.nodes[0].generate(101) block = self.build_block_on_tip(self.nodes[0]) cmpct_block = P2PHeaderAndShortIDs() cmpct_block.header = CBlockHeader(block) cmpct_block.prefilled_txn_length = 1 # This index will be too high prefilled_txn = PrefilledTransaction(1, block.vtx[0]) cmpct_block.prefilled_txn = [prefilled_txn] self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) # Compare the generated shortids to what we expect based on BIP 152, given # raptoreumd's choice of nonce. def test_compactblock_construction(self, node, test_node, version): # Generate a bunch of transactions. node.generate(101) num_transactions = 25 address = node.getnewaddress() for i in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) # Wait until we've seen the block announcement for the resulting tip tip = int(node.getbestblockhash(), 16) test_node.wait_for_block_announcement(tip) # Make sure we will receive a fast-announce compact block self.request_cb_announcements(test_node, node, version) # Now mine a block, and look at the resulting compact block. test_node.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) # Store the raw block in our internal format. 
block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False)) for tx in block.vtx: tx.calc_sha256() block.rehash() # Wait until the block was announced (via compact blocks) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block) # Now fetch the compact block using a normal non-announce getdata with mininode_lock: test_node.clear_block_announcement() inv = CInv(20, block_hash) # 20 == "CompactBlock" test_node.send_message(msg_getdata([inv])) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block) def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block): # Check that we got the right block! header_and_shortids.header.calc_sha256() assert_equal(header_and_shortids.header.sha256, block_hash) # Make sure the prefilled_txn appears to have included the coinbase assert(len(header_and_shortids.prefilled_txn) >= 1) assert_equal(header_and_shortids.prefilled_txn[0].index, 0) # Check that all prefilled_txn entries match what's in the block. 
for entry in header_and_shortids.prefilled_txn: entry.tx.calc_sha256() # This checks the tx agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) # Check that the cmpctblock message announced all the transactions. assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) # And now check that all the shortids are as expected as well. # Determine the siphash keys to use. [k0, k1] = header_and_shortids.get_siphash_keys() index = 0 while index < len(block.vtx): if (len(header_and_shortids.prefilled_txn) > 0 and header_and_shortids.prefilled_txn[0].index == index): # Already checked prefilled transactions above header_and_shortids.prefilled_txn.pop(0) else: tx_hash = block.vtx[index].sha256 shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) index += 1 # Test that raptoreumd requests compact blocks when we announce new blocks # via header or inv, and that responding to getblocktxn causes the block # to be successfully reconstructed. 
def test_compactblock_requests(self, node, test_node): # Try announcing a block with an inv or header, expect a compactblock # request for announce in ["inv", "header"]: block = self.build_block_on_tip(node) with mininode_lock: test_node.last_message.pop("getdata", None) if announce == "inv": test_node.send_message(msg_inv([CInv(2, block.sha256)])) wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock) test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert_equal(test_node.last_message["getdata"].inv[0].type, 20) assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() comp_block.header = CBlockHeader(block) comp_block.nonce = 0 [k0, k1] = comp_block.get_siphash_keys() comp_block.shortids = [ calculate_shortid(k0, k1, block.vtx[0].sha256) ] test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with mininode_lock: assert("getblocktxn" in test_node.last_message) absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() assert_equal(absolute_indexes, [0]) # should be a coinbase request # Send the coinbase, and verify that the tip advances. msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] test_node.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) # Create a chain of transactions from given utxo, and add to a new block. 
def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) for i in range(num_transactions): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) tx.rehash() utxo = [tx.sha256, 0, tx.vout[0].nValue] block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() return block # Test that we only receive getblocktxn requests for transactions that the # node needs, and that responding to them causes the block to be # reconstructed. def test_getblocktxn_requests(self, node, test_node, version): def test_getblocktxn_response(compact_block, peer, expected_result): msg = msg_cmpctblock(compact_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert("getblocktxn" in peer.last_message) absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute() assert_equal(absolute_indexes, expected_result) def test_tip_after_message(node, peer, msg, tip): peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. 
utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) msg_bt = msg_blocktxn() msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:]) test_tip_after_message(node, test_node, msg_bt, block.sha256) utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now try interspersing the prefilled transactions comp_block.initialize_from_block(block, prefill_list=[0, 1, 5]) test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now try giving one transaction ahead of time. utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) test_node.send_and_ping(msg_tx(block.vtx[1])) assert(block.vtx[1].hash in node.getrawmempool()) # Prefill 4 out of the 6 transactions, and verify that only the one # that was not in the mempool is requested. comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4]) test_getblocktxn_response(comp_block, test_node, [5]) msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now provide all transactions to the node before the block is # announced and verify reconstruction happens immediately. 
utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) for tx in block.vtx[1:]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) # Clear out last request. with mininode_lock: test_node.last_message.pop("getblocktxn", None) # Send compact block comp_block.initialize_from_block(block, prefill_list=[0]) test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction assert("getblocktxn" not in test_node.last_message) # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. def test_incorrect_blocktxn_response(self, node, test_node, version): if (len(self.utxos) == 0): self.make_utxos() utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in block.vtx[1:6]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:6]: assert(tx.hash in mempool) # Send compact block comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0]) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indexes = [] with mininode_lock: assert("getblocktxn" in test_node.last_message) absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() assert_equal(absolute_indexes, [6, 7, 8, 9, 10]) # Now give an incorrect response. 
# Note that it's possible for raptoreumd to be smart enough to know we're # lying, since it could check to see if the shortid matches what we're # sending, and eg disconnect us for misbehavior. If that behavior # change was made, we could just modify this test by having a # different peer provide the block further down, so that we're still # verifying that the block isn't marked bad permanently. This is good # enough for now. msg = msg_blocktxn() msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:]) test_node.send_and_ping(msg) # Tip should not have updated assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert(test_node.last_message["getdata"].inv[0].type == 2) assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256) # Deliver the block test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def test_getblocktxn_handler(self, node, test_node, version): # raptoreumd will not send blocktxn responses for blocks whose height is # more than 10 blocks deep. 
MAX_GETBLOCKTXN_DEPTH = 10 chain_height = node.getblockcount() current_height = chain_height while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): block_hash = node.getblockhash(current_height) block = FromHex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request))) test_node.send_message(msg) wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock) [tx.calc_sha256() for tx in block.vtx] with mininode_lock: assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) test_node.last_message.pop("blocktxn", None) current_height -= 1 # Next request should send a full block response, as we're past the # allowed depth for a blocktxn response. block_hash = node.getblockhash(current_height) msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0]) with mininode_lock: test_node.last_message.pop("block", None) test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16)) assert "blocktxn" not in test_node.last_message def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. 
MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] for i in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() test_node.send_message(msg_getdata([CInv(20, int(new_blocks[0], 16))])) wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() node.generate(1) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() with mininode_lock: test_node.last_message.pop("block", None) test_node.send_message(msg_getdata([CInv(20, int(new_blocks[0], 16))])) wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. cur_height = node.getblockcount() hashPrevBlock = int(node.getblockhash(cur_height-5), 16) block = self.build_block_on_tip(node) block.hashPrevBlock = hashPrevBlock block.solve() comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) tips = node.getchaintips() found = False for x in tips: if x["hash"] == block.hash: assert_equal(x["status"], "headers-only") found = True break assert(found) # Requesting this block via getblocktxn should silently fail # (to avoid fingerprinting attacks). 
msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: assert "blocktxn" not in test_node.last_message def test_end_to_end_block_relay(self, node, listeners): utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) [l.clear_block_announcement() for l in listeners] node.submitblock(ToHex(block)) for l in listeners: wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock) with mininode_lock: for l in listeners: assert "cmpctblock" in l.last_message l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) # Test that we don't get disconnected if we relay a compact block with valid header, # but invalid transactions. def test_invalid_tx_in_compactblock(self, node, test_node): assert(len(self.utxos)) utxo = self.utxos[0] block = self.build_block_with_transactions(node, utxo, 5) del block.vtx[3] block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Now send the compact block with all transactions prefilled, and # verify that we don't get disconnected. 
comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4]) msg = msg_cmpctblock(comp_block.to_p2p()) test_node.send_and_ping(msg) # Check that the tip didn't advance assert(int(node.getbestblockhash(), 16) is not block.sha256) test_node.sync_with_ping() # Helper for enabling cb announcements # Send the sendcmpct request and sync headers def request_cb_announcements(self, peer, node, version): tip = node.getbestblockhash() peer.get_headers(locator=[int(tip, 16)], hashstop=0) msg = msg_sendcmpct() msg.version = version msg.announce = True peer.send_and_ping(msg) def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer): assert(len(self.utxos)) def announce_cmpct_block(node, peer): utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) cmpct_block = HeaderAndShortIDs() cmpct_block.initialize_from_block(block) msg = msg_cmpctblock(cmpct_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert "getblocktxn" in peer.last_message return block, cmpct_block block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.sha256) self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now test that delivering an invalid compact block won't break relay block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() cmpct_block.prefilled_txn[0].tx = CTxIn() delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert(int(node.getbestblockhash(), 16) != block.sha256) msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 
msg.block_transactions.transactions = block.vtx[1:] stalling_peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def run_test(self): # Setup the p2p connections and start up the network thread. self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn()) self.second_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK) self.old_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK) network_thread_start() self.test_node.wait_for_verack() # We will need UTXOs to construct transactions in later tests. self.make_utxos() self.log.info("Running tests:") self.log.info("Testing SENDCMPCT p2p message... ") self.test_sendcmpct(self.nodes[0], self.test_node, 1) self.sync_blocks() self.test_sendcmpct(self.nodes[1], self.second_node, 1) self.sync_blocks() self.log.info("Testing compactblock construction...") self.test_compactblock_construction(self.nodes[0], self.test_node, 1) self.sync_blocks() self.test_compactblock_construction(self.nodes[1], self.second_node, 1) self.sync_blocks() self.log.info("Testing compactblock requests... 
") self.test_compactblock_requests(self.nodes[0], self.test_node) self.sync_blocks() self.test_compactblock_requests(self.nodes[1], self.second_node) self.sync_blocks() self.log.info("Testing getblocktxn requests...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) self.sync_blocks() self.test_getblocktxn_requests(self.nodes[1], self.second_node, 1) self.sync_blocks() self.log.info("Testing getblocktxn handler...") self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) self.sync_blocks() self.test_getblocktxn_handler(self.nodes[1], self.second_node, 1) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) self.sync_blocks() self.log.info("Testing compactblock requests/announcements not at chain tip...") self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) self.sync_blocks() self.test_compactblocks_not_at_tip(self.nodes[1], self.second_node) self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) self.sync_blocks() self.log.info("Testing handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) self.sync_blocks() self.test_incorrect_blocktxn_response(self.nodes[1], self.second_node, 1) self.sync_blocks() # End-to-end block relay tests self.log.info("Testing end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0], 1) self.request_cb_announcements(self.old_node, self.nodes[1], 1) self.request_cb_announcements(self.second_node, self.nodes[1], 1) self.test_end_to_end_block_relay(self.nodes[0], [self.second_node, self.test_node, self.old_node]) self.test_end_to_end_block_relay(self.nodes[1], [self.second_node, self.test_node, self.old_node]) self.log.info("Testing handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.second_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node) 
self.log.info("Testing reconstructing compact blocks from all peers...") self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.second_node, self.old_node) self.sync_blocks() self.log.info("Testing invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() if __name__ == '__main__': CompactBlocksTest().main()
45.929648
134
0.672183
4a0158e7cbe0434aa21b9b4041470ef056ed1bf8
4,032
py
Python
python/tvm/topi/bifrost/dense.py
mwillsey/incubator-tvm
e02dc69fef294eb73dd65d18949ed9e108f60cda
[ "Apache-2.0" ]
2
2020-04-17T02:25:16.000Z
2020-11-25T11:39:43.000Z
python/tvm/topi/bifrost/dense.py
mwillsey/incubator-tvm
e02dc69fef294eb73dd65d18949ed9e108f60cda
[ "Apache-2.0" ]
3
2020-04-20T15:37:55.000Z
2020-05-13T05:34:28.000Z
python/tvm/topi/bifrost/dense.py
mwillsey/incubator-tvm
e02dc69fef294eb73dd65d18949ed9e108f60cda
[ "Apache-2.0" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name,unused-variable """dense schedule on ARM Mali Biforst GPU""" from tvm import te from tvm import autotvm from .. import nn from ..util import traverse_inline @autotvm.register_topi_compute("dense.biforst") def dense(_, data, weight, bias=None, out_dtype=None): """Dense operator on Biforst""" return nn.dense(data, weight, bias, out_dtype) @autotvm.register_topi_schedule("dense.bifrost") def schedule_dense(cfg, outs): """Schedule for dense operator. Parameters ---------- cfg: ConfigEntity The config entity for this template outs: Array of Tensor The computation graph description of dense in the format of an array of tensors. Returns ------- s: Schedule The computation schedule for dense. 
""" outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs s = te.create_schedule([x.op for x in outs]) def _callback(op): if op.tag == "dense": vec_size = [1, 2, 4, 8, 16] max_unroll = 32 dense_out = op.output(0) output = outs[0] y, x = s[output].op.axis c = s[dense_out].op.reduce_axis[0] ##### space definition begin ##### cfg.define_split("tile_y", y, num_outputs=3) cfg.define_split("tile_x", x, num_outputs=3) cfg.define_split("c_unroll", c, num_outputs=2, max_factor=64) # fallback support if cfg.is_fallback: ref_log = autotvm.tophub.load_reference_log("mali", "rk3399", "dense.bifrost") cfg.fallback_with_reference_log(ref_log) ##### space definition end ##### if dense_out.op in s.outputs: dense_out = s.cache_write(output, "local") by, ty, yi = cfg["tile_y"].apply(s, output, y) bx, tx, xi = cfg["tile_x"].apply(s, output, x) s[output].bind(by, te.thread_axis("blockIdx.y")) s[output].bind(bx, te.thread_axis("blockIdx.x")) s[output].bind(ty, te.thread_axis("threadIdx.y")) s[output].bind(tx, te.thread_axis("threadIdx.x")) if cfg["tile_y"].size[-1] < max_unroll: s[output].unroll(yi) if cfg["tile_x"].size[-1] in vec_size: s[output].vectorize(xi) s[dense_out].compute_at(s[output], tx) k = s[dense_out].op.reduce_axis[0] y, x = s[dense_out].op.axis k, k_unroll = cfg["c_unroll"].apply(s, dense_out, k) s[dense_out].reorder(k, k_unroll, y, x) s[dense_out].unroll(k_unroll) if cfg["tile_y"].size[-1] < max_unroll: s[dense_out].unroll(y) if cfg["tile_x"].size[-1] in vec_size: s[dense_out].vectorize(x) traverse_inline(s, outs[0].op, _callback) return s def fuse_and_bind(s, tensor, axis=None, num_thread=None): """ fuse all the axis and bind to GPU threads """ axis = axis or s[tensor].op.axis fused = s[tensor].fuse(*axis) bx, tx = s[tensor].split(fused, num_thread) s[tensor].bind(bx, te.thread_axis("blockIdx.x")) s[tensor].bind(tx, te.thread_axis("threadIdx.x")) return bx, tx
35.681416
94
0.620784
4a015be578579aa2168a3d907cf3bff7ffd8a536
1,376
py
Python
migrations/versions/c4670f3bb828_.py
majdal/tasking-manager
eb8851a9095a3eb64c531946fd7ee81cb95ccc8d
[ "BSD-2-Clause" ]
421
2017-02-16T15:02:51.000Z
2022-03-06T07:12:14.000Z
migrations/versions/c4670f3bb828_.py
majdal/tasking-manager
eb8851a9095a3eb64c531946fd7ee81cb95ccc8d
[ "BSD-2-Clause" ]
3,143
2017-02-14T16:47:25.000Z
2022-03-30T11:25:20.000Z
migrations/versions/c4670f3bb828_.py
majdal/tasking-manager
eb8851a9095a3eb64c531946fd7ee81cb95ccc8d
[ "BSD-2-Clause" ]
280
2017-04-06T19:51:32.000Z
2022-03-16T09:21:27.000Z
"""empty message Revision ID: c4670f3bb828 Revises: 0aaac86a48dc Create Date: 2017-05-03 13:59:37.296261 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "c4670f3bb828" down_revision = "0aaac86a48dc" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table( "messages", sa.Column("id", sa.Integer(), nullable=False), sa.Column("message", sa.String(), nullable=True), sa.Column("subject", sa.String(), nullable=True), sa.Column("from_user_id", sa.BigInteger(), nullable=True), sa.Column("to_user_id", sa.BigInteger(), nullable=True), sa.Column("date", sa.DateTime(), nullable=True), sa.Column("read", sa.Boolean(), nullable=True), sa.ForeignKeyConstraint(["from_user_id"], ["users.id"]), sa.ForeignKeyConstraint(["to_user_id"], ["users.id"]), sa.PrimaryKeyConstraint("id"), ) op.create_index( op.f("ix_messages_to_user_id"), "messages", ["to_user_id"], unique=False ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index(op.f("ix_messages_to_user_id"), table_name="messages") op.drop_table("messages") # ### end Alembic commands ###
30.577778
80
0.651163
4a015d752e4b8454670e2113396b56939e72935e
2,618
py
Python
test/multivec_test.py
4dn-dcic/clodius
aa31b3d90a5a9fec883c20cab31ad4d347cd52cd
[ "MIT" ]
null
null
null
test/multivec_test.py
4dn-dcic/clodius
aa31b3d90a5a9fec883c20cab31ad4d347cd52cd
[ "MIT" ]
1
2019-03-28T20:08:01.000Z
2019-03-28T20:08:01.000Z
test/multivec_test.py
4dn-dcic/clodius
aa31b3d90a5a9fec883c20cab31ad4d347cd52cd
[ "MIT" ]
null
null
null
from __future__ import print_function import click.testing as clt import clodius.cli.aggregate as cca import clodius.cli.convert as ccc import clodius.db_tiles as cdt import os import os.path as op import sqlite3 import sys import tempfile testdir = op.realpath(op.dirname(__file__)) def test_bedfile_to_multivec(): runner = clt.CliRunner() input_file = op.join(testdir, 'sample_data', 'sample.bed.gz') f = tempfile.NamedTemporaryFile(delete=False) # print("input_file", input_file) result = runner.invoke( ccc.bedfile_to_multivec, [input_file, '--has-header', '--assembly', 'hg38', '--base-resolution' , '10']) import traceback a,b,tb = result.exc_info ''' print("exc_info:", result.exc_info) print("result:", result) print("result.output", result.output) print("result.error", traceback.print_tb(tb)) print("Exception:", a,b) ''' def test_load_multivec_tiles(): input_file = op.join(testdir, 'sample_data', 'sample.bed.multires.mv5') def test_states_format_befile_to_multivec(): runner = clt.CliRunner() input_file = op.join(testdir, 'sample_data', 'states_format_input_testfile.bed.gz') rows_info_file = op.join(testdir,'sample_data', 'states_format_test_row_infos.txt') f = tempfile.NamedTemporaryFile(delete=False) # print("input_file", input_file) result = runner.invoke( ccc.bedfile_to_multivec, [input_file, '--format', 'states', '--row-infos-filename', rows_info_file, '--assembly', 'hg38', '--starting-resolution' , '200', '--num-rows', '10']) import traceback a,b,tb = result.exc_info ''' print("exc_info:", result.exc_info) print("result:", result) print("result.output", result.output) print("result.error", traceback.print_tb(tb)) print("Exception:", a,b) ''' def test_ignore_bedfile_headers(): runner = clt.CliRunner() input_file = op.join(testdir, 'sample_data', '3_header_100_testfile.bed.gz') rows_info_file = op.join(testdir, 'sample_data', '3_header_100_row_infos.txt') f = tempfile.NamedTemporaryFile(delete=False) result = runner.invoke( ccc.bedfile_to_multivec, [input_file, 
'--format', 'states', '--row-infos-filename', rows_info_file, '--assembly', 'hg19', '--starting-resolution', '200', '--num-rows', '15']) import traceback a,b,tb = result.exc_info
30.8
87
0.625286
4a015d8f27a56d78100017e30c04ce9809095ed4
4,516
py
Python
noxfile.py
pchauhan-qlogic/google-resumable-media-python
3230b467b92e210e402ee45da5f89886f0ba9079
[ "Apache-2.0" ]
1
2019-07-30T14:24:08.000Z
2019-07-30T14:24:08.000Z
noxfile.py
pchauhan-qlogic/google-resumable-media-python
3230b467b92e210e402ee45da5f89886f0ba9079
[ "Apache-2.0" ]
null
null
null
noxfile.py
pchauhan-qlogic/google-resumable-media-python
3230b467b92e210e402ee45da5f89886f0ba9079
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import os import nox SYSTEM_TEST_ENV_VARS = ( 'GOOGLE_RESUMABLE_MEDIA_BUCKET', 'GOOGLE_APPLICATION_CREDENTIALS', ) REQUESTS = 'requests >= 2.18.0, < 3.0.0dev' GOOGLE_AUTH = 'google-auth >= 0.10.0' @nox.session(python=['2,7', '3.4', '3.5', '3.6', '3.7']) def unit_tests(session): """Run the unit test suite.""" # Install all test dependencies, then install this package in-place. session.install('mock', 'pytest', 'pytest-cov', REQUESTS) session.install('-e', '.') # Run py.test against the unit tests. # NOTE: We don't require 100% line coverage for unit test runs since # some have branches that are Py2/Py3 specific. line_coverage = '--cov-fail-under=99' session.run( 'py.test', '--cov=google.resumable_media', '--cov=tests.unit', '--cov-append', '--cov-config=.coveragerc', '--cov-report=', line_coverage, os.path.join('tests', 'unit'), *session.posargs ) @nox.session(python='3.6') def docs(session): """Build the docs.""" # Install Sphinx and other dependencies. session.chdir(os.path.realpath(os.path.dirname(__file__))) session.install( 'sphinx', 'sphinx_rtd_theme', 'sphinx-docstring-typing >= 0.0.3', REQUESTS, ) session.install('-e', '.') # Build the docs! session.run('bash', os.path.join('scripts', 'build_docs.sh')) @nox.session(python='3.6') def doctest(session): """Run the doctests.""" # Install Sphinx and other dependencies. 
session.chdir(os.path.realpath(os.path.dirname(__file__))) session.install( 'sphinx', 'sphinx_rtd_theme', 'sphinx-docstring-typing >= 0.0.3', 'mock', GOOGLE_AUTH, REQUESTS, ) session.install('-e', '.') # Run the doctests with Sphinx. session.run( 'sphinx-build', '-W', '-b', 'doctest', '-d', os.path.join('docs_build', 'build', 'doctrees'), 'docs_build', os.path.join('docs_build', 'doctest'), ) @nox.session(python='3.6') def lint(session): """Run flake8. Returns a failure if flake8 finds linting errors or sufficiently serious code quality issues. """ session.install('flake8') session.install('-e', '.') session.run( 'flake8', os.path.join('google', 'resumable_media'), 'tests', ) @nox.session(python='3.6') def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" session.install('docutils', 'Pygments') session.run( 'python', 'setup.py', 'check', '--restructuredtext', '--strict') @nox.session(python=['2.7', '3.6']) def system_tests(session): """Run the system test suite.""" # Sanity check: environment variables are set. missing = [] for env_var in SYSTEM_TEST_ENV_VARS: if env_var not in os.environ: missing.append(env_var) # Only run system tests if the environment variables are set. if missing: all_vars = ', '.join(missing) msg = 'Environment variable(s) unset: {}'.format(all_vars) session.skip(msg) # Install all test dependencies, then install this package into the # virutalenv's dist-packages. session.install('mock', 'pytest', REQUESTS, GOOGLE_AUTH) session.install('-e', '.') # Run py.test against the system tests. session.run( 'py.test', os.path.join('tests', 'system'), *session.posargs ) @nox.session(python='3.6') def cover(session): """Run the final coverage report. This outputs the coverage report aggregating coverage from the unit test runs (not system test runs), and then erases coverage data. 
""" session.install('coverage', 'pytest-cov') session.run('coverage', 'report', '--show-missing', '--fail-under=100') session.run('coverage', 'erase')
28.582278
75
0.63264
4a015de21b588d1e499d523d5cd405195e15d666
25
py
Python
otree_mturk_utils/__init__.py
chkgk/custom-waiting-page-for-mturk
47928641d89471061e63c424d12e83f3ad2153d0
[ "MIT" ]
4
2018-04-18T23:41:57.000Z
2019-06-27T11:24:46.000Z
otree_mturk_utils/__init__.py
chkgk/custom-waiting-page-for-mturk
47928641d89471061e63c424d12e83f3ad2153d0
[ "MIT" ]
10
2018-01-03T13:34:29.000Z
2021-06-10T20:19:26.000Z
otree_mturk_utils/__init__.py
chkgk/custom-waiting-page-for-mturk
47928641d89471061e63c424d12e83f3ad2153d0
[ "MIT" ]
4
2017-10-27T10:51:28.000Z
2020-01-21T13:32:25.000Z
__version__ = '0.0.3'
5
21
0.56
4a015e522df35b5ef4fedc2f490486f9b22626f9
2,189
py
Python
python/phonenumbers/data/region_FR.py
rodgar-nvkz/python-phonenumbers
4c7c4892211dbc9bc328bc3356b03853eaf993dc
[ "Apache-2.0" ]
1
2020-04-16T21:40:27.000Z
2020-04-16T21:40:27.000Z
python/phonenumbers/data/region_FR.py
rodgar-nvkz/python-phonenumbers
4c7c4892211dbc9bc328bc3356b03853eaf993dc
[ "Apache-2.0" ]
10
2020-03-24T10:47:53.000Z
2021-04-08T19:51:44.000Z
python/phonenumbers/data/region_FR.py
rodgar-nvkz/python-phonenumbers
4c7c4892211dbc9bc328bc3356b03853eaf993dc
[ "Apache-2.0" ]
1
2018-10-24T20:48:56.000Z
2018-10-24T20:48:56.000Z
"""Auto-generated file, do not edit by hand. FR metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_FR = PhoneMetadata(id='FR', country_code=33, international_prefix='00', general_desc=PhoneNumberDesc(national_number_pattern='[1-9]\\d{8}', possible_length=(9,)), fixed_line=PhoneNumberDesc(national_number_pattern='(?:[1-35]\\d|4[1-9])\\d{7}', example_number='123456789', possible_length=(9,)), mobile=PhoneNumberDesc(national_number_pattern='700\\d{6}|(?:6\\d|7[3-9])\\d{7}', example_number='612345678', possible_length=(9,)), toll_free=PhoneNumberDesc(national_number_pattern='80[0-5]\\d{6}', example_number='801234567', possible_length=(9,)), premium_rate=PhoneNumberDesc(national_number_pattern='836(?:0[0-36-9]|[1-9]\\d)\\d{4}|8(?:1[2-9]|2[2-47-9]|3[0-57-9]|[569]\\d|8[0-35-9])\\d{6}', example_number='891123456', possible_length=(9,)), shared_cost=PhoneNumberDesc(national_number_pattern='8(?:1[01]|2[0156]|84)\\d{6}', example_number='884012345', possible_length=(9,)), voip=PhoneNumberDesc(national_number_pattern='9\\d{8}', example_number='912345678', possible_length=(9,)), uan=PhoneNumberDesc(national_number_pattern='80[6-9]\\d{6}', example_number='806123456', possible_length=(9,)), national_prefix='0', national_prefix_for_parsing='0', number_format=[NumberFormat(pattern='(\\d{4})', format='\\1', leading_digits_pattern=['10']), NumberFormat(pattern='(\\d{3})(\\d{3})', format='\\1 \\2', leading_digits_pattern=['1']), NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['8'], national_prefix_formatting_rule='0 \\1'), NumberFormat(pattern='(\\d)(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4 \\5', leading_digits_pattern=['[1-79]'], national_prefix_formatting_rule='0\\1')], intl_number_format=[NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['8']), 
NumberFormat(pattern='(\\d)(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4 \\5', leading_digits_pattern=['[1-79]'])], mobile_number_portable_region=True)
99.5
199
0.66423
4a015fea30a1dd575da4afab49542e9682e63ad2
61,878
py
Python
python/paddle/distributed/passes/ps_trainer_pass.py
L-Net-1992/Paddle
4d0ca02ba56760b456f3d4b42a538555b9b6c307
[ "Apache-2.0" ]
11
2016-08-29T07:43:26.000Z
2016-08-29T07:51:24.000Z
python/paddle/distributed/passes/ps_trainer_pass.py
L-Net-1992/Paddle
4d0ca02ba56760b456f3d4b42a538555b9b6c307
[ "Apache-2.0" ]
null
null
null
python/paddle/distributed/passes/ps_trainer_pass.py
L-Net-1992/Paddle
4d0ca02ba56760b456f3d4b42a538555b9b6c307
[ "Apache-2.0" ]
1
2021-09-24T11:23:36.000Z
2021-09-24T11:23:36.000Z
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import paddle import paddle.compat as cpt from ..ps.utils.public import * from paddle.framework import core from paddle.distributed.passes.pass_base import PassBase, register_pass from paddle.fluid.transpiler.details.program_utils import delete_ops from paddle.fluid.transpiler.collective import SingleProcessMultiThread from _collections import deque, defaultdict from paddle.fluid.framework import Program, Parameter @register_pass("append_send_ops_pass") class AppendSendOpsPass(PassBase): # 该 pass 被多种模式复用 def __init__(self): super(AppendSendOpsPass, self).__init__() def _check_self(self): return True def _check_conflict(self, other_pass): return True def _append_send_op(self, program, union_vars, queue, is_sparse, table_id, ps_mode): if queue == STEP_COUNTER: send_input_vars = [] else: send_input_vars = [ program.global_block().vars[union_var] for union_var in union_vars ] dummy_output = [] if ps_mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]: dummy_output = program.global_block().create_var( name=framework.generate_control_dev_var_name()) program.global_block().append_op(type="send", inputs={"X": send_input_vars}, outputs={"Out": dummy_output}, attrs={ "send_varnames": [queue], "is_sparse": is_sparse, "table_id": table_id, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) return dummy_output def _append_barrier_op(self, program, dummys, trainer_id): 
program.global_block().append_op(type="send_barrier", inputs={"X": dummys}, outputs={"Out": []}, attrs={ "trainer_id": trainer_id, "half_async": True, RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs ps_mode = attrs['ps_mode'] if ps_mode == DistributedMode.GEO: send_ctx = get_geo_trainer_send_context(attrs) # geo 模式 elif attrs['is_heter_ps_mode'] == True: print("is_heter_ps_mode in append_send_ops_pass!!") send_ctx = get_the_one_send_context(attrs, split_dense_table=True) else: send_ctx = get_the_one_send_context(attrs) # async、sync 等各种模式 dummys = [] for merged_name, send in send_ctx.items(): if send.is_sparse() and ps_mode != DistributedMode.GEO: continue if send.program_id() != id(attrs['loss'].block.program): continue is_sparse = 1 if send.is_sparse() else 0 is_sparse = 2 if send.is_distributed() else is_sparse dummys.append( self._append_send_op(main_program, send.origin_varnames(), merged_name, is_sparse, send.table_id(), ps_mode)) if ps_mode in [DistributedMode.SYNC, DistributedMode.HALF_ASYNC]: trainer_id = get_role_id(attrs['role_maker']) self._append_barrier_op(main_program, dummys, trainer_id) @register_pass("distributed_ops_pass") class DistributedOpsPass(PassBase): def __init__(self): super(DistributedOpsPass, self).__init__() self.w_2_table_id = {} self.emb_size = {} def _check_self(self): return True def _check_conflict(self, other_pass): return True def _push_sparse_fuse(self, _program, push_sparse_ops, attrs, use_cvm_op): if attrs['use_ps_gpu']: return if len(push_sparse_ops) == 0: return show = None clk = None use_entry = False for param, ops in push_sparse_ops.items(): op_first = ops[0] break if op_first.has_attr("entry"): entry = op_first.attr("entry") entry = entry.split(':') if len(entry) == 3 and entry[0] == 'show_click_entry': show_var_name = entry[1] click_var_name = entry[2] if show_var_name in _program.global_block( ).vars and click_var_name in 
_program.global_block().vars: show = _program.global_block().vars[show_var_name] clk = _program.global_block().vars[click_var_name] use_entry = True else: warnings.warn( 'ShowClickEntry configured, but cannot find show/click var, will not use' ) if not use_entry: print('ShowClickEntry not configured, will not use') show = _program.global_block().create_var( name="show", dtype=core.VarDesc.VarType.INT64, persistable=False, stop_gradient=True) _program.global_block()._insert_op(index=0, type='fill_constant', inputs={}, outputs={'Out': show}, attrs={ 'shape': [1], 'dtype': show.dtype, 'value': 1, }) clk = _program.global_block().create_var( name="clk", dtype=core.VarDesc.VarType.INT64, persistable=False, stop_gradient=True) _program.global_block()._insert_op(index=0, type='fill_constant', inputs={}, outputs={'Out': clk}, attrs={ 'shape': [1], 'dtype': clk.dtype, 'value': 0, }) for param, ops in push_sparse_ops.items(): all_ops = _program.global_block().ops op_idxs = [all_ops.index(op) for op in ops] inputs = [ _program.global_block().vars[op.input("Ids")[0]] for op in ops ] w = _program.global_block().vars[ops[0].output("W@GRAD")[0]] table_id = self.w_2_table_id[param] padding_idx = ops[0].attr("padding_idx") is_distributed = ops[0].attr("is_distributed") op_type = ops[0].type outputs = [ _program.global_block().vars[op.input("Out@GRAD")[0]] for op in ops ] for idx in op_idxs[::-1]: _program.global_block()._remove_op(idx) _program.global_block().append_op(type="distributed_push_sparse", inputs={ "Ids": inputs, 'W': w, "Outputs": outputs, "Shows": show, "Clicks": clk }, outputs={"Outputs": outputs}, attrs={ "is_distributed": is_distributed, "padding_idx": padding_idx, "table_id": table_id, "size": self.emb_size[param], "use_cvm_op": use_cvm_op }) def _pull_sparse_fuse(self, _program, pull_sparse_ops, attrs, send_ctx): def dag_check_up_and_reorder(program, inputs, outputs): global_block = program.global_block() min_output_index = len(global_block.ops) max_input_index = 
-1 input_indexes = [0] * len(global_block.ops) output_indexes = [0] * len(global_block.ops) for idx, op in enumerate(global_block.ops): for i in range(0, len(op.output_names)): if input_indexes[idx] == 1: break outs = op.output(op.output_names[i]) for in_id, in_var in enumerate(inputs): if in_var.name in outs: input_indexes[idx] = 1 max_input_index = max(max_input_index, idx) break for i in range(0, len(op.input_names)): if output_indexes[idx] == 1: break ins = op.input(op.input_names[i]) for out_id, out_var in enumerate(outputs): if out_var.name in ins: output_indexes[idx] = 1 min_output_index = min(min_output_index, idx) for i in range(len(global_block.ops)): if input_indexes[i] == 1 and output_indexes[i] == 1: warnings.warn( "unable to re-arrange dags order to combine distributed embedding ops because a op both needs embedding table's output as input and produces ids as the same embedding table's input" ) return if min_output_index < max_input_index: move_ops = [] for i in range(min_output_index + 1, len(input_indexes)): if input_indexes[i] == 1: move_ops.append((global_block.ops[i], i)) for i, op in enumerate(move_ops): queue = list() visited = set() queue.append(op[1]) visited.add(op[0]) start = 0 while start < len(queue): pos = queue[start] op = global_block.ops[pos] op_inputs = [] for k in range(0, len(op.input_names)): ins = op.input(op.input_names[k]) op_inputs.append(ins) for j in range(pos - 1, min_output_index - 1, -1): op1 = global_block.ops[j] if op1 in visited: continue found = False for k in range(0, len(op1.output_names)): outs = op1.output(op1.output_names[k]) for t in range(len(op_inputs)): for y in op_inputs[t]: if y in outs: found = True break if found: break if found: break if found: if output_indexes[j] == True: warnings.warn( "unable to re-arrange dags order to combine distributed embedding ops" ) return queue.append(j) visited.add(global_block.ops[j]) start = start + 1 queue.sort() for index in queue: desc = 
global_block.desc._insert_op(min_output_index) desc.copy_from(global_block.ops[index].desc) global_block.desc._remove_op(index + 1, index + 2) global_block.ops[index].desc = desc insert_op = global_block.ops.pop(index) input_state = input_indexes.pop(index) output_state = output_indexes.pop(index) global_block.ops.insert(min_output_index, insert_op) input_indexes.insert(min_output_index, input_state) output_indexes.insert(min_output_index, output_state) min_output_index = min_output_index + 1 assert global_block.desc.op_size() == len(global_block.ops) for i in range(len(global_block.ops)): assert global_block.desc.op(i) == global_block.ops[i].desc for param, ops in pull_sparse_ops.items(): all_ops = _program.global_block().ops op_device = "" if attrs['is_heter_ps_mode']: op_device = ops[0].attr("op_device") inputs = [ _program.global_block().vars[op.input("Ids")[0]] for op in ops ] w = _program.global_block().vars[ops[0].input("W")[0]] self.emb_size[param] = w.shape[1] grad_name = attrs['param_name_to_grad_name'][w.name] table_id = -1 for name, ctx in send_ctx.items(): if grad_name in ctx.origin_varnames(): table_id = ctx.table_id() if table_id == -1: raise ValueError( "can not find suitable sparse table, please check") self.w_2_table_id[param] = table_id padding_idx = ops[0].attr("padding_idx") is_distributed = ops[0].attr("is_distributed") op_type = ops[0].type outputs = [ _program.global_block().vars[op.output("Out")[0]] for op in ops ] dag_check_up_and_reorder(_program, inputs, outputs) op_idxs = [all_ops.index(op) for op in ops] for idx in op_idxs[::-1]: _program.global_block()._remove_op(idx) inputs_idxs = [-1] * len(inputs) outputs_idxs = [len(_program.global_block().ops) + 1] * len(outputs) for idx, op in enumerate(_program.global_block().ops): for i in range(0, len(op.output_names)): outs = op.output(op.output_names[i]) for in_id, in_var in enumerate(inputs): if in_var.name in outs: inputs_idxs[in_id] = max(idx, inputs_idxs[in_id]) for i in range(0, 
len(op.input_names)): ins = op.input(op.input_names[i]) for out_id, out_var in enumerate(outputs): if out_var.name in ins: outputs_idxs[out_id] = min(idx, outputs_idxs[out_id]) if min(outputs_idxs) - max(inputs_idxs) >= 1: if max(inputs_idxs) == -1: distributed_idx = min(op_idxs) else: distributed_idx = max(inputs_idxs) + 1 if attrs['use_ps_gpu']: _program.global_block()._insert_op( index=distributed_idx, type="pull_gpups_sparse", inputs={ "Ids": inputs, 'W': w }, outputs={"Out": outputs}, attrs={ "size": [w.shape[1] for i in inputs], "is_distributed": True, "is_sparse": True }) else: _program.global_block()._insert_op( index=distributed_idx, type="distributed_lookup_table", inputs={ "Ids": inputs, 'W': w }, outputs={"Outputs": outputs}, attrs={ "is_distributed": is_distributed, "padding_idx": padding_idx, "table_id": table_id, "lookup_table_version": op_type, "op_device": op_device }) else: for i in range(len(inputs_idxs)): distributed_idx = op_idxs[i] _program.global_block()._insert_op( index=distributed_idx, type="distributed_lookup_table", inputs={ "Ids": [inputs[i]], 'W': w }, outputs={"Outputs": [outputs[i]]}, attrs={ "is_distributed": is_distributed, "padding_idx": padding_idx, "table_id": table_id, "lookup_table_version": op_type, "op_device": op_device }) def _get_pull_sparse_ops(self, _program, attrs): pull_sparse_ops = {} pull_sparse_ids = {} push_sparse_ops = {} ops = {} use_cvm_op = False for op in _program.global_block().ops: if op.type in SPARSE_OP_TYPE_DICT.keys() \ and op.attr('remote_prefetch') is True: param_name = op.input(SPARSE_OP_TYPE_DICT[op.type])[0] if attrs['is_heter_ps_mode']: # trick for matchnet, need to modify param_name += op.input("Ids")[0][0] ops = pull_sparse_ops.get(param_name, []) ops.append(op) pull_sparse_ops[param_name] = ops ids = pull_sparse_ids.get(param_name, []) ids.append(op.input("Ids")[0]) pull_sparse_ids[param_name] = ids if op.type == 'cvm': use_cvm_op = True for op in _program.global_block().ops: if op.type in 
SPARSE_GRAD_OP_TYPE_DICT.keys(): param_name = op.input(SPARSE_GRAD_OP_TYPE_DICT[op.type])[0] if param_name in pull_sparse_ids and op.input( "Ids")[0] in pull_sparse_ids[param_name]: ops = push_sparse_ops.get(param_name, []) ops.append(op) push_sparse_ops[param_name] = ops return pull_sparse_ops, push_sparse_ops, use_cvm_op def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs pull_sparse_ops, push_sparse_ops, use_cvm_op = self._get_pull_sparse_ops( main_program, attrs) print("is_heter_ps_mode in distributed_ops_pass {}?".format( attrs['is_heter_ps_mode'])) send_ctx = get_the_one_send_context( attrs, split_dense_table=attrs['is_heter_ps_mode']) self._pull_sparse_fuse(main_program, pull_sparse_ops, attrs, send_ctx) self._push_sparse_fuse(main_program, push_sparse_ops, attrs, use_cvm_op) @register_pass("delete_optimizer_pass") class DeleteOptimizesPass(PassBase): def __init__(self): super(DeleteOptimizesPass, self).__init__() def _check_self(self): return True def _check_conflict(self, other_pass): return True def _delete_optimizer_op_and_vars(self, _program, optimize_ops): optimize_vars = [] optimize_op_role_vars = [] optimize_need_delete_vars = [] for op in optimize_ops: optimize_vars.extend(op.input_arg_names) optimize_op_role_vars.extend(op.attr("op_role_var")) optimize_vars = list(set(optimize_vars)) optimize_op_role_vars = list(set(optimize_op_role_vars)) for var in optimize_vars: if var not in optimize_op_role_vars: optimize_need_delete_vars.append(var) need_delete_optimize_vars = list(set(optimize_need_delete_vars)) delete_ops(_program.global_block(), optimize_ops) for var in need_delete_optimize_vars: if _program.global_block().has_var(var): _program.global_block()._remove_var(var) def _add_lr_var(self, main_program, attrs): # Todo: hard code for pe lr_var = attrs['origin_main_program'].global_block( ).vars["learning_rate_0"] main_program.global_block().create_var(name=lr_var.name, shape=lr_var.shape, 
dtype=lr_var.dtype, type=lr_var.type, lod_level=lr_var.lod_level, persistable=True) def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs optimizer_ops = get_optimize_ops(main_program) lr_ops = get_lr_ops(main_program) optimizer_ops.extend(lr_ops) self._delete_optimizer_op_and_vars(main_program, optimizer_ops) if hasattr(attrs['origin_main_program'], 'lr_sheduler'): self._add_lr_var(main_program, attrs) @register_pass("delete_extra_optimizer_pass") class DeleteExtraOptimizerPass(PassBase): def __init__(self): super(DeleteExtraOptimizerPass, self).__init__() def _check_self(self): return True def _check_conflict(self, other_pass): return True def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs optimize_vars = [] optimize_op_role_vars = [] optimize_need_delete_vars = [] for op in get_optimize_ops(main_program): optimize_vars.extend(op.input_arg_names) optimize_op_role_vars.extend(op.attr("op_role_var")) optimize_vars = list(set(optimize_vars)) optimize_op_role_vars = list(set(optimize_op_role_vars)) for var in optimize_vars: if var not in optimize_op_role_vars: optimize_need_delete_vars.append(var) need_delete_optimize_vars = list(set(optimize_need_delete_vars)) init_ops = [] for var in need_delete_optimize_vars: param_init_op = [] for op in startup_program.global_block().ops: if var in op.output_arg_names: param_init_op.append(op) init_ops.extend(param_init_op) delete_ops(startup_program.global_block(), init_ops) for var in need_delete_optimize_vars: if startup_program.global_block().has_var(var): startup_program.global_block()._remove_var(var) @register_pass("fake_init_ops_pass") class FakeInitOpsPass(PassBase): def __init__(self): super(FakeInitOpsPass, self).__init__() def _check_self(self): return True def _check_conflict(self, other_pass): return True def _get_sparse_table_names(self, attrs): dist_varnames = get_sparse_tablenames(attrs['origin_main_programs'], True) 
sparse_varnames = get_sparse_tablenames(attrs['origin_main_programs'], False) return list(set(dist_varnames + sparse_varnames)) def _fake_init_sparsetable(self, program, sparse_table_names): # delete table init op for table_name in sparse_table_names: table_var = program.global_block().vars[table_name] table_param_init_op = [] for op in program.global_block().ops: if table_name in op.output_arg_names: table_param_init_op.append(op) init_op_num = len(table_param_init_op) if init_op_num != 1: raise ValueError("table init op num should be 1, now is " + str(init_op_num)) table_init_op = table_param_init_op[0] program.global_block().append_op( type="fake_init", inputs={}, outputs={"Out": table_var}, attrs={"shape": table_init_op.attr('shape')}) delete_ops(program.global_block(), table_param_init_op) def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs sparse_tables = self._get_sparse_table_names(attrs) self._fake_init_sparsetable(startup_program, sparse_tables) @register_pass("ps_gpu_pass") class PsGpuPass(PassBase): def __init__(self): super(PsGpuPass, self).__init__() def _check_self(self): return True def _check_conflict(self, other_pass): return True def _add_push_box_sparse_op(self, program): insert_index = -1 for idx, op in list(enumerate(program.global_block().ops)): if op.type == "lookup_table_grad": insert_index = idx for op in program.global_block().ops: if op.type != "pull_box_sparse" and op.type != "pull_gpups_sparse": continue grad_op_desc, op_grad_to_var = core.get_grad_op_desc( op.desc, cpt.to_text(set()), []) for op_desc in grad_op_desc: new_op_desc = program.global_block().desc._insert_op( insert_index + 1) new_op_desc.copy_from(op_desc) new_op_desc._set_attr(op_role_attr_name, backward) new_op = paddle.fluid.framework.Operator( program.global_block(), new_op_desc) program.global_block().ops.insert(insert_index + 1, new_op) program.global_block()._sync_with_cpp() def _remove_optimizer_var(self, program): 
embedding_w = {} for idx, op in list(enumerate(program.global_block().ops)): if op.type == "lookup_table_grad": for name in op.input("W"): embedding_w[name] = 1 optimize_vars = [] optimize_op_role_vars = [] optimize_need_delete_vars = [] for op in get_optimize_ops(program): for name in op.input("Param"): if name in embedding_w: optimize_op_role_vars.extend(op.attr("op_role_var")) for key_name in op.input_names: if key_name == "LearningRate": continue for var in op.input(key_name): optimize_vars.append(var) optimize_vars = list(set(optimize_vars)) optimize_op_role_vars = list(set(optimize_op_role_vars)) for var in optimize_vars: if var not in optimize_op_role_vars: optimize_need_delete_vars.append(var) need_delete_optimize_vars = list(set(optimize_need_delete_vars)) for name in need_delete_optimize_vars: if program.global_block().has_var(name): program.global_block()._remove_var(name) def _remove_lookup_table_grad_op_and_var(self, program): lookup_table_grad_var = {} remove_op_index = [] remove_var = [] for idx, op in list(enumerate(program.global_block().ops)): if op.type == "lookup_table_grad": for name in op.output("W@GRAD"): lookup_table_grad_var[name] = 1 remove_op_index.append(idx) remove_var.append(name) for name in op.input("W"): lookup_table_grad_var[name] = 1 for idx, op in list(enumerate(program.global_block().ops)): if op.type == "pull_box_sparse" or op.type == "pull_gpups_sparse": continue for key_name in op.input_names: for var in op.input(key_name): if var in lookup_table_grad_var: remove_op_index.append(idx) break remove_op_index = list(set(remove_op_index)) remove_op_index.sort(reverse=True) for idx in remove_op_index: program.global_block()._remove_op(idx) for name in remove_var: program.global_block()._remove_var(name) def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs self._add_push_box_sparse_op(main_program) self._remove_optimizer_var(main_program) 
self._remove_lookup_table_grad_op_and_var(main_program) @register_pass("ps_transpile_pass") class PsTranspilePass(PassBase): def __init__(self): super(PsTranspilePass, self).__init__() def _check_self(self): return True def _check_conflict(self, other_pass): return True def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs t = SingleProcessMultiThread() env = get_dist_env() t.transpile(startup_program=startup_program, main_program=main_program, rank=env["trainer_id"], endpoints=env["trainer_endpoints"], current_endpoint=env['current_endpoint'], wait_port=False) @register_pass("split_heter_worker_ops_pass") class SplitHeterWorkerOpsPass(PassBase): def __init__(self): super(SplitHeterWorkerOpsPass, self).__init__() def _check_self(self): return True def _check_conflict(self, other_pass): return True def _create_heter_program(self, program, attrs, heter_program, program_block_ops_list, heter_ops, block_var_detail): # This function mainly includes the following contents: # 1. For every heter block: # a) copy heter device op from origin program # b) create variables which belong to heter op: # -> if variable is persistable, clone it in global_scope # -> if variable is temp, create it in heter block # c) create communicate related op as follow: # joint_var.0_1 -> slice -> reshape -> origin_var # origin_var -> origin_program # reshape -> concat -> joint_var.1_2 # d) copy send op from origin program for var@grad which loacted in current heter block # e) re-check every op in current blcok if its device is not current heter devie # 2. Create send op for step counter in last heter-block # 3. Create Listen&Serv OP and Send&Recv OP for distributed training # 4. 
update CompileTimeStrategy for heter_program optimizer_block = [] grad_to_block_id = [] send_grad_var_list = [] pre_block_idx = heter_program.num_blocks - 1 role_maker = attrs['role_maker'] current_device = role_maker._heter_device_type().lower() stage_id = int(role_maker._get_stage_id()) heter_block_ops_forward = program_block_ops_list[stage_id - 1]["forward"] heter_block_ops_backward = program_block_ops_list[stage_id - 1]["backward"] heter_block = heter_program._create_block(pre_block_idx) optimizer_block.append(heter_block) for _, op in enumerate(heter_block_ops_forward): block_append_op(heter_program, program, heter_block, op) entrance_vars = block_var_detail[stage_id - 1]["forward"]["entrance"] add_vars_by_var_list(entrance_vars, program, heter_program, heter_block) exit_vars = block_var_detail[stage_id - 1]["forward"]["exit"] add_vars_by_var_list(exit_vars, program, heter_program, heter_block) first_op_index_fp = len(heter_block.ops) if stage_id < len(program_block_ops_list): heter_block_bp = heter_program._create_block(pre_block_idx) optimizer_block.append(heter_block_bp) for _, op in enumerate(heter_block_ops_backward): block_append_op(heter_program, program, heter_block_bp, op) bp_entrance_vars = block_var_detail[stage_id - 1]["backward"]["entrance"] add_vars_by_var_list(bp_entrance_vars, program, heter_program, heter_block_bp) bp_exit_vars = block_var_detail[stage_id - 1]["backward"]["exit"] add_vars_by_var_list(bp_exit_vars, program, heter_program, heter_block_bp) backward_comm_info = get_communicate_var_info(program, stage_id, bp_entrance_vars, type="backward") grad_to_block_id.append(backward_comm_info["block_input_var_name"] + ":" + str(heter_block_bp.idx)) else: for _, op in enumerate(heter_block_ops_backward): block_append_op(heter_program, program, heter_block, op) bp_entrance_vars = block_var_detail[stage_id - 1]["backward"]["entrance"] add_vars_by_var_list(bp_entrance_vars, program, heter_program, heter_block) bp_exit_vars = 
block_var_detail[stage_id - 1]["backward"]["exit"] add_vars_by_var_list(bp_exit_vars, program, heter_program, heter_block) heter_block_bp = heter_block forward_comm_info = get_communicate_var_info(program, stage_id, entrance_vars, type="forward") grad_to_block_id.append(forward_comm_info["block_input_var_name"] + ":" + str(heter_block.idx)) first_op_index_bp = len(heter_block_bp.ops) if stage_id <= len(block_var_detail) - 1: static_var = insert_communicate_op(program, role_maker, heter_block, stage_id, first_op_index_fp, block_var_detail, current_device) static_var_bp = insert_communicate_op(program, role_maker, heter_block_bp, stage_id, first_op_index_bp, block_var_detail, current_device, False) # add send op send_grad_var_list = add_send_op( program, heter_block_bp, block_var_detail[stage_id - 1]["backward"]["persistables"]) # add step conter send_input_vars = [] dummy_output = [] pserver_endpoints = get_ps_endpoints(role_maker) attrs = { "message_to_block_id": grad_to_block_id, "optimize_blocks": optimizer_block, # runtime attribute "endpoint": get_heter_worker_endpoint(role_maker), "fanin": len(get_previous_stage_trainers(role_maker)), "pserver_id": get_role_id(role_maker), "distributed_mode": attrs['ps_mode'], "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE } # append the listen_and_serv op heter_program.global_block().append_op(type="heter_listen_and_serv", inputs={'X': []}, outputs={}, attrs=attrs) # TODO check heter program def _apply_single_impl(self, main_program, startup_program, pass_ctx): """ split heter worker program from origin-program 1. find heter op (located on different device) 2. find input&output of every heter-block 3. 
create heter worker program, add listen&serv op """ attrs = pass_ctx._attrs default_deveice = "cpu" program, heter_ops, _, program_block_ops = find_heter_ops( main_program, default_deveice) if len(heter_ops) == 0: warnings.warn( "Currently running in Heter Parameter Server mode, but no OP running on heterogeneous devices, Please check your code." ) main_program = program return program_block_ops = union_forward_gradient_op(program_block_ops) block_vars_detail = find_block_joints(program, program_block_ops, heter_ops) heter_program = framework.Program() self._create_heter_program(program, attrs, heter_program, program_block_ops, heter_ops, block_vars_detail) main_program = heter_program @register_pass("split_trainer_ops_pass") class SplitTrainerOpsPass(PassBase): def __init__(self): super(SplitTrainerOpsPass, self).__init__() def _check_self(self): return True def _check_conflict(self, other_pass): return True def _replace_ops_by_communicate_op(self, program, attrs, heter_block_index, ops_list, block_var_detail): all_op = program.global_block().ops start_op = ops_list[0] first_op_idx = -1 for op in all_op: if str(op) == str(start_op): first_op_idx = all_op.index(op) break assert first_op_idx != -1 delete_same_ops(program.global_block(), ops_list) entrance_var = [] role_maker = attrs['role_maker'] if heter_block_index == 1: next_heter_worker_endpoints = get_next_stage_trainers(role_maker) entrance_var = block_var_detail[heter_block_index]["forward"][ "entrance"] comm_info = get_communicate_var_info(program, heter_block_index + 1, entrance_var) program.global_block()._insert_op( index=first_op_idx, type="send_and_recv", inputs={"X": program.global_block().vars[entrance_var[0]]}, outputs={"Out": []}, attrs={ "mode": "forward", "send_var_name": entrance_var + ["microbatch_id"], "recv_var_name": [], "message_name": comm_info["block_input_var_name"], "next_endpoints": next_heter_worker_endpoints, "previous_endpoints": [], "trainer_id": get_role_id(role_maker), 
RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) return entrance_var def _remove_var_pair_by_grad(self, var_name, attrs): for index, pair in enumerate(attrs['merged_variables_pairs']): var = pair[0] var_grad = pair[1] if var_grad.merged_var.name == var_name: del attrs['merged_variables_pairs'][index] for index, pair in enumerate(attrs['merged_dense_pairs']): var = pair[0] var_grad = pair[1] if var_grad.merged_var.name == var_name: del attrs['merged_dense_pairs'][index] return for index, pair in enumerate(attrs['merged_sparse_pairs']): var = pair[0] var_grad = pair[1] if var_grad.merged_var.name == var_name: del attrs['merged_sparse_pairs'][index] return def _remove_trainer_send_op(self, program, attrs, heter_block_index, block_var_detail): # if trainer do FF->BP->SEND, it has follow vars: var, var@GRAD # if trainer only do SEND, it has one var: var@GRAD # Delete Send op ,if trainer doesn't has pair var (var<->var@GRAD) persistables = block_var_detail[heter_block_index]["forward"]["persistables"] + \ block_var_detail[heter_block_index]["backward"]["persistables"] need_remove_send_op = [] need_remove_grad_var = [] for op in find_send_op(program): input_list, _ = find_op_input_output(program, program.global_block(), op) for var_name in input_list: origin_var_name = var_name.split("@GRAD")[0] if origin_var_name in persistables: need_remove_send_op.append(op) need_remove_grad_var.append(var_name) need_remove_send_op = list(set(need_remove_send_op)) delete_ops(program.global_block(), need_remove_send_op) for grad_var_name in need_remove_grad_var: self._remove_var_pair_by_grad(grad_var_name, attrs) def _create_trainer_program(self, program, origin_program, attrs, program_block_ops_list, block_var_detail): # This function mainly includes the following contents: # 1. 
For every heter block in origin program # a) delete heter op and related variables # b) add send&recv op # c) add communicate ops as follows: # origin_var -> reshape -> concat -> joint_var.0_1 # send&recv op(send joint_var.0_1; recv joint_var.1_2) # joint_var.1_2 -> slice -> reshape -> origin_var # d) remove send op which related var@grad is not in trainer program # 2. check every op's device static_var = [] for heter_block_index in range(1, len(program_block_ops_list)): ops_list = program_block_ops_list[heter_block_index][ "forward"] + program_block_ops_list[heter_block_index][ "backward"] static_var += self._replace_ops_by_communicate_op( program, attrs, heter_block_index, ops_list, block_var_detail) self._remove_trainer_send_op(program, attrs, heter_block_index, block_var_detail) optimizer_block = [] grad_to_block_id = [] bp_ops_list = program_block_ops_list[0]["backward"] delete_same_ops(program.global_block(), bp_ops_list) delete_trainer_useless_var(program, static_var) backward_block = create_backward_block(program, origin_program, bp_ops_list, block_var_detail) bp_entrance_vars = block_var_detail[0]["backward"]["entrance"] backward_comm_info = get_communicate_var_info(origin_program, 1, bp_entrance_vars, type="backward") grad_to_block_id.append(backward_comm_info["block_input_var_name"] + ":" + str(backward_block.idx)) optimizer_block.append(backward_block) role_maker = attrs['role_maker'] attrs = { "message_to_block_id": grad_to_block_id, "optimize_blocks": optimizer_block, # runtime attribute "endpoint": get_trainer_endpoint(role_maker), ## get trainer endpoint "fanin": 0, ## get heter worker "pserver_id": get_role_id(role_maker), "distributed_mode": attrs['ps_mode'], "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE } # append the listen_and_serv op program.global_block()._insert_op(index=0, type="heter_listen_and_serv", inputs={'X': []}, outputs={}, attrs=attrs) ## TODO add check for bp block 
#check_op_device(program.global_block(), DEFAULT_DEVICE) def _apply_single_impl(self, main_program, startup_program, pass_ctx): """ split cpu-trainer program from origin-program 1. find heter op (located on different device) 2. find input&output of every heter-block 3. create cpu-trainer program, add send&recv op """ attrs = pass_ctx._attrs default_device_ = 'cpu' program, heter_ops, default_ops, program_block_ops = find_heter_ops( main_program, default_device_) program_block_ops = union_forward_gradient_op(program_block_ops) block_vars_detail = find_block_joints(program, program_block_ops, heter_ops) trainer_program = program.clone() self._create_trainer_program(trainer_program, program, attrs, program_block_ops, block_vars_detail) main_program = trainer_program @register_pass("set_heter_pipeline_opt_pass") class SetHeterPipelineOptPass(PassBase): def __init__(self): super(SetHeterPipelineOptPass, self).__init__() def _check_self(self): return True def _check_conflict(self, other_pass): return True def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs role_maker = attrs['role_maker'] num_microbatches = attrs['user_defined_strategy'].pipeline_configs[ 'accumulate_steps'] startup_program._heter_pipeline_opt = { "startup_program": startup_program, "pipeline_stage": int(role_maker._get_stage_id()) - 1, "heter_place": role_maker._heter_device(), "is_fl_mode": 1 } main_program._heter_pipeline_opt = { "trainer": "HeterPipelineTrainer", "device_worker": "HeterSection", "trainers": role_maker._get_stage_trainers(), ## trainer num in each stage "trainer_id": int(role_maker._role_id()), "pipeline_stage": int(role_maker._get_stage_id()) - 1, "num_pipeline_stages": int(role_maker._get_num_stage()), "section_program": main_program, "num_microbatches": num_microbatches, "heter_place": role_maker._heter_device(), "is_fl_mode": 1 } @register_pass("split_fl_ops_pass") class SplitFlOpsPass(PassBase): def __init__(self): super(SplitFlOpsPass, 
self).__init__() self.PART_A_DEVICE_FlAG = 'gpu:0' self.PART_A_JOINT_OP_DEVICE_FlAG = 'gpu:2' self.PART_B_DEVICE_FlAG = 'gpu:1' self.PART_B_JOINT_OP_DEVICE_FlAG = 'gpu:3' def _check_self(self): return True def _check_conflict(self, other_pass): return True def _insert_encrypt_op(self): pass def _insert_decrypt_op(self): pass def _clear_op_device_flag(self, program): for block in program.blocks: for op in block.ops: device = op.attr(OP_DEVICE_KEY) op._set_attr(OP_DEVICE_KEY, '') if device != '' else None def _split_fl_program(self): self.partA_ops = [] self.partB_ops = [] party_program_map = defaultdict(Program) block = self.ori_main_program.block(0) for op in block.ops: device = op.attr(OP_DEVICE_KEY) if device == self.PART_A_DEVICE_FlAG or device == '' or device == self.PART_A_JOINT_OP_DEVICE_FlAG: program = party_program_map['a'] self.partA_ops.append(op) elif device == self.PART_B_DEVICE_FlAG or device == self.PART_B_JOINT_OP_DEVICE_FlAG: program = party_program_map['b'] self.partB_ops.append(op) op_desc = op.desc ap_op = program.global_block().desc.append_op() ap_op.copy_from(op_desc) ap_op._set_attr(OP_DEVICE_KEY, device) for key in ['a', 'b']: program = party_program_map[key] program._sync_with_cpp() return party_program_map def _insert_partA_communicate_op(self, block, idx): comm_info = "forward_joint_{}_{}@fl_ps".format(1, 2) block._insert_op( idx, type='send_and_recv', inputs={'X': self.partA_to_partB_tensor}, outputs={'Out': []}, attrs={ 'mode': 'forward', # mode 直接关联前向和反向 channel 选择 'send_var_name': self.partA_to_partB_tensor_name + ["microbatch_id"], 'recv_var_name': [], 'message_name': comm_info, 'next_endpoints': get_next_stage_trainers(self.role_maker), # partB_endpoints 'previous_endpoints': get_previous_stage_trainers(self.role_maker), 'trainer_id': get_role_id(self.role_maker), # global id RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) return def _insert_partB_communicate_op(self, block, idx): comm_info = ("backward_joint_{}_{}@fl_ps".format(2, 
1)) block._insert_op( idx, type='send_and_recv', inputs={'X': self.partB_to_partA_grad}, outputs={'Out': []}, attrs={ 'mode': 'backward', 'send_var_name': self.partB_to_partA_grad_name + ["microbatch_id"], 'recv_var_name': [], 'message_name': comm_info, 'next_endpoints': get_next_stage_trainers(self.role_maker), # partA_endpoints 'previous_endpoints': get_previous_stage_trainers(self.role_maker), 'trainer_id': get_role_id(self.role_maker), # global id RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE }) return def _create_var_for_block(self, vars, block): for var in vars: if block._find_var_recursive(str(var)): continue source_var = self.ori_main_block._var_recursive(str(var)) if isinstance(var, Parameter): dest_var = block.create_parameter( name=source_var.name, shape=source_var.shape, dtype=source_var.dtype, type=source_var.type, lod_level=source_var.lod_level, stop_gradient=source_var.stop_gradient, trainable=source_var.trainable, optimize_attr=source_var.optimize_attr, regularizer=source_var.regularizer, error_clip=source_var.error_clip) else: dest_var = block._clone_variable(source_var, False) dest_var.stop_gradient = source_var.stop_gradient if hasattr(source_var, 'is_distributed'): dest_var.is_distributed = source_var.is_distributed def _get_block_by_idx(self, op_list, program, block_idx): if block_idx < len(program.blocks): new_block = program.block(block_idx) else: new_block = program._create_block() for _, op in enumerate(op_list): ap_op = new_block.desc.append_op() ap_op.copy_from(op.desc) ap_op._set_attr(OP_DEVICE_KEY, op.attr(OP_DEVICE_KEY)) vars = op.desc.input_arg_names() + op.desc.output_arg_names() self._create_var_for_block(vars, new_block) new_block._sync_with_cpp() return new_block def _find_joint_forward_op(self, block, flag): op_idx = 0 for op in block.ops: if is_forward_op(op) and op.attr(OP_DEVICE_KEY) == flag: return op_idx else: op_idx += 1 return op_idx def _find_joint_backward_op(self, block, flag): op_idx = 0 for op in block.ops: if 
is_backward_op(op) and op.attr(OP_DEVICE_KEY) == flag: return op_idx else: op_idx += 1 return op_idx def _get_partB_to_partA_grad(self, block, flag): op_idx = self._find_joint_backward_op(block, flag) op = block.ops[op_idx] vars1 = op.desc.input_arg_names() op_idx = self._find_joint_forward_op(block, flag) op = block.ops[op_idx] vars2 = op.desc.output_arg_names() self.partB_to_partA_grad_name = list(set(vars1) - set(vars2)) self.partB_to_partA_grad = [] for var_name in self.partB_to_partA_grad_name: self.partB_to_partA_grad.append(self.ori_main_block.var(var_name)) def _find_dense_grad_vars(self, bp_op_list): program = self.ori_main_program bp_op_input, bp_op_output = find_ops_list_input_output( program, bp_op_list) return (screen_persistables(program, bp_op_input) + screen_persistables(program, bp_op_output)) def _get_partA_program(self, block): # 1. create block 0 # 1.1 insert send op op_idx = self._find_joint_forward_op(block, self.PART_A_JOINT_OP_DEVICE_FlAG) op_list = [] for i in range(len(block.ops)): op = block.ops[i] op_list.append(op) if i == op_idx: out_name = op.desc.output_arg_names()[0] self.partA_to_partB_tensor_name = op.desc.output_arg_names() self.partA_to_partB_tensor = self.ori_main_block.var(out_name) break first_block = self._get_block_by_idx(op_list, self.partA_program, 0) self._insert_partA_communicate_op(first_block, op_idx + 1) # logger.info('partA-first_block:{}'.format(first_block)) # 2. create block 1 bp_op_list = get_bp_op_list(block) push_sparse_op_list = get_distributed_push_sparse_op_list(block) # logger.info('bp_op_list: {}'.format(bp_op_list)) second_block = self._get_block_by_idx(bp_op_list + push_sparse_op_list, self.partA_program, 1) # 2.1. 
insert partA recv op block_input_flag = "backward_joint_{}_{}@fl_ps".format(2, 1) grad_to_block_id = block_input_flag + ":" + str(second_block.idx) attrs = { "message_to_block_id": [grad_to_block_id], "optimize_blocks": [second_block], "endpoint": get_trainer_endpoint(self.role_maker), ## "fanin": 0, "pserver_id": get_role_id(self.role_maker), "distributed_mode": self.ps_mode, "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE } second_block._insert_op(index=0, type='heter_listen_and_serv', inputs={'X': []}, outputs={}, attrs=attrs) # 2.2 insert push dense grad op send_ops = find_send_op(self.ori_main_program) # push dense delete_same_ops(block, send_ops) dense_grad_vars = self._find_dense_grad_vars(bp_op_list) add_send_op(self.ori_main_program, second_block, dense_grad_vars) # logger.info('partA-second_block:{}'.format(second_block)) def _get_partB_program(self, block): op_idx1 = self._find_joint_forward_op( block, self.PART_B_JOINT_OP_DEVICE_FlAG) # elementwise_add op op_idx2 = self._find_joint_backward_op(block, self.PART_B_JOINT_OP_DEVICE_FlAG) op_cnt = 0 op_list1 = [] op_list2 = [] op_list3 = [] for op in block.ops: if op_cnt < op_idx1: op_list1.append(op) elif op_cnt <= op_idx2: op_list2.append(op) else: op_list3.append(op) op_cnt += 1 # 1. create block 0 first_block = self._get_block_by_idx(op_list1, self.partB_program, 0) # 2. create block 1 second_block = self._get_block_by_idx(op_list2, self.partB_program, 1) # 2.1 insert send op self._insert_partB_communicate_op(second_block, len(op_list2)) # 2.2 insert remain ops second_block = self._get_block_by_idx(op_list3, self.partB_program, 1) # 2.3 insert push dense grad op bp_op_list = get_bp_op_list(second_block) dense_grad_vars = self._find_dense_grad_vars(bp_op_list) add_send_op(self.ori_main_program, second_block, dense_grad_vars) # 3. 
insert partB recv op block_input_flag = "forward_joint_{}_{}@fl_ps".format(1, 2) grad_to_block_id = block_input_flag + ":" + str(second_block.idx) attrs = { "message_to_block_id": [grad_to_block_id], "optimize_blocks": [second_block], ## what to do? "endpoint": get_heter_worker_endpoint(self.role_maker), "fanin": len(get_previous_stage_trainers(self.role_maker)), "pserver_id": 1, # TODO "distributed_mode": self.ps_mode, "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)), RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE } first_block._insert_op(index=len(op_list1), type="heter_listen_and_serv", inputs={'X': []}, outputs={}, attrs=attrs) #logger.info('partB-first_block:{}'.format(first_block)) #logger.info('partB-second_block:{}'.format(second_block)) def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs self.role_maker = attrs['role_maker'] self.ps_mode = attrs['ps_mode'] self.is_part_b = attrs['is_heter_worker'] # TODO self.ori_main_program = main_program self.ori_main_block = main_program.block(0) party_program_map = self._split_fl_program() prog_a = party_program_map['a'] _main_file = ps_log_root_dir + '6_fl_A_main_program.prototxt' debug_program(_main_file, prog_a) self._get_partB_to_partA_grad(prog_a.global_block(), self.PART_A_JOINT_OP_DEVICE_FlAG) prog_b = party_program_map['b'] _main_file = ps_log_root_dir + '6_fl_B_main_program.prototxt' debug_program(_main_file, prog_b) if not self.is_part_b: self.partA_program = framework.Program() self._get_partA_program(prog_a.global_block()) pass_ctx._attrs['part_a_main_program'] = self.partA_program self._clear_op_device_flag(self.partA_program) check_program(self.partA_program) else: self.partB_program = framework.Program() self._get_partB_program(prog_b.global_block()) pass_ctx._attrs['part_b_main_program'] = self.partB_program self._clear_op_device_flag(self.partB_program) check_program(self.partB_program)
43.210894
205
0.544555
4a016064cb187681c7631c7c544c538d88c63a82
3,774
py
Python
code/redis_support_py3/mqtt_message_processing_py3.py
NanoDataCenter/nano_data_center
76ad521e1a5139a37df80214af1413d2fd4ade60
[ "MIT" ]
2
2018-02-21T03:46:51.000Z
2019-12-24T16:40:51.000Z
code/redis_support_py3/mqtt_message_processing_py3.py
NanoDataCenter/nano_data_center
76ad521e1a5139a37df80214af1413d2fd4ade60
[ "MIT" ]
7
2020-07-16T19:54:08.000Z
2022-03-02T03:29:07.000Z
code/redis_support_py3/mqtt_message_processing_py3.py
NanoDataCenter/nano_data_center
76ad521e1a5139a37df80214af1413d2fd4ade60
[ "MIT" ]
2
2018-04-16T07:02:35.000Z
2020-07-23T21:57:19.000Z
import time class MQTT_Message_Processing(object): def __init__(self): self.message_handlers = {} self.message_handlers["analog_input" ] = self.process_analog_input self.message_handlers["flat" ] = self.process_flat_input self.message_handlers["pulse_flow" ] = self.process_pulse_flow self.analog_handlers = {} self.analog_handlers["analog" ] = self.process_raw_analog self.analog_handlers["pressure_gauge" ] = self.process_pressure_gauge self.analog_handlers["rms_current_transformer" ] = self.process_current_transformer def process_mqtt_message(self,data_def,data_key,data): if data_def["type"] in self.message_handlers: return self.message_handlers[data_def["type"]](data_def,data_key,data) else: raise ValueError("unsupported message type") def find_definition_record(self,data_key,data_def): for i in data_def["fields"]: if i["name"] == data_key: return i raise ValueError("no matching data key") def process_analog_input(self,data_def,data_key,total_data): if data_key == None: raise ValueError("requires data_key") definition_record = self.find_definition_record(data_key,data_def) measurement_key = data_def["main_field"] data = total_data[str.encode(measurement_key)] return self.analog_handlers[definition_record["type"]](definition_record,data) def process_raw_analog(self,definition_record,raw_data): sensor_field = str.encode(definition_record["channel_field"]) sensor_element = definition_record["channel_value"] for i in raw_data: if i[sensor_field] == sensor_element: #print("sensor data",i[b"DC"] ) return i[b"DC"] raise ValueError("sensor field not found") def process_pressure_gauge(self,definition_record,raw_data): sensor_data = self.process_raw_analog(definition_record,raw_data) reduction = definition_record["reduction"] range = definition_record["range"] sensor_data = (sensor_data*reduction) -.5 sensor_data = (sensor_data/4.0)*range return sensor_data def process_current_transformer(self,definition_record,raw_data): sensor_data = 
self.process_raw_analog(definition_record,raw_data) resistor = definition_record["resistor"] range = definition_record["range"] current = sensor_data/resistor current = current - .004 value = current/.016*range return value #{"name":"MAIN_FLOW_METER", "GPIO_PIN":5,"data_field":"COUNTS","conversion":4./2./60./2.0 }, def process_pulse_flow(self,data_def,data_key,total_data): if data_key == None: raise ValueError("requires data_key") definition_record = self.find_definition_record(data_key,data_def) measurement_value = definition_record["GPIO_PIN"] measurement_key = data_def["main_field"] data = total_data[str.encode(measurement_key)] for i in data: if i[b"GPIO_PIN"] == measurement_value: rate = i[b"COUNTS"]*definition_record["conversion"] return rate raise ValueError("Flow meter not in data") def process_flat_input(self,data_def,data_key,data): result = {} #print("data",data) for i in data_def["fields"]: result[i["name"]] = data[str.encode(i["field"])] result["timestamp"] = time.time() return result
34.623853
96
0.630366