max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
Simulator/bipartite_graph_matrix.py
nicoladainese96/CoopVM
0
6613251
# -*- coding: utf-8 -*- """ Created on Wed Aug 8 16:40:39 2018 @author: <EMAIL> """ def adjacency_matrix_rnd(S1 = 10, S2 = 10, p = 0.33, dir_name = 'graph', index = 1, index2 = 1, flag = False): import networkx as nx from networkx.algorithms import bipartite import matplotlib.pyplot as plt import os from ensure_dir import ensure_dir import my_print as my import my_output as O file_path = dir_name+'/prova.txt' ensure_dir(file_path) out_dir = os.path.dirname(file_path) script_dir = os.getcwd() os.chdir(out_dir) #connectance IS NOT fixed, but its average <C> is equal to p #anyway on 100 realization no significant difference is detected G = bipartite.random_graph(S1, S2, p) deg = list(G.degree()) deg1 = deg[:S1] deg2 = deg[S1:] pos = {x:[0,x] for x in range(S1)} for j in range(S2): pos[S1+j] = [1,j] colors = ['r' for i in range(0,S1)] for j in range(S2): colors.append('b') fig, [ax1, ax] = plt.subplots(1,2, figsize = (9,4)) ax1.set_title('Interazioni mutualistiche casuali') ax1.set_axis_off() ax1.set_autoscale_on(True) nx.draw_networkx(G, pos = pos, node_color = colors, ax = ax1) A = nx.to_numpy_matrix(G) A2 = A.getA() A1 = [] for x in range(S1): A1.append(A2[x][S1:]) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Configurazione casuale con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.imshow(A1, cmap = 'Greys') if index == 1: fig.savefig('random_'+format(p,'.2f')+'.png') plt.close() if flag == False: if index == 1: my.print_tuple2(deg1, 'R_degree1-'+repr(index2), dir_name) my.print_tuple2(deg2, 'R_degree2-'+repr(index2), dir_name) else: O.print_list_csv(deg1, 'deg1_R-'+repr(index), dir_name) O.print_list_csv(deg2, 'deg2_R-'+repr(index), dir_name) os.chdir(script_dir) return A1 def adjacency_matrix_rnd2(S1 = 10, S2 = 10, p = 0.33, dir_name = 'graph', index = 1, index2 = 1, flag = False): import networkx as nx 
from networkx.algorithms import bipartite import matplotlib.pyplot as plt import os from ensure_dir import ensure_dir import my_print as my import my_output as O script_dir = os.getcwd() #print('dir_name = ', dir_name) #k is the number of non-null elements of the mutualistic matrix #in this case p = C in each realization k = int(round(S1*S2*p,0)) G = bipartite.gnmk_random_graph(S1, S2, k) #G = bipartite.random_graph(S1, S2, p) num_conn = G.number_of_edges() if num_conn != k: #this checks out if the number of connections of the random graphs is really k print('Problema numero connessioni.') #prints an alert otherwise print('# = ', num_conn) deg = list(G.degree()) deg1 = deg[:S1] deg2 = deg[S1:] pos = {x:[0,x] for x in range(S1)} for j in range(S2): pos[S1+j] = [1,j] colors = ['r' for i in range(0,S1)] for j in range(S2): colors.append('b') A = nx.to_numpy_matrix(G) A2 = A.getA() A1 = [] for x in range(S1): A1.append(A2[x][S1:]) if index == 1: plt.style.use('seaborn') file_path = dir_name+'/prova.txt' ensure_dir(file_path) directory = os.path.dirname(file_path) os.chdir(directory) fig, [ax1, ax] = plt.subplots(1,2, figsize = (10,4)) #fig, [ax1, ax] = plt.subplots(1,2) ax1.set_title('Interazioni mutualistiche casuali') ax1.set_axis_off() ax1.set_autoscale_on(True) nx.draw_networkx(G, pos = pos, node_color = colors, ax = ax1) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Configurazione casuale con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.imshow(A1, cmap = 'Greys') plt.tight_layout() fig.savefig('random_'+format(p,'.2f')+'.png') plt.close() else: file_path = dir_name+'/random_matrix/prova.txt' ensure_dir(file_path) directory = os.path.dirname(file_path) os.chdir(directory) fig = plt.figure() ax = fig.add_subplot(111) 
ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) ax.set_title('Matrice random realizzazione {}'.format(index)) ax.set_xlabel('S2 = {}'.format(S2)) ax.set_ylabel('S1 = {}'.format(S1)) ax.imshow(A1, cmap = 'Greys') fig.savefig('random_'+format(p,'.2f')+'-'+repr(index)+'.png') plt.close() #else: # O.print_list_csv(deg1, 'deg1_R-'+repr(index), dir_name) # O.print_list_csv(deg2, 'deg2_R-'+repr(index), dir_name) os.chdir(script_dir) return A1 def adjacency_matrix_nested (S1 = 10, S2 = 10, p = 0.33, dir_name = 'graph', index = 1, index2 = 1, flag = False): #asymmetric nested (for low values of connectances) import networkx as nx import matplotlib.pyplot as plt import os from ensure_dir import ensure_dir import my_print as my import my_output as O script_dir = os.getcwd() file_path = dir_name+'/prova.txt' ensure_dir(file_path) directory = os.path.dirname(file_path) os.chdir(directory) #attenzione, funziona bene solo con S1 = S2 G = nx.Graph() nodes = [x for x in range(S1+S2)] G.add_nodes_from(nodes) #print(list(G.nodes())) edges = [] for i in range(S1): for j in range(S1, S1+S2-i): edges.append((i,j)) G.add_edges_from(edges) if p < 0.55: G = adjust_edges(G, S1, S2, p) #this is where the actual edges are decided in most cases (C < 0.55) if p > 0.55: G = adjust_edges2(G, S1, S2, p) #this should be usless in all real-connectance cases deg = list(G.degree()) deg1 = deg[:S1] deg2 = deg[S1:] pos = {x:[0,x] for x in range(S1)} for j in range(S2): pos[S1+j] = [1,j] colors = ['r' for i in range(0,S1)] for j in range(S2): colors.append('b') A = nx.to_numpy_matrix(G) A2 = A.getA() A1 = [] for x in range(S1): A1.append(A2[x][S1:]) #togliere quando finito if index == 0: plt.style.use('seaborn') fig, [ax1, ax] = plt.subplots(1,2, figsize = (10,4)) ax1.set_title('Interazioni mutualistiche nidificate') 
ax1.set_axis_off() ax1.set_autoscale_on(True) nx.draw_networkx(G, pos = pos, node_color = colors, ax=ax1) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Configurazione nidificata con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.tight_layout() plt.imshow(A1, cmap = 'Greys') #rimuovere questa parte if index == 1: plt.style.use('seaborn') fig = plt.figure() ax = fig.add_subplot(111) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Nested I tipo con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.tight_layout() plt.imshow(A1, cmap = 'Greys') if index == 1: fig.savefig('nested_'+format(p,'.2f')+'.png') if flag == False: if index == 1: my.print_tuple2(deg1, 'N_degree1-'+repr(index2), dir_name) my.print_tuple2(deg2, 'N_degree2-'+repr(index2), dir_name) else: O.print_list_csv(deg1, 'deg1_N-'+repr(index), dir_name) O.print_list_csv(deg2, 'deg2_N-'+repr(index), dir_name) plt.close() os.chdir(script_dir) return A1 def adjacency_matrix_nested2 (S1 = 10, S2 = 10, p = 0.33, dir_name = 'graph', index = 1, index2 = 1, flag = False): #balanced nested import networkx as nx import matplotlib.pyplot as plt import os from ensure_dir import ensure_dir import my_print as my import my_output as O script_dir = os.getcwd() file_path = dir_name+'/prova.txt' ensure_dir(file_path) directory = os.path.dirname(file_path) os.chdir(directory) #attenzione, funziona bene solo con S1 = S2 G = nx.Graph() nodes = [x for x in range(S1+S2)] G.add_nodes_from(nodes) #print(list(G.nodes())) edges = [] for i in 
range(S1): for j in range(S1, S1+S2-i): edges.append((i,j)) G.add_edges_from(edges) if p < 0.55: #this is where the actual edges are decided in most cases (C < 0.55) - different method G = adjust_edges1(G, S1, S2, p) if p > 0.55: G = adjust_edges2(G, S1, S2, p) deg = list(G.degree()) deg1 = deg[:S1] deg2 = deg[S1:] pos = {x:[0,x] for x in range(S1)} for j in range(S2): pos[S1+j] = [1,j] colors = ['r' for i in range(0,S1)] for j in range(S2): colors.append('b') A = nx.to_numpy_matrix(G) A2 = A.getA() A1 = [] for x in range(S1): A1.append(A2[x][S1:]) #rimettere if index == 1 if index == 0: plt.style.use('seaborn') fig, [ax1, ax] = plt.subplots(1,2, figsize = (10,4)) ax1.set_title('Interazioni mutualistiche nidificate') ax1.set_axis_off() ax1.set_autoscale_on(True) nx.draw_networkx(G, pos = pos, node_color = colors, ax=ax1) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Configurazione nidificata con C = '+format(p,'.2f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.imshow(A1, cmap = 'Greys') plt.tight_layout() fig.savefig('nested_'+format(p,'.2f')+'_II.png') if index == 1: fig = plt.figure() ax = fig.add_subplot(111) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Nested II tipo con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.tight_layout() plt.imshow(A1, cmap = 'Greys') if index == 1: fig.savefig('nested_'+format(p,'.2f')+'_II.png') if flag == False: if index == 1: my.print_tuple2(deg1, 'N_degree1-'+repr(index2), dir_name) my.print_tuple2(deg2, 'N_degree2-'+repr(index2), dir_name) else: 
O.print_list_csv(deg1, 'deg1_N-'+repr(index), dir_name) O.print_list_csv(deg2, 'deg2_N-'+repr(index), dir_name) plt.close() os.chdir(script_dir) return A1 def adjust_edges(G, S1, S2, C): #first method of adjusting edges - asymmetric for low Cs import networkx as nx in_edges = G.number_of_edges() #print(in_edges) fin_edges = int(round(C*S1*S2,0)) #number of final edges to be obtained #print(fin_edges) delta = in_edges - fin_edges #final < initial #print(delta) edges = list(G.edges()) #print(edges) index = [] for i in range(len(edges)): #we start with a triangular matrix [1/0] #edges with lower values will be the first ones to be recided #untill the request connectance is achieved i_x_i = edges[i][0]*(edges[i][1]-S1) #this is the algorithm that assign a value to each edge index.append(i_x_i) #print(index) M = max(index) sort_edg = [] for i in range(0,M+1): for j in range(len(edges)): if index[j] == i: sort_edg.append(edges[j]) #this list contains the edges ordered with increasing values #print(sort_edg) new_edges = sort_edg[:-delta] F = nx.Graph() nodes = [x for x in range(S1+S2)] F.add_nodes_from(nodes) F.add_edges_from(new_edges) return F def adjust_edges1(G, S1, S2, C): #second method of adjusting edges - more balanced import networkx as nx in_edges = G.number_of_edges() #print('in edges = ', in_edges, '\n') fin_edges = int(round(C*S1*S2,0)) #print('fin_edges = ', fin_edges, '\n') delta = in_edges - fin_edges #print('delta = ', delta, '\n') edges = list(G.edges()) #print('edges = ', edges, '\n') index = [] for i in range(len(edges)): if i == 0: i_x_i = (edges[i][0]+2)*(edges[i][1]-S1+2) else: i_x_i = (edges[i][0]+1)*(edges[i][1]-S1+1) index.append(i_x_i) #print('index = ', index, '\n') M = max(index) sort_edg = [] for i in range(0,M+1): for j in range(len(edges)): if index[j] == i: sort_edg.append(edges[j]) #print(sort_edg) new_edges = sort_edg[:-delta] F = nx.Graph() nodes = [x for x in range(S1+S2)] F.add_nodes_from(nodes) F.add_edges_from(new_edges) return F 
def adjust_edges2(G, S1, S2, C): #import networkx as nx Sq = S1*S2 conn = check_connectance (G, Sq) while conn < C: for i in range(S1): if conn > C: break deg = G.degree(i) if deg < S2: G.add_edge(i,deg+S1) conn = check_connectance(G, Sq) else: continue return G def check_connectance (G, Sq): #import networkx as nx in_edges = G.number_of_edges() C = in_edges/(Sq) return C
# -*- coding: utf-8 -*- """ Created on Wed Aug 8 16:40:39 2018 @author: <EMAIL> """ def adjacency_matrix_rnd(S1 = 10, S2 = 10, p = 0.33, dir_name = 'graph', index = 1, index2 = 1, flag = False): import networkx as nx from networkx.algorithms import bipartite import matplotlib.pyplot as plt import os from ensure_dir import ensure_dir import my_print as my import my_output as O file_path = dir_name+'/prova.txt' ensure_dir(file_path) out_dir = os.path.dirname(file_path) script_dir = os.getcwd() os.chdir(out_dir) #connectance IS NOT fixed, but its average <C> is equal to p #anyway on 100 realization no significant difference is detected G = bipartite.random_graph(S1, S2, p) deg = list(G.degree()) deg1 = deg[:S1] deg2 = deg[S1:] pos = {x:[0,x] for x in range(S1)} for j in range(S2): pos[S1+j] = [1,j] colors = ['r' for i in range(0,S1)] for j in range(S2): colors.append('b') fig, [ax1, ax] = plt.subplots(1,2, figsize = (9,4)) ax1.set_title('Interazioni mutualistiche casuali') ax1.set_axis_off() ax1.set_autoscale_on(True) nx.draw_networkx(G, pos = pos, node_color = colors, ax = ax1) A = nx.to_numpy_matrix(G) A2 = A.getA() A1 = [] for x in range(S1): A1.append(A2[x][S1:]) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Configurazione casuale con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.imshow(A1, cmap = 'Greys') if index == 1: fig.savefig('random_'+format(p,'.2f')+'.png') plt.close() if flag == False: if index == 1: my.print_tuple2(deg1, 'R_degree1-'+repr(index2), dir_name) my.print_tuple2(deg2, 'R_degree2-'+repr(index2), dir_name) else: O.print_list_csv(deg1, 'deg1_R-'+repr(index), dir_name) O.print_list_csv(deg2, 'deg2_R-'+repr(index), dir_name) os.chdir(script_dir) return A1 def adjacency_matrix_rnd2(S1 = 10, S2 = 10, p = 0.33, dir_name = 'graph', index = 1, index2 = 1, flag = False): import networkx as nx 
from networkx.algorithms import bipartite import matplotlib.pyplot as plt import os from ensure_dir import ensure_dir import my_print as my import my_output as O script_dir = os.getcwd() #print('dir_name = ', dir_name) #k is the number of non-null elements of the mutualistic matrix #in this case p = C in each realization k = int(round(S1*S2*p,0)) G = bipartite.gnmk_random_graph(S1, S2, k) #G = bipartite.random_graph(S1, S2, p) num_conn = G.number_of_edges() if num_conn != k: #this checks out if the number of connections of the random graphs is really k print('Problema numero connessioni.') #prints an alert otherwise print('# = ', num_conn) deg = list(G.degree()) deg1 = deg[:S1] deg2 = deg[S1:] pos = {x:[0,x] for x in range(S1)} for j in range(S2): pos[S1+j] = [1,j] colors = ['r' for i in range(0,S1)] for j in range(S2): colors.append('b') A = nx.to_numpy_matrix(G) A2 = A.getA() A1 = [] for x in range(S1): A1.append(A2[x][S1:]) if index == 1: plt.style.use('seaborn') file_path = dir_name+'/prova.txt' ensure_dir(file_path) directory = os.path.dirname(file_path) os.chdir(directory) fig, [ax1, ax] = plt.subplots(1,2, figsize = (10,4)) #fig, [ax1, ax] = plt.subplots(1,2) ax1.set_title('Interazioni mutualistiche casuali') ax1.set_axis_off() ax1.set_autoscale_on(True) nx.draw_networkx(G, pos = pos, node_color = colors, ax = ax1) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Configurazione casuale con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.imshow(A1, cmap = 'Greys') plt.tight_layout() fig.savefig('random_'+format(p,'.2f')+'.png') plt.close() else: file_path = dir_name+'/random_matrix/prova.txt' ensure_dir(file_path) directory = os.path.dirname(file_path) os.chdir(directory) fig = plt.figure() ax = fig.add_subplot(111) 
ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) ax.set_title('Matrice random realizzazione {}'.format(index)) ax.set_xlabel('S2 = {}'.format(S2)) ax.set_ylabel('S1 = {}'.format(S1)) ax.imshow(A1, cmap = 'Greys') fig.savefig('random_'+format(p,'.2f')+'-'+repr(index)+'.png') plt.close() #else: # O.print_list_csv(deg1, 'deg1_R-'+repr(index), dir_name) # O.print_list_csv(deg2, 'deg2_R-'+repr(index), dir_name) os.chdir(script_dir) return A1 def adjacency_matrix_nested (S1 = 10, S2 = 10, p = 0.33, dir_name = 'graph', index = 1, index2 = 1, flag = False): #asymmetric nested (for low values of connectances) import networkx as nx import matplotlib.pyplot as plt import os from ensure_dir import ensure_dir import my_print as my import my_output as O script_dir = os.getcwd() file_path = dir_name+'/prova.txt' ensure_dir(file_path) directory = os.path.dirname(file_path) os.chdir(directory) #attenzione, funziona bene solo con S1 = S2 G = nx.Graph() nodes = [x for x in range(S1+S2)] G.add_nodes_from(nodes) #print(list(G.nodes())) edges = [] for i in range(S1): for j in range(S1, S1+S2-i): edges.append((i,j)) G.add_edges_from(edges) if p < 0.55: G = adjust_edges(G, S1, S2, p) #this is where the actual edges are decided in most cases (C < 0.55) if p > 0.55: G = adjust_edges2(G, S1, S2, p) #this should be usless in all real-connectance cases deg = list(G.degree()) deg1 = deg[:S1] deg2 = deg[S1:] pos = {x:[0,x] for x in range(S1)} for j in range(S2): pos[S1+j] = [1,j] colors = ['r' for i in range(0,S1)] for j in range(S2): colors.append('b') A = nx.to_numpy_matrix(G) A2 = A.getA() A1 = [] for x in range(S1): A1.append(A2[x][S1:]) #togliere quando finito if index == 0: plt.style.use('seaborn') fig, [ax1, ax] = plt.subplots(1,2, figsize = (10,4)) ax1.set_title('Interazioni mutualistiche nidificate') 
ax1.set_axis_off() ax1.set_autoscale_on(True) nx.draw_networkx(G, pos = pos, node_color = colors, ax=ax1) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Configurazione nidificata con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.tight_layout() plt.imshow(A1, cmap = 'Greys') #rimuovere questa parte if index == 1: plt.style.use('seaborn') fig = plt.figure() ax = fig.add_subplot(111) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Nested I tipo con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.tight_layout() plt.imshow(A1, cmap = 'Greys') if index == 1: fig.savefig('nested_'+format(p,'.2f')+'.png') if flag == False: if index == 1: my.print_tuple2(deg1, 'N_degree1-'+repr(index2), dir_name) my.print_tuple2(deg2, 'N_degree2-'+repr(index2), dir_name) else: O.print_list_csv(deg1, 'deg1_N-'+repr(index), dir_name) O.print_list_csv(deg2, 'deg2_N-'+repr(index), dir_name) plt.close() os.chdir(script_dir) return A1 def adjacency_matrix_nested2 (S1 = 10, S2 = 10, p = 0.33, dir_name = 'graph', index = 1, index2 = 1, flag = False): #balanced nested import networkx as nx import matplotlib.pyplot as plt import os from ensure_dir import ensure_dir import my_print as my import my_output as O script_dir = os.getcwd() file_path = dir_name+'/prova.txt' ensure_dir(file_path) directory = os.path.dirname(file_path) os.chdir(directory) #attenzione, funziona bene solo con S1 = S2 G = nx.Graph() nodes = [x for x in range(S1+S2)] G.add_nodes_from(nodes) #print(list(G.nodes())) edges = [] for i in 
range(S1): for j in range(S1, S1+S2-i): edges.append((i,j)) G.add_edges_from(edges) if p < 0.55: #this is where the actual edges are decided in most cases (C < 0.55) - different method G = adjust_edges1(G, S1, S2, p) if p > 0.55: G = adjust_edges2(G, S1, S2, p) deg = list(G.degree()) deg1 = deg[:S1] deg2 = deg[S1:] pos = {x:[0,x] for x in range(S1)} for j in range(S2): pos[S1+j] = [1,j] colors = ['r' for i in range(0,S1)] for j in range(S2): colors.append('b') A = nx.to_numpy_matrix(G) A2 = A.getA() A1 = [] for x in range(S1): A1.append(A2[x][S1:]) #rimettere if index == 1 if index == 0: plt.style.use('seaborn') fig, [ax1, ax] = plt.subplots(1,2, figsize = (10,4)) ax1.set_title('Interazioni mutualistiche nidificate') ax1.set_axis_off() ax1.set_autoscale_on(True) nx.draw_networkx(G, pos = pos, node_color = colors, ax=ax1) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Configurazione nidificata con C = '+format(p,'.2f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.imshow(A1, cmap = 'Greys') plt.tight_layout() fig.savefig('nested_'+format(p,'.2f')+'_II.png') if index == 1: fig = plt.figure() ax = fig.add_subplot(111) ax.grid() xtics = [x-0.5 for x in range(0,S2+1)] ytics = [x-0.5 for x in range(0,S1+1)] ax.set_yticks(ytics) ax.set_xticks(xtics) ax.set_ylabel('Impollinatori') ax.set_xlabel('Piante') ax.set_title('Nested II tipo con C = '+format(p,'.3f')) ax.set_autoscale_on(True) plt.setp(ax.get_xticklabels(), visible=False) plt.setp(ax.get_yticklabels(), visible=False) plt.tight_layout() plt.imshow(A1, cmap = 'Greys') if index == 1: fig.savefig('nested_'+format(p,'.2f')+'_II.png') if flag == False: if index == 1: my.print_tuple2(deg1, 'N_degree1-'+repr(index2), dir_name) my.print_tuple2(deg2, 'N_degree2-'+repr(index2), dir_name) else: 
O.print_list_csv(deg1, 'deg1_N-'+repr(index), dir_name) O.print_list_csv(deg2, 'deg2_N-'+repr(index), dir_name) plt.close() os.chdir(script_dir) return A1 def adjust_edges(G, S1, S2, C): #first method of adjusting edges - asymmetric for low Cs import networkx as nx in_edges = G.number_of_edges() #print(in_edges) fin_edges = int(round(C*S1*S2,0)) #number of final edges to be obtained #print(fin_edges) delta = in_edges - fin_edges #final < initial #print(delta) edges = list(G.edges()) #print(edges) index = [] for i in range(len(edges)): #we start with a triangular matrix [1/0] #edges with lower values will be the first ones to be recided #untill the request connectance is achieved i_x_i = edges[i][0]*(edges[i][1]-S1) #this is the algorithm that assign a value to each edge index.append(i_x_i) #print(index) M = max(index) sort_edg = [] for i in range(0,M+1): for j in range(len(edges)): if index[j] == i: sort_edg.append(edges[j]) #this list contains the edges ordered with increasing values #print(sort_edg) new_edges = sort_edg[:-delta] F = nx.Graph() nodes = [x for x in range(S1+S2)] F.add_nodes_from(nodes) F.add_edges_from(new_edges) return F def adjust_edges1(G, S1, S2, C): #second method of adjusting edges - more balanced import networkx as nx in_edges = G.number_of_edges() #print('in edges = ', in_edges, '\n') fin_edges = int(round(C*S1*S2,0)) #print('fin_edges = ', fin_edges, '\n') delta = in_edges - fin_edges #print('delta = ', delta, '\n') edges = list(G.edges()) #print('edges = ', edges, '\n') index = [] for i in range(len(edges)): if i == 0: i_x_i = (edges[i][0]+2)*(edges[i][1]-S1+2) else: i_x_i = (edges[i][0]+1)*(edges[i][1]-S1+1) index.append(i_x_i) #print('index = ', index, '\n') M = max(index) sort_edg = [] for i in range(0,M+1): for j in range(len(edges)): if index[j] == i: sort_edg.append(edges[j]) #print(sort_edg) new_edges = sort_edg[:-delta] F = nx.Graph() nodes = [x for x in range(S1+S2)] F.add_nodes_from(nodes) F.add_edges_from(new_edges) return F 
def adjust_edges2(G, S1, S2, C): #import networkx as nx Sq = S1*S2 conn = check_connectance (G, Sq) while conn < C: for i in range(S1): if conn > C: break deg = G.degree(i) if deg < S2: G.add_edge(i,deg+S1) conn = check_connectance(G, Sq) else: continue return G def check_connectance (G, Sq): #import networkx as nx in_edges = G.number_of_edges() C = in_edges/(Sq) return C
en
0.662721
# -*- coding: utf-8 -*- Created on Wed Aug 8 16:40:39 2018 @author: <EMAIL> #connectance IS NOT fixed, but its average <C> is equal to p #anyway on 100 realization no significant difference is detected #print('dir_name = ', dir_name) #k is the number of non-null elements of the mutualistic matrix #in this case p = C in each realization #G = bipartite.random_graph(S1, S2, p) #this checks out if the number of connections of the random graphs is really k #prints an alert otherwise #fig, [ax1, ax] = plt.subplots(1,2) #else: # O.print_list_csv(deg1, 'deg1_R-'+repr(index), dir_name) # O.print_list_csv(deg2, 'deg2_R-'+repr(index), dir_name) #asymmetric nested (for low values of connectances) #attenzione, funziona bene solo con S1 = S2 #print(list(G.nodes())) #this is where the actual edges are decided in most cases (C < 0.55) #this should be usless in all real-connectance cases #togliere quando finito #rimuovere questa parte #balanced nested #attenzione, funziona bene solo con S1 = S2 #print(list(G.nodes())) #this is where the actual edges are decided in most cases (C < 0.55) - different method #rimettere if index == 1 #first method of adjusting edges - asymmetric for low Cs #print(in_edges) #number of final edges to be obtained #print(fin_edges) #final < initial #print(delta) #print(edges) #we start with a triangular matrix [1/0] #edges with lower values will be the first ones to be recided #untill the request connectance is achieved #this is the algorithm that assign a value to each edge #print(index) #this list contains the edges ordered with increasing values #print(sort_edg) #second method of adjusting edges - more balanced #print('in edges = ', in_edges, '\n') #print('fin_edges = ', fin_edges, '\n') #print('delta = ', delta, '\n') #print('edges = ', edges, '\n') #print('index = ', index, '\n') #print(sort_edg) #import networkx as nx #import networkx as nx
2.742739
3
1_strategy/rubber_duck.py
hypersport/Head-First-Design-Patterns-Python
0
6613252
<reponame>hypersport/Head-First-Design-Patterns-Python from duck import Duck from fly_no_way import FlyNoWay from squeak import Squeak class RubberDuck(Duck): def __init__(self): self.set_fly_behavior(FlyNoWay()) self.set_quack_behavior(Squeak()) def display(self): print('I\'m a real Rubber Duck')
from duck import Duck from fly_no_way import FlyNoWay from squeak import Squeak class RubberDuck(Duck): def __init__(self): self.set_fly_behavior(FlyNoWay()) self.set_quack_behavior(Squeak()) def display(self): print('I\'m a real Rubber Duck')
none
1
2.651242
3
exercises/simple-cipher/simple_cipher_test.py
akashsara/python
1
6613253
import unittest import re from simple_cipher import Cipher # Tests adapted from `problem-specifications//canonical-data.json` @ v2.0.0 class SimpleCipherTest(unittest.TestCase): # Utility functions def setUp(self): try: self.assertRaisesRegex except AttributeError: self.assertRaisesRegex = self.assertRaisesRegexp def assertRaisesWithMessage(self, exception): return self.assertRaisesRegex(exception, r".+") class RandomKeyCipherTest(SimpleCipherTest): def test_can_encode(self): cipher = Cipher() plaintext = 'aaaaaaaaaa' self.assertEqual(cipher.encode(plaintext), cipher.key[:len(plaintext)]) def test_can_decode(self): cipher = Cipher() plaintext = 'aaaaaaaaaa' self.assertEqual(cipher.decode(cipher.key[:len(plaintext)]), plaintext) def test_is_reversible(self): cipher = Cipher() plaintext = 'abcdefghij' self.assertEqual(cipher.decode(cipher.encode(plaintext)), plaintext) def test_key_is_only_made_of_lowercase_letters(self): self.assertIsNotNone(re.match('^[a-z]+$', Cipher().key)) class SubstitutionCipherTest(SimpleCipherTest): def test_can_encode(self): cipher = Cipher('abcdefghij') self.assertEqual(cipher.encode('aaaaaaaaaa'), cipher.key) def test_can_decode(self): cipher = Cipher('abcdefghij') self.assertEqual(cipher.decode(cipher.key), 'aaaaaaaaaa') def test_is_reversible(self): cipher = Cipher('abcdefghij') plaintext = 'abcdefghij' self.assertEqual(cipher.decode(cipher.encode(plaintext)), plaintext) def test_can_double_shift_encode(self): plaintext = 'iamapandabear' cipher = Cipher(plaintext) self.assertEqual(cipher.encode(plaintext), 'qayaeaagaciai') def test_can_wrap_on_encode(self): cipher = Cipher('abcdefghij') self.assertEqual(cipher.encode('zzzzzzzzzz'), 'zabcdefghi') def test_can_wrap_on_decode(self): cipher = Cipher('abcdefghij') self.assertEqual(cipher.decode('zabcdefghi'), 'zzzzzzzzzz') def test_can_encode_messages_longer_than_key(self): cipher = Cipher('abc') self.assertEqual(cipher.encode('iamapandabear'), 'iboaqcnecbfcr') def 
test_can_decode_messages_longer_than_key(self): cipher = Cipher('abc') self.assertEqual(cipher.decode('iboaqcnecbfcr'), 'iamapandabear') if __name__ == '__main__': unittest.main()
import unittest import re from simple_cipher import Cipher # Tests adapted from `problem-specifications//canonical-data.json` @ v2.0.0 class SimpleCipherTest(unittest.TestCase): # Utility functions def setUp(self): try: self.assertRaisesRegex except AttributeError: self.assertRaisesRegex = self.assertRaisesRegexp def assertRaisesWithMessage(self, exception): return self.assertRaisesRegex(exception, r".+") class RandomKeyCipherTest(SimpleCipherTest): def test_can_encode(self): cipher = Cipher() plaintext = 'aaaaaaaaaa' self.assertEqual(cipher.encode(plaintext), cipher.key[:len(plaintext)]) def test_can_decode(self): cipher = Cipher() plaintext = 'aaaaaaaaaa' self.assertEqual(cipher.decode(cipher.key[:len(plaintext)]), plaintext) def test_is_reversible(self): cipher = Cipher() plaintext = 'abcdefghij' self.assertEqual(cipher.decode(cipher.encode(plaintext)), plaintext) def test_key_is_only_made_of_lowercase_letters(self): self.assertIsNotNone(re.match('^[a-z]+$', Cipher().key)) class SubstitutionCipherTest(SimpleCipherTest): def test_can_encode(self): cipher = Cipher('abcdefghij') self.assertEqual(cipher.encode('aaaaaaaaaa'), cipher.key) def test_can_decode(self): cipher = Cipher('abcdefghij') self.assertEqual(cipher.decode(cipher.key), 'aaaaaaaaaa') def test_is_reversible(self): cipher = Cipher('abcdefghij') plaintext = 'abcdefghij' self.assertEqual(cipher.decode(cipher.encode(plaintext)), plaintext) def test_can_double_shift_encode(self): plaintext = 'iamapandabear' cipher = Cipher(plaintext) self.assertEqual(cipher.encode(plaintext), 'qayaeaagaciai') def test_can_wrap_on_encode(self): cipher = Cipher('abcdefghij') self.assertEqual(cipher.encode('zzzzzzzzzz'), 'zabcdefghi') def test_can_wrap_on_decode(self): cipher = Cipher('abcdefghij') self.assertEqual(cipher.decode('zabcdefghi'), 'zzzzzzzzzz') def test_can_encode_messages_longer_than_key(self): cipher = Cipher('abc') self.assertEqual(cipher.encode('iamapandabear'), 'iboaqcnecbfcr') def 
test_can_decode_messages_longer_than_key(self): cipher = Cipher('abc') self.assertEqual(cipher.decode('iboaqcnecbfcr'), 'iamapandabear') if __name__ == '__main__': unittest.main()
en
0.801145
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.0.0 # Utility functions
3.119959
3
src/dungeonbot/tests/test_die_roll.py
tlake/dungeonbot_backup
0
6613254
# from flask_testing import TestCase from dungeonbot.conftest import BaseTest from dungeonbot.plugins.helpers.die_roll import DieRoll class DieRollTest(BaseTest): """Test Helper file: die_roll.""" def test_basic_init(self): """Test attributes of DieRoll object created with a basic roll string.""" roll = DieRoll("1d20", flag=None) assert roll.roll_str == "1d20" assert roll.operator == "+" assert roll.action == roll.roll_die assert roll.modifier == 0 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_a_init(self): """Test attributes of DieRoll object created with a roll string with advantage.""" roll = DieRoll("1d20", flag="a") assert roll.roll_str == "1d20" assert roll.operator == "+" assert roll.action == roll.advantage assert roll.modifier == 0 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_d_init(self): """Test attribuetes of DieRoll object created with a roll string with disadvantage.""" roll = DieRoll("1d20", flag="d") assert roll.roll_str == "1d20" assert roll.operator == "+" assert roll.action == roll.disadvantage assert roll.modifier == 0 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_positive_init(self): """Test attributes of DieRoll object created with a roll string with a plus.""" roll = DieRoll("1d20+32", flag=None) assert roll.roll_str == "1d20+32" assert roll.operator == "+" assert roll.action == roll.roll_die assert roll.modifier == 32 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_negative_init(self): """Test attributes of DieRoll object created with a roll string with a 
minus.""" roll = DieRoll("1d20-32", flag=None) assert roll.roll_str == "1d20-32" assert roll.operator == "-" assert roll.action == roll.roll_die assert roll.modifier == -32 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_negative_with_flag(self): """Test attributes with a minus and a flag.""" roll = DieRoll("1d20-32", flag="d") assert roll.roll_str == "1d20-32" assert roll.operator == "-" assert roll.action == roll.disadvantage assert roll.modifier == -32 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_print_results(self): """Test print results, basic.""" roll = DieRoll("1d20", flag=None) roll_result = 10 message = "*[ 10 ]* _(1d20 = 10 + 0) (min 1, max 20) _" assert roll.print_results(roll_result) == message def test_print_results_named(self): """Test print results with a name.""" roll = DieRoll("1d20", flag=None) roll_result = 10 name = "blue" message = "*[ 10 ]* _(1d20 = 10 + 0) (min 1, max 20) _ with blue" assert roll.print_results(roll_result, name=name) == message def test_roll_die(self): """Test method roll_die.""" roll = DieRoll("1d20-1", flag=None) assert roll.action == roll.roll_die assert roll.roll_die() in range(roll.min_roll, roll.max_roll + 1) def test_advantage(self): """Test method advantage.""" roll = DieRoll("1d20+1", flag="a") assert roll.action == roll.advantage assert roll.advantage() in range(roll.min_roll, roll.max_roll + 1) def test_disadvantage(self): """Test disadvantage method.""" roll = DieRoll("1d20+1", flag="d") assert roll.action == roll.disadvantage assert roll.disadvantage() in range(roll.min_roll, roll.max_roll + 1)
# from flask_testing import TestCase from dungeonbot.conftest import BaseTest from dungeonbot.plugins.helpers.die_roll import DieRoll class DieRollTest(BaseTest): """Test Helper file: die_roll.""" def test_basic_init(self): """Test attributes of DieRoll object created with a basic roll string.""" roll = DieRoll("1d20", flag=None) assert roll.roll_str == "1d20" assert roll.operator == "+" assert roll.action == roll.roll_die assert roll.modifier == 0 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_a_init(self): """Test attributes of DieRoll object created with a roll string with advantage.""" roll = DieRoll("1d20", flag="a") assert roll.roll_str == "1d20" assert roll.operator == "+" assert roll.action == roll.advantage assert roll.modifier == 0 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_d_init(self): """Test attribuetes of DieRoll object created with a roll string with disadvantage.""" roll = DieRoll("1d20", flag="d") assert roll.roll_str == "1d20" assert roll.operator == "+" assert roll.action == roll.disadvantage assert roll.modifier == 0 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_positive_init(self): """Test attributes of DieRoll object created with a roll string with a plus.""" roll = DieRoll("1d20+32", flag=None) assert roll.roll_str == "1d20+32" assert roll.operator == "+" assert roll.action == roll.roll_die assert roll.modifier == 32 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_negative_init(self): """Test attributes of DieRoll object created with a roll string with a 
minus.""" roll = DieRoll("1d20-32", flag=None) assert roll.roll_str == "1d20-32" assert roll.operator == "-" assert roll.action == roll.roll_die assert roll.modifier == -32 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_negative_with_flag(self): """Test attributes with a minus and a flag.""" roll = DieRoll("1d20-32", flag="d") assert roll.roll_str == "1d20-32" assert roll.operator == "-" assert roll.action == roll.disadvantage assert roll.modifier == -32 assert roll.message == "" assert roll.number == 1 assert roll.sides == 20 assert roll.min_roll == roll.number assert roll.max_roll == roll.sides * roll.number def test_print_results(self): """Test print results, basic.""" roll = DieRoll("1d20", flag=None) roll_result = 10 message = "*[ 10 ]* _(1d20 = 10 + 0) (min 1, max 20) _" assert roll.print_results(roll_result) == message def test_print_results_named(self): """Test print results with a name.""" roll = DieRoll("1d20", flag=None) roll_result = 10 name = "blue" message = "*[ 10 ]* _(1d20 = 10 + 0) (min 1, max 20) _ with blue" assert roll.print_results(roll_result, name=name) == message def test_roll_die(self): """Test method roll_die.""" roll = DieRoll("1d20-1", flag=None) assert roll.action == roll.roll_die assert roll.roll_die() in range(roll.min_roll, roll.max_roll + 1) def test_advantage(self): """Test method advantage.""" roll = DieRoll("1d20+1", flag="a") assert roll.action == roll.advantage assert roll.advantage() in range(roll.min_roll, roll.max_roll + 1) def test_disadvantage(self): """Test disadvantage method.""" roll = DieRoll("1d20+1", flag="d") assert roll.action == roll.disadvantage assert roll.disadvantage() in range(roll.min_roll, roll.max_roll + 1)
en
0.769884
# from flask_testing import TestCase Test Helper file: die_roll. Test attributes of DieRoll object created with a basic roll string. Test attributes of DieRoll object created with a roll string with advantage. Test attribuetes of DieRoll object created with a roll string with disadvantage. Test attributes of DieRoll object created with a roll string with a plus. Test attributes of DieRoll object created with a roll string with a minus. Test attributes with a minus and a flag. Test print results, basic. Test print results with a name. Test method roll_die. Test method advantage. Test disadvantage method.
3.030823
3
Seeder/contracts/signals.py
WebarchivCZ/Seeder
8
6613255
# pylint: disable=W0613
from . import constants
from django.dispatch import receiver
from django.db.models.signals import post_save

from contracts.models import Contract
from source import constants as source_constants


@receiver(signal=post_save, sender=Contract)
def process_contract_change(instance, created, **kwargs):
    """
    If the contract is marked as valid then source is accepted.

    Runs on every Contract save; newly created contracts are ignored.
    Sources that staff already accepted are moved to the source state
    mapped from the contract's new state via STATE_CONVERSION.
    """
    # Only react to updates whose new state has a configured mapping.
    if created or instance.state not in constants.STATE_CONVERSION:
        return
    new_source_state = constants.STATE_CONVERSION[instance.state]
    instance.sources.filter(
        state=source_constants.STATE_ACCEPTED_BY_STAFF,
    ).update(state=new_source_state)
# pylint: disable=W0613 from . import constants from django.dispatch import receiver from django.db.models.signals import post_save from contracts.models import Contract from source import constants as source_constants @receiver(signal=post_save, sender=Contract) def process_contract_change(instance, created, **kwargs): """ If the contract is marked as valid then source is accepted """ if not created and instance.state in constants.STATE_CONVERSION: instance.sources.filter( state=source_constants.STATE_ACCEPTED_BY_STAFF ).update(state = constants.STATE_CONVERSION[instance.state])
en
0.92603
# pylint: disable=W0613 If the contract is marked as valid then source is accepted
1.973728
2
easy/1021-remove-outermost-parentheses.py
changmeng72/leecode_python3
0
6613256
<filename>easy/1021-remove-outermost-parentheses.py class Solution: def removeOuterParentheses(self, s: str) -> str: r =[] start = 0 count = 1 for i in range(1,len(s)): if(s[i]=='('): count += 1 else: count -=1 if(count==0): r.append(s[start+1:i]) start = i+1 return ''.join(r)
<filename>easy/1021-remove-outermost-parentheses.py class Solution: def removeOuterParentheses(self, s: str) -> str: r =[] start = 0 count = 1 for i in range(1,len(s)): if(s[i]=='('): count += 1 else: count -=1 if(count==0): r.append(s[start+1:i]) start = i+1 return ''.join(r)
none
1
3.446983
3
hwt/serializer/generic/context.py
mgielda/hwt
0
6613257
from copy import copy


class SerializerCtx():
    """
    Serializer context.

    :ivar scope: instance of NameScope used to check id availability
    :ivar indent: number of visual indentations for code in this context
    :ivar createTmpVarFn: function (sugestedName, dtype) returns variable;
        this function will be called to create tmp variables
    :ivar constCache: constant cache to extract frequently used large
        constant values from code (visual improvement)
    :ivar currentUnit: current Unit instance or None
    """

    def __init__(self, scope, indent: int, createTmpVarFn, constCache=None):
        self.scope = scope
        self.indent = indent
        self.currentUnit = None
        # Fall back to the abstract default factory when none is supplied.
        self.createTmpVarFn = (self.defaultCreateTmpVarFn
                               if createTmpVarFn is None
                               else createTmpVarFn)
        self.constCache = constCache

    def defaultCreateTmpVarFn(self, sugestedName, dtype):
        # Placeholder: concrete serializer contexts must provide a factory.
        raise NotImplementedError()

    def withIndent(self, indent=1):
        """
        Create copy of this context with increased indent
        """
        new_ctx = copy(self)
        new_ctx.indent += indent
        return new_ctx
from copy import copy class SerializerCtx(): """ Serializer context :ivar scope: instance of NameScope used to check id availability :ivar indent: number of visual indentations for code in this context :ivar createTmpVarFn: function (sugestedName, dtype) returns variable this function will be called to create tmp variables :ivar constCache: constant cache to extract frequently used large constant values from code (visual improvement) :ivar currentUnit: current Unit instance or None """ def __init__(self, scope, indent: int, createTmpVarFn, constCache=None): self.scope = scope self.indent = indent self.currentUnit = None if createTmpVarFn is None: self.createTmpVarFn = self.defaultCreateTmpVarFn else: self.createTmpVarFn = createTmpVarFn self.constCache = constCache def defaultCreateTmpVarFn(self, sugestedName, dtype): raise NotImplementedError() def withIndent(self, indent=1): """ Create copy of this context with increased indent """ ctx = copy(self) ctx.indent += indent return ctx
en
0.644386
Serializer context :ivar scope: instance of NameScope used to check id availability :ivar indent: number of visual indentations for code in this context :ivar createTmpVarFn: function (sugestedName, dtype) returns variable this function will be called to create tmp variables :ivar constCache: constant cache to extract frequently used large constant values from code (visual improvement) :ivar currentUnit: current Unit instance or None Create copy of this context with increased indent
2.580711
3
py_headless_daw/dsp_utils/drum_synth/one_shot_oscillator.py
hq9000/py-headless-daw
22
6613258
import numpy as np
import math

from py_headless_daw.dsp_utils.adsr_envelope import ADSREnvelope
from py_headless_daw.dsp_utils.wave_producer_interface import WaveProducerInterface


class OneShotOscillator(WaveProducerInterface):
    """One-shot oscillator: renders a single enveloped tone into a mono buffer."""

    # different signals https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sawtooth.html
    TYPE_SINE: str = "sine"
    TYPE_NOISE: str = "noise"
    TYPE_TRIANGLE: str = "triangle"

    def __init__(self):
        # Envelopes shaping pitch and volume over the lifetime of the shot.
        self.pitch_envelope: ADSREnvelope = self._create_default_envelope()
        self.volume_envelope: ADSREnvelope = self._create_default_envelope()
        # Instantaneous frequency = zero_frequency + frequency_range * pitch_envelope(t)
        # (see _generate_oscillation_wave).
        self.zero_frequency: float = 440.0
        self.frequency_range: float = 440.0
        self.wave = self.TYPE_SINE
        self.distortion: float = 0.0
        self.initial_phase: float = 0.0
        # NOTE(review): self.volume is never read inside this class — presumably
        # consumed by callers; confirm before relying on it here.
        self.volume: float = 1.0

    def render_to_buffer(self, output_buffer: np.ndarray, sample_rate: int, start_sample: int,
                         mode: str = WaveProducerInterface.MODE_REPLACE):
        """Render the volume-enveloped oscillation into ``output_buffer``.

        :param output_buffer: 1-D (mono) numpy array to fill; any other rank
            raises ValueError
        :param sample_rate: samples per second
        :param start_sample: absolute sample offset passed through to the envelopes
        :param mode: MODE_REPLACE overwrites the buffer, MODE_MIX adds to it;
            any other value raises ValueError
        """
        if output_buffer.ndim != 1:
            raise ValueError(f"buffers given to an oscillator are supposed to be mono, "
                             f"this one has {output_buffer.ndim} dimensions instead of 1")
        # Per-sample volume envelope, multiplied into the raw oscillation below.
        volume_env_wave = np.copy(output_buffer)
        self.volume_envelope.render_to_buffer(volume_env_wave, sample_rate, start_sample)
        oscillation_wave = np.copy(output_buffer)
        self._generate_oscillation_wave(oscillation_wave, sample_rate, start_sample)
        np.multiply(oscillation_wave, volume_env_wave, out=oscillation_wave)
        if WaveProducerInterface.MODE_REPLACE == mode:
            np.copyto(output_buffer, oscillation_wave)
        elif WaveProducerInterface.MODE_MIX == mode:
            np.add(output_buffer, oscillation_wave, out=output_buffer)
        else:
            raise ValueError(f'unknown producing mode {mode}')

    def _create_default_envelope(self) -> ADSREnvelope:
        # Default ADSR with zero attack and sustain level 1 — presumably an
        # effectively flat envelope; confirm against ADSREnvelope semantics.
        return ADSREnvelope(attack_time=0, decay_time=1, sustain_level=1, sustain_time=0,
                            release_time=1)

    def _generate_oscillation_wave(self, out_buffer: np.ndarray, sample_rate, start_sample):
        # Phase accumulator: each sample advances phase by 2*pi / samples-per-period,
        # where the period follows the envelope-modulated instantaneous frequency.
        phase: float = self.initial_phase
        for i in range(out_buffer.shape[0]):
            relative_frequency = self.pitch_envelope.get_one_value(start_sample + i, sample_rate)
            real_frequency = self.zero_frequency + self.frequency_range * relative_frequency
            # Sample is written with the *current* phase before the phase advances.
            out_buffer[i] = self._wave_function(self.wave, self.distortion, phase)
            if real_frequency > 0:
                # Phase is only advanced for positive frequencies; at f <= 0 it holds.
                samples_in_period = (1 / real_frequency) * sample_rate
                oscillator_step_for_one_sample = 2 * math.pi / samples_in_period
                phase += oscillator_step_for_one_sample

    def _wave_function(self, waveform: str, distortion: float, phase: float) -> float:
        # Currently always a sine: `waveform` and `distortion` are ignored here,
        # so TYPE_NOISE / TYPE_TRIANGLE are not yet implemented.
        return math.sin(phase)
import numpy as np import math from py_headless_daw.dsp_utils.adsr_envelope import ADSREnvelope from py_headless_daw.dsp_utils.wave_producer_interface import WaveProducerInterface class OneShotOscillator(WaveProducerInterface): # different signals https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sawtooth.html TYPE_SINE: str = "sine" TYPE_NOISE: str = "noise" TYPE_TRIANGLE: str = "triangle" def __init__(self): self.pitch_envelope: ADSREnvelope = self._create_default_envelope() self.volume_envelope: ADSREnvelope = self._create_default_envelope() self.zero_frequency: float = 440.0 self.frequency_range: float = 440.0 self.wave = self.TYPE_SINE self.distortion: float = 0.0 self.initial_phase: float = 0.0 self.volume: float = 1.0 def render_to_buffer(self, output_buffer: np.ndarray, sample_rate: int, start_sample: int, mode: str = WaveProducerInterface.MODE_REPLACE): if output_buffer.ndim != 1: raise ValueError(f"buffers given to an oscillator are supposed to be mono, " f"this one has {output_buffer.ndim} dimensions instead of 1") volume_env_wave = np.copy(output_buffer) self.volume_envelope.render_to_buffer(volume_env_wave, sample_rate, start_sample) oscillation_wave = np.copy(output_buffer) self._generate_oscillation_wave(oscillation_wave, sample_rate, start_sample) np.multiply(oscillation_wave, volume_env_wave, out=oscillation_wave) if WaveProducerInterface.MODE_REPLACE == mode: np.copyto(output_buffer, oscillation_wave) elif WaveProducerInterface.MODE_MIX == mode: np.add(output_buffer, oscillation_wave, out=output_buffer) else: raise ValueError(f'unknown producing mode {mode}') def _create_default_envelope(self) -> ADSREnvelope: return ADSREnvelope(attack_time=0, decay_time=1, sustain_level=1, sustain_time=0, release_time=1) def _generate_oscillation_wave(self, out_buffer: np.ndarray, sample_rate, start_sample): phase: float = self.initial_phase for i in range(out_buffer.shape[0]): relative_frequency = self.pitch_envelope.get_one_value(start_sample + 
i, sample_rate) real_frequency = self.zero_frequency + self.frequency_range * relative_frequency out_buffer[i] = self._wave_function(self.wave, self.distortion, phase) if real_frequency > 0: samples_in_period = (1 / real_frequency) * sample_rate oscillator_step_for_one_sample = 2 * math.pi / samples_in_period phase += oscillator_step_for_one_sample def _wave_function(self, waveform: str, distortion: float, phase: float) -> float: return math.sin(phase)
en
0.393301
# different signals https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.sawtooth.html
2.590454
3
scripts/make_spreadsheet.py
ElementAI/data-augmentation-with-llms
2
6613259
<gh_stars>1-10 import os, json, pickle from openpyxl import Workbook from openpyxl.styles import Font DATASET_NAME = "banking77" def load_data(): data_dir = f"./data/{DATASET_NAME}" opj = os.path.join ds_full_suite = pickle.load(open(opj(data_dir, "full/data_full_suite.pkl"), "rb")) generated_samples = pickle.load(open(opj(data_dir, "full/al_dataset.pkl"), "rb"))[ "generated" ] id2name = json.load(open(opj(data_dir, "id2name.json"))) return ds_full_suite, generated_samples, id2name def massage_data(ds_full_suite, generated_samples, id2name): workbooks = {} for engine in generated_samples: workbooks[engine] = gather_workbook_data( ds_full_suite, generated_samples[engine], id2name ) return workbooks def gather_workbook_data(ds_full_suite, generated_samples, id2name): workbook_data = {} for domain in ds_full_suite: # generate prompt column prompt_data = ds_full_suite[domain]["F"]["train"] for text, intent_id in zip(prompt_data["text"], prompt_data["intent"]): intent_name = id2name[str(intent_id)].replace("?", "") sheet_name = f"{domain}<>{intent_name}" if sheet_name not in workbook_data: workbook_data[sheet_name] = { "prompt": [], "generated": [], "oracle_prediction": [], } workbook_data[sheet_name]["prompt"].append(text) # add generated data, and oracle prediction data for text, oracle_intent_id, org_intent_id in zip( generated_samples["text"], generated_samples["intent"], generated_samples["old_intent"], ): oracle_pred = id2name[str(oracle_intent_id)].replace("?", "") org_intent_name = id2name[str(org_intent_id)].replace("?", "") sheet_name = f"{domain}<>{org_intent_name}" if sheet_name not in workbook_data: # print(f"sheet {sheet_name} doesn't exist") continue workbook_data[sheet_name]["generated"].append(text) workbook_data[sheet_name]["oracle_prediction"].append(oracle_pred) return workbook_data def create_excel_sheet(name, data): wb = Workbook() wb.remove(wb.active) # remove the empty "Sheet" # create different sheets for sheet_name in data: org_intent = 
sheet_name.split("<>", 1)[1] ws = wb.create_sheet(sheet_name) prompts = data[sheet_name]["prompt"] generated = data[sheet_name]["generated"] oracle_predictions = data[sheet_name]["oracle_prediction"] ############# compute some quantities for formatting ############## # max width of column A max_sent_length = max(map(len, prompts + generated)) # max width of column B max_pred_length = max(map(len, oracle_predictions)) total_faithful_samples = oracle_predictions.count(org_intent) ############# compute end ################# # add the first column ws.append(["Sentences", "Oracle Predictions"]) # add the sentences column for irow in range(len(prompts + generated)): if irow < len(prompts): ws.append([prompts[irow]]) else: new_irow = irow - len(prompts) ws.append([generated[new_irow], oracle_predictions[new_irow]]) # some analysis ws["C1"] = "Total faithful samples" ws["C2"] = f"{total_faithful_samples}/{len(generated)}" ws["C3"] = f"{total_faithful_samples/len(generated)*100:.2f}%" # adjust column widths ws.column_dimensions["A"].width = max_sent_length ws.column_dimensions["B"].width = max_pred_length ws.column_dimensions["C"].width = len("Total faithful samples") # increase font size n_rows = len(prompts + generated) for col, n_rows in [("A", n_rows), ("B", n_rows), ("C", 3)]: for i in range(1, n_rows + 2): ws[f"{col}{i}"].font = Font(size=14) # bold the first row ws["A1"].font = Font(bold=True, size=14) ws["B1"].font = Font(bold=True, size=14) ws["C1"].font = Font(bold=True, size=14) # delete this useless sheet # sort sheets based on fidelity (ws['C3'] is the fidelity) wb._sheets.sort(key=lambda ws: float(ws["C3"].value[:-1])) wb.active = 0 save_folder = f"spreadsheets/{DATASET_NAME}" if not os.path.exists(save_folder): os.mkdir(save_folder) wb.save(os.path.join(save_folder, f"{name}.xlsx")) if __name__ == "__main__": workbooks = massage_data(*load_data()) for engine_temp, data in workbooks.items(): if "_" in engine_temp and engine_temp.split("_")[1] != "1.0": 
continue create_excel_sheet(engine_temp, data)
import os, json, pickle from openpyxl import Workbook from openpyxl.styles import Font DATASET_NAME = "banking77" def load_data(): data_dir = f"./data/{DATASET_NAME}" opj = os.path.join ds_full_suite = pickle.load(open(opj(data_dir, "full/data_full_suite.pkl"), "rb")) generated_samples = pickle.load(open(opj(data_dir, "full/al_dataset.pkl"), "rb"))[ "generated" ] id2name = json.load(open(opj(data_dir, "id2name.json"))) return ds_full_suite, generated_samples, id2name def massage_data(ds_full_suite, generated_samples, id2name): workbooks = {} for engine in generated_samples: workbooks[engine] = gather_workbook_data( ds_full_suite, generated_samples[engine], id2name ) return workbooks def gather_workbook_data(ds_full_suite, generated_samples, id2name): workbook_data = {} for domain in ds_full_suite: # generate prompt column prompt_data = ds_full_suite[domain]["F"]["train"] for text, intent_id in zip(prompt_data["text"], prompt_data["intent"]): intent_name = id2name[str(intent_id)].replace("?", "") sheet_name = f"{domain}<>{intent_name}" if sheet_name not in workbook_data: workbook_data[sheet_name] = { "prompt": [], "generated": [], "oracle_prediction": [], } workbook_data[sheet_name]["prompt"].append(text) # add generated data, and oracle prediction data for text, oracle_intent_id, org_intent_id in zip( generated_samples["text"], generated_samples["intent"], generated_samples["old_intent"], ): oracle_pred = id2name[str(oracle_intent_id)].replace("?", "") org_intent_name = id2name[str(org_intent_id)].replace("?", "") sheet_name = f"{domain}<>{org_intent_name}" if sheet_name not in workbook_data: # print(f"sheet {sheet_name} doesn't exist") continue workbook_data[sheet_name]["generated"].append(text) workbook_data[sheet_name]["oracle_prediction"].append(oracle_pred) return workbook_data def create_excel_sheet(name, data): wb = Workbook() wb.remove(wb.active) # remove the empty "Sheet" # create different sheets for sheet_name in data: org_intent = sheet_name.split("<>", 
1)[1] ws = wb.create_sheet(sheet_name) prompts = data[sheet_name]["prompt"] generated = data[sheet_name]["generated"] oracle_predictions = data[sheet_name]["oracle_prediction"] ############# compute some quantities for formatting ############## # max width of column A max_sent_length = max(map(len, prompts + generated)) # max width of column B max_pred_length = max(map(len, oracle_predictions)) total_faithful_samples = oracle_predictions.count(org_intent) ############# compute end ################# # add the first column ws.append(["Sentences", "Oracle Predictions"]) # add the sentences column for irow in range(len(prompts + generated)): if irow < len(prompts): ws.append([prompts[irow]]) else: new_irow = irow - len(prompts) ws.append([generated[new_irow], oracle_predictions[new_irow]]) # some analysis ws["C1"] = "Total faithful samples" ws["C2"] = f"{total_faithful_samples}/{len(generated)}" ws["C3"] = f"{total_faithful_samples/len(generated)*100:.2f}%" # adjust column widths ws.column_dimensions["A"].width = max_sent_length ws.column_dimensions["B"].width = max_pred_length ws.column_dimensions["C"].width = len("Total faithful samples") # increase font size n_rows = len(prompts + generated) for col, n_rows in [("A", n_rows), ("B", n_rows), ("C", 3)]: for i in range(1, n_rows + 2): ws[f"{col}{i}"].font = Font(size=14) # bold the first row ws["A1"].font = Font(bold=True, size=14) ws["B1"].font = Font(bold=True, size=14) ws["C1"].font = Font(bold=True, size=14) # delete this useless sheet # sort sheets based on fidelity (ws['C3'] is the fidelity) wb._sheets.sort(key=lambda ws: float(ws["C3"].value[:-1])) wb.active = 0 save_folder = f"spreadsheets/{DATASET_NAME}" if not os.path.exists(save_folder): os.mkdir(save_folder) wb.save(os.path.join(save_folder, f"{name}.xlsx")) if __name__ == "__main__": workbooks = massage_data(*load_data()) for engine_temp, data in workbooks.items(): if "_" in engine_temp and engine_temp.split("_")[1] != "1.0": continue 
create_excel_sheet(engine_temp, data)
en
0.569877
# generate prompt column # add generated data, and oracle prediction data # print(f"sheet {sheet_name} doesn't exist") # remove the empty "Sheet" # create different sheets ############# compute some quantities for formatting ############## # max width of column A # max width of column B ############# compute end ################# # add the first column # add the sentences column # some analysis # adjust column widths # increase font size # bold the first row # delete this useless sheet # sort sheets based on fidelity (ws['C3'] is the fidelity)
2.667352
3
CardPotential.py
PChild/card-counter
0
6613260
import os import tbapy tba = tbapy.TBA(os.getenv("TBA_KEY")) for event in tba.events(year=2019): if event['event_type'] in range(0, 10): for match in tba.event_matches(event['key']): if
import os import tbapy tba = tbapy.TBA(os.getenv("TBA_KEY")) for event in tba.events(year=2019): if event['event_type'] in range(0, 10): for match in tba.event_matches(event['key']): if
none
1
2.54503
3
coursera/python_programming_basics/1_week_09_lesson_rubles.py
anklav24/Python-Education
0
6613261
# Read two prices, each given as an integer pair (rubles, kopecks),
# one value per input line.
rubles1 = int(input())
kopecks1 = int(input())
rubles2 = int(input())
kopecks2 = int(input())

# Work entirely in kopecks so the carry is handled automatically,
# then split the total back into rubles and kopecks.
total_kopecks = (rubles1 * 100 + kopecks1) + (rubles2 * 100 + kopecks2)
print(total_kopecks // 100, total_kopecks % 100, sep=',')
a = int(input()) b = int(input()) c = int(input()) d = int(input()) cost1 = a * 100 + b cost2 = c * 100 + d total_cost = cost1 + cost2 print(total_cost // 100, total_cost % 100, sep=',')
none
1
3.634464
4
yolov4_pytorch/model/module/__init__.py
Lornatang/YOLOv4-PyTorch
19
6613262
<reponame>Lornatang/YOLOv4-PyTorch # Copyright 2020 Lorna Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from .activations import HardSwish from .activations import MemoryEfficientMish from .activations import MemoryEfficientSwish from .activations import Mish from .activations import MishImplementation from .activations import Swish from .activations import SwishImplementation from .common import Concat from .common import Focus from .common import check_anchor_order from .conv import C3 from .conv import Conv from .conv import ConvBNMish from .conv import CrossConv from .conv import DWConv from .conv import MixConv2d from .conv import MobileNetConv from .conv import autopad from .head import SPP from .layer import Detect from .layer import YOLO from .layer import parse_model from .neck import Bottleneck from .neck import BottleneckCSP from .neck import YOLOv4_Bottleneck __all__ = [ "HardSwish", "MemoryEfficientMish", "MemoryEfficientSwish", "Mish", "MishImplementation", "Swish", "SwishImplementation", "Concat", "Focus", "check_anchor_order", "C3", "Conv", "ConvBNMish", "CrossConv", "DWConv", "MixConv2d", "MobileNetConv", "autopad", "SPP", "Detect", "YOLO", "parse_model", "Bottleneck", "BottleneckCSP", "YOLOv4_Bottleneck", ]
# Copyright 2020 Lorna Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from .activations import HardSwish from .activations import MemoryEfficientMish from .activations import MemoryEfficientSwish from .activations import Mish from .activations import MishImplementation from .activations import Swish from .activations import SwishImplementation from .common import Concat from .common import Focus from .common import check_anchor_order from .conv import C3 from .conv import Conv from .conv import ConvBNMish from .conv import CrossConv from .conv import DWConv from .conv import MixConv2d from .conv import MobileNetConv from .conv import autopad from .head import SPP from .layer import Detect from .layer import YOLO from .layer import parse_model from .neck import Bottleneck from .neck import BottleneckCSP from .neck import YOLOv4_Bottleneck __all__ = [ "HardSwish", "MemoryEfficientMish", "MemoryEfficientSwish", "Mish", "MishImplementation", "Swish", "SwishImplementation", "Concat", "Focus", "check_anchor_order", "C3", "Conv", "ConvBNMish", "CrossConv", "DWConv", "MixConv2d", "MobileNetConv", "autopad", "SPP", "Detect", "YOLO", "parse_model", "Bottleneck", "BottleneckCSP", "YOLOv4_Bottleneck", ]
en
0.814997
# Copyright 2020 Lorna Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ==============================================================================
1.045587
1
qutilities/line_delay.py
Emigon/qutilities
0
6613263
""" processing methods to remove line delay from a resonance author: <NAME> """ import numpy as np import sympy as sp from fitkit import * from fitkit.decimate import * from .circle import * from qutilities import * def pm_line_delay(b_tau = (0, 0, 25/3e8)): """ returns a Parametric1D model for the line delay Params: tau: The line delay Args: Parameter bounds as required by Parametric1D """ tau, f = sp.symbols('tau f') return Parametric1D(sp.exp(-2j*np.pi*tau*f), {'tau': b_tau}) def rm_line_delay(s21, k = 10, N = 201): """ remove the line delay from s21 Args: s21: A Signal1D representation of the s21 data. Using approximately 10*fwhm of the resonance is recommended k: The number of samples from the beggining of the phase response used to estimate the initial gradient before optimiser polishing N: The number of points to decimate s21 by when fitting tau using the non-linear optimiser Returns: s21: The input s21 data with the line delay removed from the phase response model: The Parametric1D model for the line delay """ # unwrap the phase and fit linear model to obtain a starting point for tau phi = np.unwrap(np.angle(s21.values)) p = np.poly1d(np.polyfit(s21.x[:k], phi[:k], 1)) tau_0 = p.c[0]/(2*np.pi) # we expect this to be negative rough = s21*Signal1D(np.exp(-2j*np.pi*tau_0*s21.x), xraw = s21.x) # construct the model and a circle fitting based error function pm_neg = pm_line_delay(b_tau = (-np.abs(tau_0), 0, np.abs(tau_0))) def errf(v, self, sig1d, _): try: pm_neg.v['tau'] = v[0]*pm_neg.v['tau'] except: pass # return the same result as the previously set parameter! 
return circle_fit(sig1d*pm_neg(sig1d.x))[-1] # subsample to speed up circle fitting subsample = decimate_by_derivative(rough, N) shgo_opts = {'n': 100, 'iters': 1, 'sampling_method': 'sobol'} pm_neg.fit(subsample, method = 'shgo', errf = errf, opts = shgo_opts) tau_f = -(pm_neg.v['tau'] + tau_0) bounds = (tau_f - .5*np.abs(tau_f), tau_f, tau_f + .5*np.abs(tau_f)) ld_model = pm_line_delay(b_tau = bounds) return s21*Signal1D(np.exp(2j*np.pi*tau_f*s21.x), xraw = s21.x), ld_model
""" processing methods to remove line delay from a resonance author: <NAME> """ import numpy as np import sympy as sp from fitkit import * from fitkit.decimate import * from .circle import * from qutilities import * def pm_line_delay(b_tau = (0, 0, 25/3e8)): """ returns a Parametric1D model for the line delay Params: tau: The line delay Args: Parameter bounds as required by Parametric1D """ tau, f = sp.symbols('tau f') return Parametric1D(sp.exp(-2j*np.pi*tau*f), {'tau': b_tau}) def rm_line_delay(s21, k = 10, N = 201): """ remove the line delay from s21 Args: s21: A Signal1D representation of the s21 data. Using approximately 10*fwhm of the resonance is recommended k: The number of samples from the beggining of the phase response used to estimate the initial gradient before optimiser polishing N: The number of points to decimate s21 by when fitting tau using the non-linear optimiser Returns: s21: The input s21 data with the line delay removed from the phase response model: The Parametric1D model for the line delay """ # unwrap the phase and fit linear model to obtain a starting point for tau phi = np.unwrap(np.angle(s21.values)) p = np.poly1d(np.polyfit(s21.x[:k], phi[:k], 1)) tau_0 = p.c[0]/(2*np.pi) # we expect this to be negative rough = s21*Signal1D(np.exp(-2j*np.pi*tau_0*s21.x), xraw = s21.x) # construct the model and a circle fitting based error function pm_neg = pm_line_delay(b_tau = (-np.abs(tau_0), 0, np.abs(tau_0))) def errf(v, self, sig1d, _): try: pm_neg.v['tau'] = v[0]*pm_neg.v['tau'] except: pass # return the same result as the previously set parameter! 
return circle_fit(sig1d*pm_neg(sig1d.x))[-1] # subsample to speed up circle fitting subsample = decimate_by_derivative(rough, N) shgo_opts = {'n': 100, 'iters': 1, 'sampling_method': 'sobol'} pm_neg.fit(subsample, method = 'shgo', errf = errf, opts = shgo_opts) tau_f = -(pm_neg.v['tau'] + tau_0) bounds = (tau_f - .5*np.abs(tau_f), tau_f, tau_f + .5*np.abs(tau_f)) ld_model = pm_line_delay(b_tau = bounds) return s21*Signal1D(np.exp(2j*np.pi*tau_f*s21.x), xraw = s21.x), ld_model
en
0.834293
processing methods to remove line delay from a resonance author: <NAME> returns a Parametric1D model for the line delay Params: tau: The line delay Args: Parameter bounds as required by Parametric1D remove the line delay from s21 Args: s21: A Signal1D representation of the s21 data. Using approximately 10*fwhm of the resonance is recommended k: The number of samples from the beggining of the phase response used to estimate the initial gradient before optimiser polishing N: The number of points to decimate s21 by when fitting tau using the non-linear optimiser Returns: s21: The input s21 data with the line delay removed from the phase response model: The Parametric1D model for the line delay # unwrap the phase and fit linear model to obtain a starting point for tau # we expect this to be negative # construct the model and a circle fitting based error function # return the same result as the previously set parameter! # subsample to speed up circle fitting
2.950867
3
src/bo/strategy_bo.py
Asconius/trading-bot
2
6613264
import math from datetime import datetime from decimal import Decimal from typing import Tuple from pandas import DataFrame from src.constants import ZERO from src.dto.attempt_dto import AttemptDTO from src.enums.action_enum import ActionEnum from src.utils.utils import Utils class StrategyBO: # noinspection DuplicatedCode @staticmethod def counter_cyclical(frame: DataFrame, ticker: str, date: datetime, attempt: AttemptDTO) -> Tuple[ActionEnum, Decimal]: end_close: Decimal = frame.at[date, ticker] if attempt is None: attempt = AttemptDTO() start_close: Decimal = Utils.day_delta_value(frame, ticker, date, attempt.distance_buy) if not math.isnan(start_close): percent: Decimal = start_close / end_close if not math.isnan(start_close) and not math.isnan(end_close) and percent > attempt.delta_buy: return ActionEnum.BUY, Utils.number(attempt.amount_buy, end_close) start_close: Decimal = Utils.day_delta_value(frame, ticker, date, attempt.distance_sell) if not math.isnan(start_close): percent: Decimal = end_close / start_close if not math.isnan(start_close) and not math.isnan(end_close) and percent > attempt.delta_sell: return ActionEnum.SELL, Utils.number(attempt.amount_sell, end_close) return ActionEnum.NONE, ZERO
import math from datetime import datetime from decimal import Decimal from typing import Tuple from pandas import DataFrame from src.constants import ZERO from src.dto.attempt_dto import AttemptDTO from src.enums.action_enum import ActionEnum from src.utils.utils import Utils class StrategyBO: # noinspection DuplicatedCode @staticmethod def counter_cyclical(frame: DataFrame, ticker: str, date: datetime, attempt: AttemptDTO) -> Tuple[ActionEnum, Decimal]: end_close: Decimal = frame.at[date, ticker] if attempt is None: attempt = AttemptDTO() start_close: Decimal = Utils.day_delta_value(frame, ticker, date, attempt.distance_buy) if not math.isnan(start_close): percent: Decimal = start_close / end_close if not math.isnan(start_close) and not math.isnan(end_close) and percent > attempt.delta_buy: return ActionEnum.BUY, Utils.number(attempt.amount_buy, end_close) start_close: Decimal = Utils.day_delta_value(frame, ticker, date, attempt.distance_sell) if not math.isnan(start_close): percent: Decimal = end_close / start_close if not math.isnan(start_close) and not math.isnan(end_close) and percent > attempt.delta_sell: return ActionEnum.SELL, Utils.number(attempt.amount_sell, end_close) return ActionEnum.NONE, ZERO
en
0.3074
# noinspection DuplicatedCode
2.457064
2
examples/wisePigs/runMe.py
tyler-utah/PBDD
13
6613265
<reponame>tyler-utah/PBDD #!/usr/bin/python2.7 #<NAME> #Sept 18, 2011 #This example shows the second more complicated Lewis Carroll puzzle. #See wiseYoungPigs.txt for the formula. import sys sys.path.append("../../include/") sys.path.append("../../../../PyBool/include/") import BDD if __name__ == "__main__": #creating and initializing the BDD x = BDD.bdd_init("wiseYoungPigs.txt") #building the tree BDD.ite_build(x) print "Can wise young pigs go up in balloons?" if BDD.sat_count(x) == 0: print "No" else: print "Yes, and here's how:" print "" print BDD.any_sat(x) BDD.dot_bdd(x,"my_dot.dot") #BDD.dot_bdd(x, "myDot.dot")
#!/usr/bin/python2.7 #<NAME> #Sept 18, 2011 #This example shows the second more complicated Lewis Carroll puzzle. #See wiseYoungPigs.txt for the formula. import sys sys.path.append("../../include/") sys.path.append("../../../../PyBool/include/") import BDD if __name__ == "__main__": #creating and initializing the BDD x = BDD.bdd_init("wiseYoungPigs.txt") #building the tree BDD.ite_build(x) print "Can wise young pigs go up in balloons?" if BDD.sat_count(x) == 0: print "No" else: print "Yes, and here's how:" print "" print BDD.any_sat(x) BDD.dot_bdd(x,"my_dot.dot") #BDD.dot_bdd(x, "myDot.dot")
en
0.713934
#!/usr/bin/python2.7 #<NAME> #Sept 18, 2011 #This example shows the second more complicated Lewis Carroll puzzle. #See wiseYoungPigs.txt for the formula. #creating and initializing the BDD #building the tree #BDD.dot_bdd(x, "myDot.dot")
3.460672
3
ABC144/ABC144c copy.py
VolgaKurvar/AtCoder
0
6613266
# ABC144c import math def main(): import sys input = sys.stdin.readline sys.setrecursionlimit(10**6) # map(int,input().split()) n = int(input()) ans = 10**12 for i in range(n//2+1): for j in range(n//2+1): if(i*j == n): ans = min(ans, i+j) print(ans-2) if __name__ == '__main__': main()
# ABC144c import math def main(): import sys input = sys.stdin.readline sys.setrecursionlimit(10**6) # map(int,input().split()) n = int(input()) ans = 10**12 for i in range(n//2+1): for j in range(n//2+1): if(i*j == n): ans = min(ans, i+j) print(ans-2) if __name__ == '__main__': main()
ceb
0.249579
# ABC144c # map(int,input().split())
2.55739
3
example/qubole_presto_api_example.py
sanketsaurav/qds-sdk-py
42
6613267
""" This is the sample code used for submitting a Presto query (PrestoCommand) and getting the result back to local file. Similar way can be followed for HiveCommand etc. """ import sys import string from ConfigParser import SafeConfigParser from qds_sdk.qubole import Qubole from qds_sdk.commands import * import boto import time # Used for generating file name to download the result def get_random_filename(size=10): return "/tmp/result_" + str(int(time.time())) + ".tsv" # Returning content from the file def get_content(filename): with open(filename, 'r') as content_file: content = content_file.read() return content # Executing given query def execute_query(query): if query is None or query == "": return None cmd = PrestoCommand.create(query=query) query_id = str(cmd.id) print "Starting Command with id: " + query_id + "\nProgress: =>", while not PrestoCommand.is_done(cmd.status): print "\b=>", cmd = PrestoCommand.find(cmd.id) time.sleep(5) print cmd.get_log() if PrestoCommand.is_success(cmd.status): print "\nCommand Executed: Completed successfully" else: print "\nCommand Executed: Failed!!!. The status returned is: " + str(cmd.status) return cmd # Downloading the result def get_results(command): if command is None: return None filename = get_random_filename(10) print filename fp = open(filename, 'w') command.get_results(fp, delim="\n") print "Starting Result fetch with Command id: " + str(command.id) + "\nProgress: =>", while not PrestoCommand.is_done(command.status): print "\b=>", time.sleep(5) if PrestoCommand.is_success(command.status): print "\nCommand Executed: Results fetch completed successfully" else: print "\nCommand Executed: Result fetch for original command " + str(command.id) + "Failed!!!. The status returned is: " + str(command.status) fp.close() content = get_content(filename) return content if __name__ == '__main__': # Stting API token Qubole.configure(api_token='<PASSWORD>') get_results(execute_query("select * from default.cities limit 100;"))
""" This is the sample code used for submitting a Presto query (PrestoCommand) and getting the result back to local file. Similar way can be followed for HiveCommand etc. """ import sys import string from ConfigParser import SafeConfigParser from qds_sdk.qubole import Qubole from qds_sdk.commands import * import boto import time # Used for generating file name to download the result def get_random_filename(size=10): return "/tmp/result_" + str(int(time.time())) + ".tsv" # Returning content from the file def get_content(filename): with open(filename, 'r') as content_file: content = content_file.read() return content # Executing given query def execute_query(query): if query is None or query == "": return None cmd = PrestoCommand.create(query=query) query_id = str(cmd.id) print "Starting Command with id: " + query_id + "\nProgress: =>", while not PrestoCommand.is_done(cmd.status): print "\b=>", cmd = PrestoCommand.find(cmd.id) time.sleep(5) print cmd.get_log() if PrestoCommand.is_success(cmd.status): print "\nCommand Executed: Completed successfully" else: print "\nCommand Executed: Failed!!!. The status returned is: " + str(cmd.status) return cmd # Downloading the result def get_results(command): if command is None: return None filename = get_random_filename(10) print filename fp = open(filename, 'w') command.get_results(fp, delim="\n") print "Starting Result fetch with Command id: " + str(command.id) + "\nProgress: =>", while not PrestoCommand.is_done(command.status): print "\b=>", time.sleep(5) if PrestoCommand.is_success(command.status): print "\nCommand Executed: Results fetch completed successfully" else: print "\nCommand Executed: Result fetch for original command " + str(command.id) + "Failed!!!. The status returned is: " + str(command.status) fp.close() content = get_content(filename) return content if __name__ == '__main__': # Stting API token Qubole.configure(api_token='<PASSWORD>') get_results(execute_query("select * from default.cities limit 100;"))
en
0.764827
This is the sample code used for submitting a Presto query (PrestoCommand) and getting the result back to local file. Similar way can be followed for HiveCommand etc. # Used for generating file name to download the result # Returning content from the file # Executing given query # Downloading the result # Stting API token
2.686478
3
cloudseg/models/__init__.py
elrichgro/irccam-pmodwrc
1
6613268
<gh_stars>1-10 from cloudseg.models.unet.unet import UNet from cloudseg.models.deeplab.deeplab import DeepLab def get_model(name, **kwargs): models = {"unet": UNet, "deeplab": DeepLab} return models[name](**kwargs)
from cloudseg.models.unet.unet import UNet from cloudseg.models.deeplab.deeplab import DeepLab def get_model(name, **kwargs): models = {"unet": UNet, "deeplab": DeepLab} return models[name](**kwargs)
none
1
1.715789
2
api/Payload/runner.py
DeltaMeter/compilebox
0
6613269
import sys import subprocess import os from multiprocessing import Pool from optparse import OptionParser if not os.path.exists('results'): os.makedirs('results') errors = open('results/errors.txt', 'w') interpreter = '' def shellExec(filename): print filename testResult = subprocess.call([interpreter + " " + filename], stdout=subprocess.PIPE, stderr=errors, shell=True) if testResult == 0: return filename return "" def runPrograms(files, options): #interpreted languages are already 'compiled', compileResult = 0 if options.compiler: compileResult = subprocess.call(options.compiler + ' ' + options.compileTargets, stdout=subprocess.PIPE, stderr=errors, shell=True) print "Compile Result: %s" % compileResult #compilation was successful or unnecessary, so now we run the code if compileResult == 0: testPool = Pool(processes=8) tests = testPool.map_async(shellExec, files) passedTests = filter((lambda x: x != ""), tests.get(timeout=1)) failedTests = filter((lambda x: x not in passedTests), files) print "Passed " + ' '.join(passedTests) print "Failed " + ' '.join(failedTests) passedResults = open('results/passed.txt', 'w') passedResults.write(' '.join(passedTests)) passedResults.close() failedResults = open('results/failed.txt', 'w') failedResults.write(' '.join(failedTests)) failedResults.close() errors.close() open('results/completed', 'w').close() if __name__ == "__main__": optParser = OptionParser() optParser.add_option('-c', '--compiler', dest='compiler', help='compiler, i.e. javac') optParser.add_option('-t', '--compileTargets', dest='compileTargets', help='compile targets, i.e., *.java') optParser.add_option('-i', '--interpreter', dest='interpreter', help='interpreter, i.e. java') (options, args) = optParser.parse_args() interpreter = options.interpreter runPrograms(args, options)
import sys import subprocess import os from multiprocessing import Pool from optparse import OptionParser if not os.path.exists('results'): os.makedirs('results') errors = open('results/errors.txt', 'w') interpreter = '' def shellExec(filename): print filename testResult = subprocess.call([interpreter + " " + filename], stdout=subprocess.PIPE, stderr=errors, shell=True) if testResult == 0: return filename return "" def runPrograms(files, options): #interpreted languages are already 'compiled', compileResult = 0 if options.compiler: compileResult = subprocess.call(options.compiler + ' ' + options.compileTargets, stdout=subprocess.PIPE, stderr=errors, shell=True) print "Compile Result: %s" % compileResult #compilation was successful or unnecessary, so now we run the code if compileResult == 0: testPool = Pool(processes=8) tests = testPool.map_async(shellExec, files) passedTests = filter((lambda x: x != ""), tests.get(timeout=1)) failedTests = filter((lambda x: x not in passedTests), files) print "Passed " + ' '.join(passedTests) print "Failed " + ' '.join(failedTests) passedResults = open('results/passed.txt', 'w') passedResults.write(' '.join(passedTests)) passedResults.close() failedResults = open('results/failed.txt', 'w') failedResults.write(' '.join(failedTests)) failedResults.close() errors.close() open('results/completed', 'w').close() if __name__ == "__main__": optParser = OptionParser() optParser.add_option('-c', '--compiler', dest='compiler', help='compiler, i.e. javac') optParser.add_option('-t', '--compileTargets', dest='compileTargets', help='compile targets, i.e., *.java') optParser.add_option('-i', '--interpreter', dest='interpreter', help='interpreter, i.e. java') (options, args) = optParser.parse_args() interpreter = options.interpreter runPrograms(args, options)
en
0.850634
#interpreted languages are already 'compiled', #compilation was successful or unnecessary, so now we run the code
2.66102
3
SBCorpus-py/SBCorpus-py/reader.py
johnwdubois/rezonator_v2
3
6613270
#!/usr/bin/python3 #from __future__ import print_function #from xml import * import SBCorpus from SBCorpus.metadata import metadata import re class SBCorpusReader(): def __init__(self): #self.corpus = SBCorpus.xmlreader() self.corpus = SBCorpus.corpus self.positions={ 'ID':0, 'NAME':1, 'GENDER':2, 'AGE':3, 'HOMETOWN':4, 'HOMESTATE':5, 'CURRENTSTATE':6, 'EDUCATION':7, 'YEARSEDUCATION':8, 'OCCUPATION':9, 'ETHNICITY':10, 'TEXTS':11 } def copy_object(self, obj): if type(obj) in (str,int,float): output=obj elif type(obj)==list: output=[] for item in obj: output+=[self.copy_object(item)] elif type(obj)==tuple: output=[] for item in obj: output+=[self.copy_object(item)] output=tuple(output) elif type(obj)==dict: output={} for key in list(obj): output[key]=self.copy_object(obj[key]) return output def copy_part(self, output, source, text=None, turn=None, IU=None, word=None, tiers=None): if text != None: if turn != None: if text not in output: output[text] = {'name':source[text]['name']} if IU != None: if turn not in output[text]: output[text][turn] = {'ID':source[text][turn]['ID']} if word != None: if IU not in output[text][turn]: output[text][turn][IU] = {'start':source[text][turn][IU]['start'], 'end':source[text][turn][IU]['end']} if tiers != None: if word not in output[text][turn][IU]: output[text][turn][IU][word] = {} for tier in tiers: if word not in output[text][turn][IU][word]: output[text][turn][IU][word][tier] = self.copy_object(source[text][turn][IU][word][tier]) elif tiers == None: output[text][turn][IU][word] = self.copy_object(source[text][turn][IU][word]) elif word == None: output[text][turn][IU] = self.copy_object(source[text][turn][IU]) elif IU == None: output[text][turn] = self.copy_object(source[text][turn]) elif turn == None: output[text] = self.copy_object(source[text]) elif text == None: output = self.copy_object(source) return output def get_range(self, terms): if ':' in terms: nrange=[] for i in 
range(int(terms.split(':')[0]),int(terms.split(':')[1])+1): nrange.append(i) terms = nrange else: if re.match('^\d*$', terms) == None: terms = [terms] else: terms = [int(terms)] return terms def get_parameters(self, identifier, negative=False, capsearch=True): identifiers = {} remove = {} if type(identifier) in [list,tuple]: return identifier elif type(identifier) in [int, float]: return [identifier] if ',' in identifier: identifier = identifier.split(',') elif type(identifier) == str: identifier = [identifier] for parameter in identifier: if '!=' in parameter: search,terms = parameter.split('!=') if capsearch == True: search = search.upper() terms = self.get_range(terms) if search not in remove: remove[search] = terms else: remove[search] += terms elif '=' in parameter: search,terms = parameter.split('=') if capsearch == True: search = search.upper() terms = self.get_range(terms) if search not in identifiers: identifiers[search] = terms else: identifiers[search] += terms else: if 'generic' not in identifiers: identifiers['generic'] = self.get_range(parameter) else: identifiers['generic'] += self.get_range(parameter) if 'generic' in identifiers: return identifiers['generic'] else: if negative == True: return identifiers, remove else: return identifiers def generator(self, extract, level): output = [] for text in extract: if type(text) == int: if level == 'text': output += [text] else: for turn in extract[text]: if type(turn) == int: if level == 'turn': output += [(text, turn)] else: for IU in extract[text][turn]: if type(IU) == int: if level == 'IU': output += [(text, turn, IU)] else: for word in extract[text][turn][IU]: if type(word) == int: if level == 'word': output += [(text, turn, IU, word)] output.sort() return output def getParticipants(self, identifier='all', info='all'): if identifier != 'all': identifiers,remove = self.get_parameters(identifier, negative=True) containing = [] outtakes=[] for key in identifiers: for participant in metadata: for term in 
identifiers[key]: pos = participant[self.positions[key]] if type(pos) in [int,float,str]: if term == pos: containing.append(participant) elif type(pos) in [list,tuple]: if term in pos: containing.append(participant) else: outtakes.append(participant) output = [] for participant in containing: for key in remove: for term in remove[key]: pos = participant[self.positions[key]] if type(pos) in [int,float,str]: if term == pos: outtakes.append(participant) elif type(pos) in [list,tuple]: if term in pos: outtakes.append(participant) else: output=[] outtakes=[] containing = self.copy_object(metadata) for participant in containing: if participant not in outtakes: output += [participant] if info == 'all': return output else: output = [x[self.positions[info]] for x in output] newoutput = [] for element in output: if type(element) == list: newoutput += [e for e in element if e not in newoutput] elif element not in newoutput: newoutput += [element] return newoutput def printParticipants(self, identifier): participants = self.getParticipants(identifier, info='all') output = [] for key in self.positions: output.append(key) for participant in participants: for key in output: value = participant[self.positions[key]] if type(value) in [int,float]: value = str(value) elif type(value) == list: value = ', '.join([str(v) for v in value]) print (key+': '+value) print() def format_time(self, flt, decimal=3): output = str(flt) if decimal > 0: if '.' not in output: output += '.' before = output.split('.')[0] after = output.split('.')[1] zeros = decimal - len(after) if zeros >= 0: output += '0'*zeros return output elif zeros < 0: if int(after[zeros]) < 5: return output[:zeros] else: after = str(int(after[:zeros])+1) if len(after) > decimal: return str(int(before)+1) + '.' + after[1:] else: return before + '.' 
+ after def getTexts(self, subset=None, textlist='all', participants='all'): if subset == None: subset = self.copy_object(self.corpus) output = {} if textlist != 'all': textlist = self.get_parameters(textlist) else: textlist = [i for i in range(1,61)] if participants == 'all': ppl = self.getParticipants(info='ID') ppl = self.getParticipants(participants, 'TEXTS') for txt in textlist: if txt in ppl and txt in subset: output[txt] = self.copy_object(subset[txt]) return output def getTurns(self, subset=None, textlist='all', turnlist='all', IUlist='all', participants='all', containing='any', afterTurn='any', beforeTurn='any', offset=0, before='any', after='any', at='any', minlength='any', maxlength='any'): subset = self.getTexts(subset=subset, textlist=textlist, participants=participants) if (containing,IUlist) != ('any','all'): IUs = self.getIUs(subset=subset, IUlist=IUlist, containing=containing) else: IUs=subset list(IUs) turns = {} for text,turn in self.generator(IUs, 'turn'): turns = self.copy_part(turns, subset, text, turn) output = {} if afterTurn != 'any': turns_after = {} for text,turn in self.generator(afterTurn, 'turn'): if text in turns: if turn+1 in turns[text]: turns_after = self.copy_part(turns_after, turns, text, turn+1) turns = turns_after del turns_after if beforeTurn != 'any': turns_before = {} for text,turn in self.generator(beforeTurn, 'turn'): if text in turns: if turn-1 in turns[text]: turns_before = self.copy_part(turns_before, turns, text, turn-1) turns = turns_before del turns_before IDs = [0] + self.getParticipants(participants, 'ID') if turnlist != 'all': turnlist = self.get_parameters(turnlist) for text,turn in self.generator(turns,'turn'): accept = True if turn+offset in turns[text]: if turns[text][turn]['ID'] not in IDs: accept = False if turnlist != 'all': if turn + offset not in turnlist: accept = False ius = [iu for iu in turns[text][turn+offset] if (type(iu) == int)] if ius != []: start = turns[text][turn+offset][min(ius)]['start'] end 
= turns[text][turn+offset][max(ius)]['end'] if type(maxlength) in [int, float]: if end - start > maxlength: accept = False if type(minlength) in [int, float]: if end - start < minlength: accept = False if type(at) in [int, float]: if at < start or at > end: accept = False if type(after) in [int, float]: if start < after: accept = False if type(before) in [int, float]: if end > before: accept = False elif (maxlength,minlength,at,after,before) != ('any','any','any','any','any'): accept = False print(ius) else: accept = False if accept == True: output = self.copy_part(output,turns,text,turn+offset) return output def getIUs(self, subset=None, textlist='all', turnlist='all', IUlist='all', participants='all', containing='any', after='any', at='any', before='any', maxlength='any', minlength='any'): subset = self.getTexts(subset=subset,participants=participants,textlist=textlist) if (turnlist,participants) != ('any','all'): subset = self.getTurns(subset, turnlist=turnlist, participants=participants) IUs = {} if containing != 'any': words = self.getWords(subset=subset,containing=containing) for text,turn,IU in self.generator(words,'IU'): IUs = self.copy_part(IUs, subset, text, turn, IU) subset = IUs del IUs output={} if IUlist != 'all': IUlist = self.get_parameters(IUlist) for text,turn,IU in self.generator(subset, 'IU'): iu = subset[text][turn][IU] accept = True if IUlist != 'all': if IU not in IUlist: accept = False if type(maxlength) in [int, float]: if iu['end'] - iu['start'] > maxlength: accept = False if type(minlength) in [int, float]: if iu['end'] - iu['start'] < minlength: accept = False if type(at) in [int, float]: if at < iu['start'] or at > iu['end']: accept = False if type(after) in [int, float]: if iu['start'] < after: accept = False if type(before) in [int, float]: if iu['end'] > before: accept = False if accept == True: output = self.copy_part(output, subset, text, turn, IU) return output def getWords(self, subset=None, textlist='all', turnlist='all', 
IUlist='all', participants='all', containing='all', tier='dt', aslist=False, unit='word', fromstart='any', fromend='any'): output = {} subset = self.getIUs(subset=subset, textlist=textlist, turnlist=turnlist, IUlist=IUlist, participants=participants) if containing != 'all': containing,remove = self.get_parameters(containing, negative=True) for text,turn,IU,word in self.generator(subset, 'word'): accept = True if type(fromstart) == int: if word > fromstart: accept = False if type(fromend) == int: for i in range(0,fromend): if word+i not in subset[text][turn][IU]: accept = False if containing != 'all': for search in containing: for term in containing[search]: if search.lower() in ['dt','word']: if term[0:2] == "r'" and term[-1] == "'": if re.match(term[2:-1],subset[text][turn][IU][word][search.lower()]) == None: accept = False elif term != subset[text][turn][IU][word][search.lower()]: accept = False elif search.lower() == 'manner': if term not in subset[text][turn][IU][word]['manner'] and term.upper() not in subset[text][turn][IU][word]['manner']: accept = False elif search == 'POS': if term != subset[text][turn][IU][word]['POS']: accept = False for search in remove: for term in remove[search]: if search.lower() in ['dt','word']: if term[0:2] == "r'" and term[-1] == "'": if re.match(term[2:-1],subset[text][turn][IU][word][search.lower()]) != None: accept = False elif term == subset[text][turn][IU][word][search.lower()]: accept = False elif search.lower() == 'manner': if term not in subset[text][turn][IU][word]['manner'] and term.upper() not in subset[text][turn][IU][word]['manner']: accept = False elif search == 'POS': if term != subset[text][turn][IU][word]['POS']: accept = False if accept == True: output = self.copy_part(output, subset, text, turn, IU, word) if aslist == True: if unit == 'IU': output = self.listWords(output, tier=tier, IUs=True) elif unit == 'word': output = self.listWords(output, tier=tier, IUs=False) return output def combineSubsets(self, 
excerpt1, excerpt2=None): if type(excerpt1) == list: output = self.copy_object(excerpt1[0]) for subset in excerpt1[1:]: for text,turn,IU,word in self.generator(subset, 'word'): output = self.copy_part(output,subset,text,turn,IU,word) elif type(excerpt1) == dict and excerpt2 != None: output = self.copy_object(excerpt1) for text,turn,IU,word in self.generator(excerpt2, 'word'): output = self.copy_part(output,excerpt2,text,turn,IU,word) return output def getWindow(self, subset='all', castunit='IU', outputunit='IU', size=10, shift=5): output = {} complete = [0] if subset == 'all': subset = self.getTexts() if castunit in ['millisecond', 'ms']: size = float(size)/1000.000 shift = float(shift)/1000.000 minus = 0.001 castunit = 's' if outputunit == 'word': print('No data for timing of words. Returning output in IUs.') return self.getWindow(subset=subset,castunit=castunit,outputunit='IU',size=size,shift=shift) elif castunit in ['second', 's']: minus = 1.000 castunit = 's' if outputunit == 'word': print('No data for timing of words. Returning output in IUs.') return self.getWindow(subset=subset,castunit=castunit,outputunit='IU',size=size,shift=shift) elif castunit in ['minute', 'm']: size = float(size)*60.000 shift = float(shift)*60.000 minus = 60.000 castunit = 's' if outputunit == 'word': print('No data for timing of words. 
Returning output in IUs.') return self.getWindow(subset=subset,castunit=castunit,outputunit='IU',size=size,shift=shift) elif castunit == 'word': if outputunit == 'IU': words = self.getWindow(subset=subset, castunit=castunit, outputunit='word', size=size, shift=shift) for window in range(0,len(words)): if window+1 not in output: output[window+1] = {} for text,turn,iu in self.generator(words[window], 'IU'): output[window+1] = self.copy_part(output[window+1], subset, text, turn, iu) elif outputunit == 'turn': words = self.getWindows(subset=subset, castunit=castunit, outputunit='word', size=size, shift=shift) for window in range(0,len(words)): if window+1 not in output: output[window+1] = {} for text,turn in self.generator(words[window], 'turn'): output[window+1] = self.copy_part(output[window+1], subset, text, turn) elif castunit == 'IU': if outputunit == 'turn': IUs = self.getWindows(subset=subset, castunit=castunit, outputunit='IU', size=size, shift=shift) for window in range(0,len(IUs)): if window+1 not in output: output[window+1] = {} for text,turn in self.generator(IUs[window], 'turn'): output[window+1] = self.copy_part(output[window+1], subset, text, turn) if outputunit == 'word': return self.getWindow(subset=subset, castunit=castunit, outputunit='IU', size=size, shift=shift) elif castunit == 'turn': if outputunit in ('IU','word'): return self.getWindow(subset=subset, castunit=castunit, outputunit='turn', size=size, shift=shift) point = size for text in self.generator(subset,'text'): if outputunit == 'word': if castunit == 'word': complete = [0] wordno=0 for txt,trn,iu,wd in self.generator({text:subset[text]},'word'): end = False i = max(complete)+1 while end == False: number = size*2 if i not in output: output[i] = {} isize = len(self.listWords(words=output[i], IUs=False)) point = size + ((i-1) * shift) if number == isize or wordno > size + ((i-1) * shift) + size: complete.append(i) elif point + size >= wordno and point - size <= wordno: output[i] = 
self.copy_part(output[i], subset, text, trn, iu, wd) else: end = True i+=1 wordno += 1 elif outputunit == 'IU': i = 1 end = False if castunit == 'IU': while end == False: minIU = str(point - size) maxIU = str(point + size - 1) IUrange = ':'.join([minIU,maxIU]) window = self.getIUs(subset=subset,textlist=text,IUlist=IUrange) if window == {}: end = True else: output[i] = window i+=1 point += shift elif castunit == 's': while end == False: minIU = point - size maxIU = point + size - minus window = self.combineSubsets([self.getIUs(subset=subset,textlist=text,after=minIU,before=maxIU), self.getIUs(subset=subset,textlist=text,at=minIU), self.getIUs(subset=subset,textlist=text,at=maxIU)]) if window == {}: end = True else: output[i] = window i+=1 point += shift elif outputunit == 'turn': i = 1 end = False if castunit == 's': while end == False: minturn = point - size maxturn = point + size - minus window = self.combineSubsets([self.getTurns(subset=subset,textlist=text,after=minturn,before=maxturn), self.getTurns(subset=subset,textlist=text,at=minturn), self.getTurns(subset=subset,textlist=text,at=maxturn)]) if window == {}: end = True else: output[i] = window i+=1 point += shift return [output[window] for window in output if output[window] != {}] def printSubset(self, subset, title=True, tier='dt', timestamps=True, labels=True, numberedlines=False, decimal=3): output = '' for text in subset: output += '\n' header = subset[text]['name']+' (' turns = [t for t in list(subset[text]) if (type(t) == int)] turns.sort() turn1 = min(turns) IUlist = [iu for iu in list(subset[text][turn1]) if (type(iu) == int)] IUlist.sort() IU1 = min(IUlist) lstturn = max(turns) IUlist = [iu for iu in list(subset[text][lstturn]) if (type(iu) == int)] lstIU = max(IUlist) header += self.format_time(subset[text][turn1][IU1]['start'], decimal) + ' - ' + self.format_time(subset[text][lstturn][lstIU]['end'], decimal) + ')\n' if title == True: output += header for turn in turns: IUlist = [i for i in 
subset[text][turn] if (type(i) == int)] IUlist.sort() label = subset[text][turn]['ID'] if label == 0: label = 'OTHER' else: label = self.getParticipants('ID='+str(label),'NAME')[-1] IU1 = min(IUlist) turn_start = self.format_time(subset[text][turn][IU1]['start'], decimal) turn_end = self.format_time(subset[text][turn][IU1]['end'], decimal) if numberedlines == True: output += str(IU1)+'\t' if timestamps == True: output += turn_start + '\t' + turn_end + '\t' if labels == True: output += label + ';\t' IUtext = [] words = [wd for wd in list(subset[text][turn][IU1]) if (type(wd) == int)] words.sort() for word in words: IUtext += [subset[text][turn][IU1][word][tier]] output += ' '.join(IUtext) + '\n' if len(IUlist) > 1: for IU in IUlist[1:]: IUtext = [] turn_start = self.format_time(subset[text][turn][IU]['start'], decimal) turn_end = self.format_time(subset[text][turn][IU]['end'], decimal) if numberedlines == True: output += str(IU)+'\t' if timestamps == True: output += turn_start + '\t' + turn_end + '\t' if labels == True: output += '\t' IUtext = [] words = [wd for wd in list(subset[text][turn][IU]) if (type(wd) == int)] words.sort() for word in words: IUtext += [subset[text][turn][IU][word][tier]] for word in IUtext: pass output += ' '.join(IUtext) + '\n' print(output) def listWords(self, words='all', tier='dt', IUs=True): wordlist=[] IUlist=[] if words == 'all': words == getTexts() prevIU = int for text,turn,IU,word in self.generator(words,'word'): if IUs == True: if prevIU != IU: if prevIU != int: wordlist += [[IUlist]] IUlist = [] IUlist += [words[text][turn][IU][word][tier]] prevIU = IU else: wordlist += [words[text][turn][IU][word][tier]] if IUlist != []: wordlist += IUlist return wordlist
#!/usr/bin/python3 #from __future__ import print_function #from xml import * import SBCorpus from SBCorpus.metadata import metadata import re class SBCorpusReader(): def __init__(self): #self.corpus = SBCorpus.xmlreader() self.corpus = SBCorpus.corpus self.positions={ 'ID':0, 'NAME':1, 'GENDER':2, 'AGE':3, 'HOMETOWN':4, 'HOMESTATE':5, 'CURRENTSTATE':6, 'EDUCATION':7, 'YEARSEDUCATION':8, 'OCCUPATION':9, 'ETHNICITY':10, 'TEXTS':11 } def copy_object(self, obj): if type(obj) in (str,int,float): output=obj elif type(obj)==list: output=[] for item in obj: output+=[self.copy_object(item)] elif type(obj)==tuple: output=[] for item in obj: output+=[self.copy_object(item)] output=tuple(output) elif type(obj)==dict: output={} for key in list(obj): output[key]=self.copy_object(obj[key]) return output def copy_part(self, output, source, text=None, turn=None, IU=None, word=None, tiers=None): if text != None: if turn != None: if text not in output: output[text] = {'name':source[text]['name']} if IU != None: if turn not in output[text]: output[text][turn] = {'ID':source[text][turn]['ID']} if word != None: if IU not in output[text][turn]: output[text][turn][IU] = {'start':source[text][turn][IU]['start'], 'end':source[text][turn][IU]['end']} if tiers != None: if word not in output[text][turn][IU]: output[text][turn][IU][word] = {} for tier in tiers: if word not in output[text][turn][IU][word]: output[text][turn][IU][word][tier] = self.copy_object(source[text][turn][IU][word][tier]) elif tiers == None: output[text][turn][IU][word] = self.copy_object(source[text][turn][IU][word]) elif word == None: output[text][turn][IU] = self.copy_object(source[text][turn][IU]) elif IU == None: output[text][turn] = self.copy_object(source[text][turn]) elif turn == None: output[text] = self.copy_object(source[text]) elif text == None: output = self.copy_object(source) return output def get_range(self, terms): if ':' in terms: nrange=[] for i in 
range(int(terms.split(':')[0]),int(terms.split(':')[1])+1): nrange.append(i) terms = nrange else: if re.match('^\d*$', terms) == None: terms = [terms] else: terms = [int(terms)] return terms def get_parameters(self, identifier, negative=False, capsearch=True): identifiers = {} remove = {} if type(identifier) in [list,tuple]: return identifier elif type(identifier) in [int, float]: return [identifier] if ',' in identifier: identifier = identifier.split(',') elif type(identifier) == str: identifier = [identifier] for parameter in identifier: if '!=' in parameter: search,terms = parameter.split('!=') if capsearch == True: search = search.upper() terms = self.get_range(terms) if search not in remove: remove[search] = terms else: remove[search] += terms elif '=' in parameter: search,terms = parameter.split('=') if capsearch == True: search = search.upper() terms = self.get_range(terms) if search not in identifiers: identifiers[search] = terms else: identifiers[search] += terms else: if 'generic' not in identifiers: identifiers['generic'] = self.get_range(parameter) else: identifiers['generic'] += self.get_range(parameter) if 'generic' in identifiers: return identifiers['generic'] else: if negative == True: return identifiers, remove else: return identifiers def generator(self, extract, level): output = [] for text in extract: if type(text) == int: if level == 'text': output += [text] else: for turn in extract[text]: if type(turn) == int: if level == 'turn': output += [(text, turn)] else: for IU in extract[text][turn]: if type(IU) == int: if level == 'IU': output += [(text, turn, IU)] else: for word in extract[text][turn][IU]: if type(word) == int: if level == 'word': output += [(text, turn, IU, word)] output.sort() return output def getParticipants(self, identifier='all', info='all'): if identifier != 'all': identifiers,remove = self.get_parameters(identifier, negative=True) containing = [] outtakes=[] for key in identifiers: for participant in metadata: for term in 
identifiers[key]: pos = participant[self.positions[key]] if type(pos) in [int,float,str]: if term == pos: containing.append(participant) elif type(pos) in [list,tuple]: if term in pos: containing.append(participant) else: outtakes.append(participant) output = [] for participant in containing: for key in remove: for term in remove[key]: pos = participant[self.positions[key]] if type(pos) in [int,float,str]: if term == pos: outtakes.append(participant) elif type(pos) in [list,tuple]: if term in pos: outtakes.append(participant) else: output=[] outtakes=[] containing = self.copy_object(metadata) for participant in containing: if participant not in outtakes: output += [participant] if info == 'all': return output else: output = [x[self.positions[info]] for x in output] newoutput = [] for element in output: if type(element) == list: newoutput += [e for e in element if e not in newoutput] elif element not in newoutput: newoutput += [element] return newoutput def printParticipants(self, identifier): participants = self.getParticipants(identifier, info='all') output = [] for key in self.positions: output.append(key) for participant in participants: for key in output: value = participant[self.positions[key]] if type(value) in [int,float]: value = str(value) elif type(value) == list: value = ', '.join([str(v) for v in value]) print (key+': '+value) print() def format_time(self, flt, decimal=3): output = str(flt) if decimal > 0: if '.' not in output: output += '.' before = output.split('.')[0] after = output.split('.')[1] zeros = decimal - len(after) if zeros >= 0: output += '0'*zeros return output elif zeros < 0: if int(after[zeros]) < 5: return output[:zeros] else: after = str(int(after[:zeros])+1) if len(after) > decimal: return str(int(before)+1) + '.' + after[1:] else: return before + '.' 
+ after def getTexts(self, subset=None, textlist='all', participants='all'): if subset == None: subset = self.copy_object(self.corpus) output = {} if textlist != 'all': textlist = self.get_parameters(textlist) else: textlist = [i for i in range(1,61)] if participants == 'all': ppl = self.getParticipants(info='ID') ppl = self.getParticipants(participants, 'TEXTS') for txt in textlist: if txt in ppl and txt in subset: output[txt] = self.copy_object(subset[txt]) return output def getTurns(self, subset=None, textlist='all', turnlist='all', IUlist='all', participants='all', containing='any', afterTurn='any', beforeTurn='any', offset=0, before='any', after='any', at='any', minlength='any', maxlength='any'): subset = self.getTexts(subset=subset, textlist=textlist, participants=participants) if (containing,IUlist) != ('any','all'): IUs = self.getIUs(subset=subset, IUlist=IUlist, containing=containing) else: IUs=subset list(IUs) turns = {} for text,turn in self.generator(IUs, 'turn'): turns = self.copy_part(turns, subset, text, turn) output = {} if afterTurn != 'any': turns_after = {} for text,turn in self.generator(afterTurn, 'turn'): if text in turns: if turn+1 in turns[text]: turns_after = self.copy_part(turns_after, turns, text, turn+1) turns = turns_after del turns_after if beforeTurn != 'any': turns_before = {} for text,turn in self.generator(beforeTurn, 'turn'): if text in turns: if turn-1 in turns[text]: turns_before = self.copy_part(turns_before, turns, text, turn-1) turns = turns_before del turns_before IDs = [0] + self.getParticipants(participants, 'ID') if turnlist != 'all': turnlist = self.get_parameters(turnlist) for text,turn in self.generator(turns,'turn'): accept = True if turn+offset in turns[text]: if turns[text][turn]['ID'] not in IDs: accept = False if turnlist != 'all': if turn + offset not in turnlist: accept = False ius = [iu for iu in turns[text][turn+offset] if (type(iu) == int)] if ius != []: start = turns[text][turn+offset][min(ius)]['start'] end 
= turns[text][turn+offset][max(ius)]['end'] if type(maxlength) in [int, float]: if end - start > maxlength: accept = False if type(minlength) in [int, float]: if end - start < minlength: accept = False if type(at) in [int, float]: if at < start or at > end: accept = False if type(after) in [int, float]: if start < after: accept = False if type(before) in [int, float]: if end > before: accept = False elif (maxlength,minlength,at,after,before) != ('any','any','any','any','any'): accept = False print(ius) else: accept = False if accept == True: output = self.copy_part(output,turns,text,turn+offset) return output def getIUs(self, subset=None, textlist='all', turnlist='all', IUlist='all', participants='all', containing='any', after='any', at='any', before='any', maxlength='any', minlength='any'): subset = self.getTexts(subset=subset,participants=participants,textlist=textlist) if (turnlist,participants) != ('any','all'): subset = self.getTurns(subset, turnlist=turnlist, participants=participants) IUs = {} if containing != 'any': words = self.getWords(subset=subset,containing=containing) for text,turn,IU in self.generator(words,'IU'): IUs = self.copy_part(IUs, subset, text, turn, IU) subset = IUs del IUs output={} if IUlist != 'all': IUlist = self.get_parameters(IUlist) for text,turn,IU in self.generator(subset, 'IU'): iu = subset[text][turn][IU] accept = True if IUlist != 'all': if IU not in IUlist: accept = False if type(maxlength) in [int, float]: if iu['end'] - iu['start'] > maxlength: accept = False if type(minlength) in [int, float]: if iu['end'] - iu['start'] < minlength: accept = False if type(at) in [int, float]: if at < iu['start'] or at > iu['end']: accept = False if type(after) in [int, float]: if iu['start'] < after: accept = False if type(before) in [int, float]: if iu['end'] > before: accept = False if accept == True: output = self.copy_part(output, subset, text, turn, IU) return output def getWords(self, subset=None, textlist='all', turnlist='all', 
IUlist='all', participants='all', containing='all', tier='dt', aslist=False, unit='word', fromstart='any', fromend='any'): output = {} subset = self.getIUs(subset=subset, textlist=textlist, turnlist=turnlist, IUlist=IUlist, participants=participants) if containing != 'all': containing,remove = self.get_parameters(containing, negative=True) for text,turn,IU,word in self.generator(subset, 'word'): accept = True if type(fromstart) == int: if word > fromstart: accept = False if type(fromend) == int: for i in range(0,fromend): if word+i not in subset[text][turn][IU]: accept = False if containing != 'all': for search in containing: for term in containing[search]: if search.lower() in ['dt','word']: if term[0:2] == "r'" and term[-1] == "'": if re.match(term[2:-1],subset[text][turn][IU][word][search.lower()]) == None: accept = False elif term != subset[text][turn][IU][word][search.lower()]: accept = False elif search.lower() == 'manner': if term not in subset[text][turn][IU][word]['manner'] and term.upper() not in subset[text][turn][IU][word]['manner']: accept = False elif search == 'POS': if term != subset[text][turn][IU][word]['POS']: accept = False for search in remove: for term in remove[search]: if search.lower() in ['dt','word']: if term[0:2] == "r'" and term[-1] == "'": if re.match(term[2:-1],subset[text][turn][IU][word][search.lower()]) != None: accept = False elif term == subset[text][turn][IU][word][search.lower()]: accept = False elif search.lower() == 'manner': if term not in subset[text][turn][IU][word]['manner'] and term.upper() not in subset[text][turn][IU][word]['manner']: accept = False elif search == 'POS': if term != subset[text][turn][IU][word]['POS']: accept = False if accept == True: output = self.copy_part(output, subset, text, turn, IU, word) if aslist == True: if unit == 'IU': output = self.listWords(output, tier=tier, IUs=True) elif unit == 'word': output = self.listWords(output, tier=tier, IUs=False) return output def combineSubsets(self, 
excerpt1, excerpt2=None): if type(excerpt1) == list: output = self.copy_object(excerpt1[0]) for subset in excerpt1[1:]: for text,turn,IU,word in self.generator(subset, 'word'): output = self.copy_part(output,subset,text,turn,IU,word) elif type(excerpt1) == dict and excerpt2 != None: output = self.copy_object(excerpt1) for text,turn,IU,word in self.generator(excerpt2, 'word'): output = self.copy_part(output,excerpt2,text,turn,IU,word) return output def getWindow(self, subset='all', castunit='IU', outputunit='IU', size=10, shift=5): output = {} complete = [0] if subset == 'all': subset = self.getTexts() if castunit in ['millisecond', 'ms']: size = float(size)/1000.000 shift = float(shift)/1000.000 minus = 0.001 castunit = 's' if outputunit == 'word': print('No data for timing of words. Returning output in IUs.') return self.getWindow(subset=subset,castunit=castunit,outputunit='IU',size=size,shift=shift) elif castunit in ['second', 's']: minus = 1.000 castunit = 's' if outputunit == 'word': print('No data for timing of words. Returning output in IUs.') return self.getWindow(subset=subset,castunit=castunit,outputunit='IU',size=size,shift=shift) elif castunit in ['minute', 'm']: size = float(size)*60.000 shift = float(shift)*60.000 minus = 60.000 castunit = 's' if outputunit == 'word': print('No data for timing of words. 
Returning output in IUs.') return self.getWindow(subset=subset,castunit=castunit,outputunit='IU',size=size,shift=shift) elif castunit == 'word': if outputunit == 'IU': words = self.getWindow(subset=subset, castunit=castunit, outputunit='word', size=size, shift=shift) for window in range(0,len(words)): if window+1 not in output: output[window+1] = {} for text,turn,iu in self.generator(words[window], 'IU'): output[window+1] = self.copy_part(output[window+1], subset, text, turn, iu) elif outputunit == 'turn': words = self.getWindows(subset=subset, castunit=castunit, outputunit='word', size=size, shift=shift) for window in range(0,len(words)): if window+1 not in output: output[window+1] = {} for text,turn in self.generator(words[window], 'turn'): output[window+1] = self.copy_part(output[window+1], subset, text, turn) elif castunit == 'IU': if outputunit == 'turn': IUs = self.getWindows(subset=subset, castunit=castunit, outputunit='IU', size=size, shift=shift) for window in range(0,len(IUs)): if window+1 not in output: output[window+1] = {} for text,turn in self.generator(IUs[window], 'turn'): output[window+1] = self.copy_part(output[window+1], subset, text, turn) if outputunit == 'word': return self.getWindow(subset=subset, castunit=castunit, outputunit='IU', size=size, shift=shift) elif castunit == 'turn': if outputunit in ('IU','word'): return self.getWindow(subset=subset, castunit=castunit, outputunit='turn', size=size, shift=shift) point = size for text in self.generator(subset,'text'): if outputunit == 'word': if castunit == 'word': complete = [0] wordno=0 for txt,trn,iu,wd in self.generator({text:subset[text]},'word'): end = False i = max(complete)+1 while end == False: number = size*2 if i not in output: output[i] = {} isize = len(self.listWords(words=output[i], IUs=False)) point = size + ((i-1) * shift) if number == isize or wordno > size + ((i-1) * shift) + size: complete.append(i) elif point + size >= wordno and point - size <= wordno: output[i] = 
self.copy_part(output[i], subset, text, trn, iu, wd) else: end = True i+=1 wordno += 1 elif outputunit == 'IU': i = 1 end = False if castunit == 'IU': while end == False: minIU = str(point - size) maxIU = str(point + size - 1) IUrange = ':'.join([minIU,maxIU]) window = self.getIUs(subset=subset,textlist=text,IUlist=IUrange) if window == {}: end = True else: output[i] = window i+=1 point += shift elif castunit == 's': while end == False: minIU = point - size maxIU = point + size - minus window = self.combineSubsets([self.getIUs(subset=subset,textlist=text,after=minIU,before=maxIU), self.getIUs(subset=subset,textlist=text,at=minIU), self.getIUs(subset=subset,textlist=text,at=maxIU)]) if window == {}: end = True else: output[i] = window i+=1 point += shift elif outputunit == 'turn': i = 1 end = False if castunit == 's': while end == False: minturn = point - size maxturn = point + size - minus window = self.combineSubsets([self.getTurns(subset=subset,textlist=text,after=minturn,before=maxturn), self.getTurns(subset=subset,textlist=text,at=minturn), self.getTurns(subset=subset,textlist=text,at=maxturn)]) if window == {}: end = True else: output[i] = window i+=1 point += shift return [output[window] for window in output if output[window] != {}] def printSubset(self, subset, title=True, tier='dt', timestamps=True, labels=True, numberedlines=False, decimal=3): output = '' for text in subset: output += '\n' header = subset[text]['name']+' (' turns = [t for t in list(subset[text]) if (type(t) == int)] turns.sort() turn1 = min(turns) IUlist = [iu for iu in list(subset[text][turn1]) if (type(iu) == int)] IUlist.sort() IU1 = min(IUlist) lstturn = max(turns) IUlist = [iu for iu in list(subset[text][lstturn]) if (type(iu) == int)] lstIU = max(IUlist) header += self.format_time(subset[text][turn1][IU1]['start'], decimal) + ' - ' + self.format_time(subset[text][lstturn][lstIU]['end'], decimal) + ')\n' if title == True: output += header for turn in turns: IUlist = [i for i in 
subset[text][turn] if (type(i) == int)] IUlist.sort() label = subset[text][turn]['ID'] if label == 0: label = 'OTHER' else: label = self.getParticipants('ID='+str(label),'NAME')[-1] IU1 = min(IUlist) turn_start = self.format_time(subset[text][turn][IU1]['start'], decimal) turn_end = self.format_time(subset[text][turn][IU1]['end'], decimal) if numberedlines == True: output += str(IU1)+'\t' if timestamps == True: output += turn_start + '\t' + turn_end + '\t' if labels == True: output += label + ';\t' IUtext = [] words = [wd for wd in list(subset[text][turn][IU1]) if (type(wd) == int)] words.sort() for word in words: IUtext += [subset[text][turn][IU1][word][tier]] output += ' '.join(IUtext) + '\n' if len(IUlist) > 1: for IU in IUlist[1:]: IUtext = [] turn_start = self.format_time(subset[text][turn][IU]['start'], decimal) turn_end = self.format_time(subset[text][turn][IU]['end'], decimal) if numberedlines == True: output += str(IU)+'\t' if timestamps == True: output += turn_start + '\t' + turn_end + '\t' if labels == True: output += '\t' IUtext = [] words = [wd for wd in list(subset[text][turn][IU]) if (type(wd) == int)] words.sort() for word in words: IUtext += [subset[text][turn][IU][word][tier]] for word in IUtext: pass output += ' '.join(IUtext) + '\n' print(output) def listWords(self, words='all', tier='dt', IUs=True): wordlist=[] IUlist=[] if words == 'all': words == getTexts() prevIU = int for text,turn,IU,word in self.generator(words,'word'): if IUs == True: if prevIU != IU: if prevIU != int: wordlist += [[IUlist]] IUlist = [] IUlist += [words[text][turn][IU][word][tier]] prevIU = IU else: wordlist += [words[text][turn][IU][word][tier]] if IUlist != []: wordlist += IUlist return wordlist
en
0.164391
#!/usr/bin/python3 #from __future__ import print_function #from xml import * #self.corpus = SBCorpus.xmlreader()
2.741103
3
DetectorSendDataSrc/ConfigManager.py
KeenHausOrg/DetectorSendData
0
6613271
import json


class ConfigManager:
    """Load Twilio and Imgur credentials from the local ``ChrisSecrets.json`` file.

    The class-level attributes document the expected fields and provide empty
    defaults; each instance overwrites them with the values read from the file.
    """

    # Twilio credentials and phone numbers (populated by __init__).
    TWILIO_ACCOUNT_SID = ""
    TWILIO_SECRET = ""
    TWILIO_APIKEY = ""
    TWILIO_RECEIVERNUMBER = ""
    TWILIO_SENDERNUMBER = ""
    # Imgur API access (populated by __init__).
    IMGUR_BEARERTOKEN = ""
    IMGUR_APIURL = ""

    def __init__(self):
        """Read ``ChrisSecrets.json`` from the working directory and fill in the fields.

        Raises:
            FileNotFoundError: if the secrets file is missing.
            KeyError: if a required key is absent from the JSON document.
        """
        with open('ChrisSecrets.json') as f:
            secrets = json.load(f)
        twilio = secrets['twilio']
        self.TWILIO_ACCOUNT_SID = twilio['AccountSID']
        self.TWILIO_SECRET = twilio['Secret']
        self.TWILIO_APIKEY = twilio['APIKey']
        self.TWILIO_RECEIVERNUMBER = twilio['ReceiverPhone']
        self.TWILIO_SENDERNUMBER = twilio['SenderPhone']
        imgur = secrets['imgur']
        self.IMGUR_APIURL = imgur['ApiUrl']
        self.IMGUR_BEARERTOKEN = imgur['BearerToken']
import json class ConfigManager: TWILIO_ACCOUNT_SID = "" TWILIO_SECRET = "" TWILIO_APIKEY = "" TWILIO_RECEIVERNUMBER = "" TWILIO_SENDERNUMBER = "" IMGUR_BEARERTOKEN = "" IMGUR_APIURL = "" def __init__(self): with open('ChrisSecrets.json') as f: data = json.load(f) self.TWILIO_ACCOUNT_SID = data['twilio']['AccountSID'] self.TWILIO_SECRET = data['twilio']['Secret'] self.TWILIO_APIKEY = data['twilio']['APIKey'] self.TWILIO_RECEIVERNUMBER = data['twilio']['ReceiverPhone'] self.TWILIO_SENDERNUMBER = data['twilio']['SenderPhone'] self.IMGUR_APIURL = data['imgur']['ApiUrl'] self.IMGUR_BEARERTOKEN = data['imgur']['BearerToken']
none
1
2.744715
3
flatcoke/settings/base.py
flatcoke/django-structure
6
6613272
"""
Django settings for flatcoke project.

Generated by 'django-admin startproject' using Django 2.0.1.

For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import datetime

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', '<KEY>')

# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = ["*"]
INTERNAL_IPS = ['127.0.0.1']

STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    'debug_toolbar',
    'api',
    'api.users',
    'api.posts',
    'rest_framework',  # For rest api
    'rest_framework_swagger',  # Doc
    'cacheops',  # ORM cache
    'corsheaders',  # for CORS
    # https://github.com/celery/django-celery-results/issues/19
    'django_celery_results',  # Worker result (It doesn't work)
    'django_celery_beat',  # Worker scheduler
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'api.middleware.Response403To401Middleware'
]

ROOT_URLCONF = 'flatcoke.urls'

AUTH_USER_MODEL = 'users.User'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'flatcoke.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
db_user = os.environ.get('DB_USER', 'cola')
db_password = os.environ.get('DB_PASSWORD', '<PASSWORD>')

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': os.environ.get('DATABASE', 'flatcoke'),
        'USER': db_user,
        # BUG FIX: a bare placeholder token was left here (NameError/SyntaxError);
        # use the password read from the environment above.
        'PASSWORD': db_password,
        'HOST': os.environ.get('DB_HOST', '127.0.0.1'),
        'PORT': os.environ.get('DB_PORT', '3306'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': ('django.contrib.auth.password_validation.'
                 'UserAttributeSimilarityValidator'),
    },
    {
        'NAME': ('django.contrib.auth.password_validation.'
                 'MinimumLengthValidator'),
    },
    {
        'NAME': ('django.contrib.auth.password_validation.'
                 'CommonPasswordValidator'),
    },
    {
        'NAME': ('django.contrib.auth.password_validation.'
                 'NumericPasswordValidator'),
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'

LOGGING = {
    'version': 1,
    'filters': {
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'filters': ['require_debug_true'],
            'class': 'logging.StreamHandler',
        }
    },
    'loggers': {
        'django.db.backends': {
            'level': 'DEBUG',
            'handlers': ['console'],
        }
    }
}

REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': ('rest_framework.pagination.'
                                 'PageNumberPagination'),
    'PAGE_SIZE': 10,
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.BasicAuthentication',
    ),
}

SWAGGER_SETTINGS = {
    'SECURITY_DEFINITIONS': {
        'api_key': {
            'type': 'apiKey',
            'in': 'header',
            'name': 'Authorization'
        }
    },
    'USE_SESSION_AUTH': True,
    'LOGIN_URL': 'rest_framework:login',
    'LOGOUT_URL': 'rest_framework:logout',
}

SHELL_PLUS_PRINT_SQL = True  # commend shell_plug with logging sql

SITE_ID = 1

CACHEOPS_REDIS = {
    'host': os.environ.get('REDIS_HOST', 'localhost'),
    'port': os.environ.get('REDIS_PORT', '6379'),
    'db': os.environ.get('REDIS_DB', 0),
}

CACHEOPS_DEFAULTS = {
    'timeout': 60 * 60
}

CACHEOPS = {
    'users.user': {'ops': 'get', 'timeout': 60 * 15},
    '*.*': {},
}

JWT_AUTH = {
    'JWT_ENCODE_HANDLER': 'rest_framework_jwt.utils.jwt_encode_handler',
    'JWT_DECODE_HANDLER': 'rest_framework_jwt.utils.jwt_decode_handler',
    'JWT_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_payload_handler',
    'JWT_PAYLOAD_GET_USER_ID_HANDLER':
        'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
    'JWT_RESPONSE_PAYLOAD_HANDLER':
        'rest_framework_jwt.utils.jwt_response_payload_handler',
    'JWT_SECRET_KEY': os.environ.get('SECRET_KEY', 'secret'),
    'JWT_GET_USER_SECRET_KEY': None,
    'JWT_PUBLIC_KEY': None,
    'JWT_PRIVATE_KEY': None,
    'JWT_ALGORITHM': 'HS256',
    'JWT_VERIFY': True,
    'JWT_VERIFY_EXPIRATION': True,
    'JWT_LEEWAY': 0,
    'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=7200),
    'JWT_AUDIENCE': None,
    'JWT_ISSUER': None,
    'JWT_ALLOW_REFRESH': True,
    'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),
    'JWT_AUTH_HEADER_PREFIX': 'JWT',
    'JWT_AUTH_COOKIE': None,
}

CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', 'redis://redis:6379/0')
CELERY_RESULT_BACKEND = 'django-db'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Seoul'

CORS_ORIGIN_ALLOW_ALL = True
""" Django settings for flatcoke project. Generated by 'django-admin startproject' using Django 2.0.1. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os import datetime # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get('SECRET_KEY', '<KEY>') # SECURITY WARNING: don't run with debug turned on in production! ALLOWED_HOSTS = ["*"] INTERNAL_IPS = ['127.0.0.1'] STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_extensions', 'debug_toolbar', 'api', 'api.users', 'api.posts', 'rest_framework', # For rest api 'rest_framework_swagger', # Doc 'cacheops', # ORM cache 'corsheaders', # for CORS # https://github.com/celery/django-celery-results/issues/19 'django_celery_results', # Worker result (It doesn't work) 'django_celery_beat', # Worker scheduler ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'corsheaders.middleware.CorsMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', 'api.middleware.Response403To401Middleware' ] ROOT_URLCONF = 
'flatcoke.urls' AUTH_USER_MODEL = 'users.User' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'flatcoke.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases db_user = os.environ.get('DB_USER', 'cola') db_password = os.environ.get('DB_PASSWORD', '<PASSWORD>') DATABASES = { 'default': { 'ENGINE': 'django.db.backends.mysql', 'NAME': os.environ.get('DATABASE', 'flatcoke'), 'USER': db_user, 'PASSWORD': <PASSWORD>, 'HOST': os.environ.get('DB_HOST', '127.0.0.1'), 'PORT': os.environ.get('DB_PORT', '3306'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': ('django.contrib.auth.password_validation.' 'UserAttributeSimilarityValidator'), }, { 'NAME': ('django.contrib.auth.password_validation.' 'MinimumLengthValidator'), }, { 'NAME': ('django.contrib.auth.password_validation.' 'CommonPasswordValidator'), }, { 'NAME': ('django.contrib.auth.password_validation.' 
'NumericPasswordValidator'), }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' LOGGING = { 'version': 1, 'filters': { 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue', } }, 'handlers': { 'console': { 'level': 'DEBUG', 'filters': ['require_debug_true'], 'class': 'logging.StreamHandler', } }, 'loggers': { 'django.db.backends': { 'level': 'DEBUG', 'handlers': ['console'], } } } REST_FRAMEWORK = { 'DEFAULT_PAGINATION_CLASS': ('rest_framework.pagination.' 'PageNumberPagination'), 'PAGE_SIZE': 10, 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated', ), 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework_jwt.authentication.JSONWebTokenAuthentication', 'rest_framework.authentication.SessionAuthentication', 'rest_framework.authentication.BasicAuthentication', ), } SWAGGER_SETTINGS = { 'SECURITY_DEFINITIONS': { 'api_key': { 'type': 'apiKey', 'in': 'header', 'name': 'Authorization' } }, 'USE_SESSION_AUTH': True, 'LOGIN_URL': 'rest_framework:login', 'LOGOUT_URL': 'rest_framework:logout', } SHELL_PLUS_PRINT_SQL = True # commend shell_plug with logging sql SITE_ID = 1 CACHEOPS_REDIS = { 'host': os.environ.get('REDIS_HOST', 'localhost'), 'port': os.environ.get('REDIS_PORT', '6379'), 'db': os.environ.get('REDIS_DB', 0), } CACHEOPS_DEFAULTS = { 'timeout': 60 * 60 } CACHEOPS = { 'users.user': {'ops': 'get', 'timeout': 60 * 15}, '*.*': {}, } JWT_AUTH = { 'JWT_ENCODE_HANDLER': 'rest_framework_jwt.utils.jwt_encode_handler', 'JWT_DECODE_HANDLER': 'rest_framework_jwt.utils.jwt_decode_handler', 'JWT_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_payload_handler', 'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler', 'JWT_RESPONSE_PAYLOAD_HANDLER': 
'rest_framework_jwt.utils.jwt_response_payload_handler', 'JWT_SECRET_KEY': os.environ.get('SECRET_KEY', 'secret'), 'JWT_GET_USER_SECRET_KEY': None, 'JWT_PUBLIC_KEY': None, 'JWT_PRIVATE_KEY': None, 'JWT_ALGORITHM': 'HS256', 'JWT_VERIFY': True, 'JWT_VERIFY_EXPIRATION': True, 'JWT_LEEWAY': 0, 'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=7200), 'JWT_AUDIENCE': None, 'JWT_ISSUER': None, 'JWT_ALLOW_REFRESH': True, 'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7), 'JWT_AUTH_HEADER_PREFIX': 'JWT', 'JWT_AUTH_COOKIE': None, } CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', 'redis://redis:6379/0') CELERY_RESULT_BACKEND = 'django-db' CELERY_ACCEPT_CONTENT = ['application/json'] CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT_SERIALIZER = 'json' CELERY_TIMEZONE = 'Asia/Seoul' CORS_ORIGIN_ALLOW_ALL = True
en
0.661927
Django settings for flatcoke project. Generated by 'django-admin startproject' using Django 2.0.1. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition # For rest api # Doc # ORM cache # for CORS # https://github.com/celery/django-celery-results/issues/19 # Worker result (It doesn't work) # Worker scheduler # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ # commend shell_plug with logging sql
1.743474
2
hihope_neptune-oh_hid/00_src/v0.1/device/winnermicro/neptune/w800_adapter/copy_library.py
dawmlight/vendor_oh_fun
1
6613273
#! usr/bin/env python

import sys
import shutil
import os
import argparse


def main():
    """Copy a prebuilt library file from --source to --target.

    Returns:
        None on success, -1 on failure -- suitable for sys.exit().
    """
    parser = argparse.ArgumentParser(description='Copy prebuilt library to out dir.')
    parser.add_argument('--source', help='Source file')
    parser.add_argument('--target', help='Target file')
    args = parser.parse_args()
    source = args.source
    target = args.target
    # BUG FIX: the original tested `(source or target) is None`, which is only
    # True when source is falsy AND target is None; either argument missing
    # must abort the copy.  (Also fixed the "taget" typo in the message.)
    if source is None or target is None:
        print("Source file or target file is None.")
        return -1
    print("source is {}".format(source))
    print("target is {}".format(target))
    try:
        shutil.copyfile(source, target)
    except Exception as e:
        print("Copy {} to {} failed: {}".format(source, target, e))
        return -1
    return None


if __name__ == "__main__":
    sys.exit(main())
#! usr/bin/env python import sys import shutil import os import argparse def main(): parser = argparse.ArgumentParser(description='Copy prebuilt library to out dir.') parser.add_argument('--source', help='Source file') parser.add_argument('--target', help='Target file') args = parser.parse_args() source = args.source target = args.target if (source or target) is None: print("Source file or taget file is None.") return -1 print("source is {}".format(source)) print("target is {}".format(target)) try: shutil.copyfile(source, target) except Exception as e: print("Copy {} to {} failed: {}".format(source, target, e)) return -1 else: return if __name__ == "__main__": sys.exit(main())
hu
0.132117
#! usr/bin/env python
2.745389
3
auths/email.py
horsehair/unimate
0
6613274
def message(domain, uidb, token):
    """Build the (Korean) school-email verification message containing the activation link."""
    link = "http://{}/activate/{}/{}".format(domain, uidb, token)
    return (
        "아래 링크를 클릭하면 학교 이메일 인증이 완료됩니다."
        + "\n\n회원가입 링크 : " + link
        + "\n\n감사합니다."
    )
def message(domain, uidb, token): return f"아래 링크를 클릭하면 학교 이메일 인증이 완료됩니다.\n\n회원가입 링크 : http://{domain}/activate/{uidb}/{token}\n\n감사합니다."
none
1
1.401092
1
examples/example-plugin/ExampleWidget.py
physimals/quantiphyse
9
6613275
<reponame>physimals/quantiphyse """ Quantiphyse - Example processing widget Copyright (c) 2013-2018 University of Oxford """ from __future__ import division, unicode_literals, absolute_import, print_function import sys, os, warnings import time import traceback import re import tempfile import nibabel as nib import numpy as np import pyqtgraph as pg from PySide import QtCore, QtGui from quantiphyse.utils import QpException, debug from quantiphyse.gui.widgets import QpWidget, BatchButton, HelpButton DESC = """This is an example of how to create a plugin widget""" class ExamplePluginWidget(QpWidget): def __init__(self, **kwargs): super(ExamplePluginWidget, self).__init__(name="Example Plugin", desc="An example plugin", group="Examples", **kwargs) def init_ui(self): layout = QtWidgets.QVBoxLayout() self.setLayout(layout) hbox = QtWidgets.QHBoxLayout() hbox.addWidget(QtWidgets.QLabel('<font size="5">Example Plugin Widget</font>')) hbox.addStretch(1) hbox.addWidget(BatchButton(self)) hbox.addWidget(HelpButton(self, "example_plugin")) layout.addLayout(hbox) desc = QtWidgets.QLabel(DESC) desc.setWordWrap(True) layout.addWidget(desc) layout.addWidget(QtWidgets.QLabel("")) def activate(self): self.ivm.sig_all_data.connect(self.data_changed) def deactivate(self): self.ivm.sig_all_data.disconnect(self.data_changed) def data_changed(self, data): pass def batch_options(self): return "ExamplePluginProcess", {} class ExampleWidget(QpWidget): """ Widget for setting a threshold to the image data inside the ROI. 
This is saved as data """ def __init__(self, **kwargs): super(ExampleWidget, self).__init__(name="Threshold", desc="Threshold data", icon="quantiphyse", **kwargs) def init_ui(self): main_vbox = QtWidgets.QVBoxLayout() hbox = QtWidgets.QHBoxLayout() hbox.addWidget(QtWidgets.QLabel('<font size="5">Threshold volume</font>')) hbox.addStretch(1) hbox.addWidget(HelpButton(self)) main_vbox.addLayout(hbox) explanation = QtWidgets.QLabel('This is a basic example of a \n' 'widget for development purposes. \n' 'A DCE-MRI image and ROI are \n' 'loaded normally and clicking run \n' 'creates a data set which only shows values \n' 'in the ROI above a defined threshold.') main_vbox.addWidget(explanation) hbox = QtWidgets.QHBoxLayout() self.b1 = QtWidgets.QPushButton('Process', self) self.b1.clicked.connect(self.run_threshold) hbox.addWidget(self.b1) hbox.addStretch(1) hbox.addWidget(QtWidgets.QLabel('ROI threshold value:')) self.val_t1 = QtWidgets.QLineEdit('1', self) hbox.addWidget(self.val_t1) main_vbox.addLayout(hbox) hbox = QtWidgets.QHBoxLayout() hbox.addStretch(1) hbox.addWidget(QtWidgets.QLabel('Slice to threshold:')) self.val_s1 = QtWidgets.QLineEdit('0', self) hbox.addWidget(self.val_s1) main_vbox.addLayout(hbox) main_vbox.addStretch(1) self.setLayout(main_vbox) def run_threshold(self): # Check if an image and roi exists or otherwise throw an error if self.ivm.vol is None: raise QpException("No data loaded") if self.ivm.current_roi is None: raise QpException("No ROI loaded") slice = int(self.val_s1.text()) thresh = float(self.val_t1.text()) img = self.ivm.vol[:, :, :, slice1] img = img * (self.ivm.current_roi > 0) img[img1 < thresh] = 0 self.ivm.add_data(img, name='thresh', make_current=True)
""" Quantiphyse - Example processing widget Copyright (c) 2013-2018 University of Oxford """ from __future__ import division, unicode_literals, absolute_import, print_function import sys, os, warnings import time import traceback import re import tempfile import nibabel as nib import numpy as np import pyqtgraph as pg from PySide import QtCore, QtGui from quantiphyse.utils import QpException, debug from quantiphyse.gui.widgets import QpWidget, BatchButton, HelpButton DESC = """This is an example of how to create a plugin widget""" class ExamplePluginWidget(QpWidget): def __init__(self, **kwargs): super(ExamplePluginWidget, self).__init__(name="Example Plugin", desc="An example plugin", group="Examples", **kwargs) def init_ui(self): layout = QtWidgets.QVBoxLayout() self.setLayout(layout) hbox = QtWidgets.QHBoxLayout() hbox.addWidget(QtWidgets.QLabel('<font size="5">Example Plugin Widget</font>')) hbox.addStretch(1) hbox.addWidget(BatchButton(self)) hbox.addWidget(HelpButton(self, "example_plugin")) layout.addLayout(hbox) desc = QtWidgets.QLabel(DESC) desc.setWordWrap(True) layout.addWidget(desc) layout.addWidget(QtWidgets.QLabel("")) def activate(self): self.ivm.sig_all_data.connect(self.data_changed) def deactivate(self): self.ivm.sig_all_data.disconnect(self.data_changed) def data_changed(self, data): pass def batch_options(self): return "ExamplePluginProcess", {} class ExampleWidget(QpWidget): """ Widget for setting a threshold to the image data inside the ROI. This is saved as data """ def __init__(self, **kwargs): super(ExampleWidget, self).__init__(name="Threshold", desc="Threshold data", icon="quantiphyse", **kwargs) def init_ui(self): main_vbox = QtWidgets.QVBoxLayout() hbox = QtWidgets.QHBoxLayout() hbox.addWidget(QtWidgets.QLabel('<font size="5">Threshold volume</font>')) hbox.addStretch(1) hbox.addWidget(HelpButton(self)) main_vbox.addLayout(hbox) explanation = QtWidgets.QLabel('This is a basic example of a \n' 'widget for development purposes. 
\n' 'A DCE-MRI image and ROI are \n' 'loaded normally and clicking run \n' 'creates a data set which only shows values \n' 'in the ROI above a defined threshold.') main_vbox.addWidget(explanation) hbox = QtWidgets.QHBoxLayout() self.b1 = QtWidgets.QPushButton('Process', self) self.b1.clicked.connect(self.run_threshold) hbox.addWidget(self.b1) hbox.addStretch(1) hbox.addWidget(QtWidgets.QLabel('ROI threshold value:')) self.val_t1 = QtWidgets.QLineEdit('1', self) hbox.addWidget(self.val_t1) main_vbox.addLayout(hbox) hbox = QtWidgets.QHBoxLayout() hbox.addStretch(1) hbox.addWidget(QtWidgets.QLabel('Slice to threshold:')) self.val_s1 = QtWidgets.QLineEdit('0', self) hbox.addWidget(self.val_s1) main_vbox.addLayout(hbox) main_vbox.addStretch(1) self.setLayout(main_vbox) def run_threshold(self): # Check if an image and roi exists or otherwise throw an error if self.ivm.vol is None: raise QpException("No data loaded") if self.ivm.current_roi is None: raise QpException("No ROI loaded") slice = int(self.val_s1.text()) thresh = float(self.val_t1.text()) img = self.ivm.vol[:, :, :, slice1] img = img * (self.ivm.current_roi > 0) img[img1 < thresh] = 0 self.ivm.add_data(img, name='thresh', make_current=True)
en
0.725523
Quantiphyse - Example processing widget Copyright (c) 2013-2018 University of Oxford This is an example of how to create a plugin widget Widget for setting a threshold to the image data inside the ROI. This is saved as data # Check if an image and roi exists or otherwise throw an error
2.419493
2
src/nmdc/scripts/__main__.py
microbiomedata/pynmdc
0
6613276
"""
CLI tools for NMDC
"""
import click
import json
import jsonschema
from jsonschema import validate

from nmdc import __version__
from nmdc.scripts.gff2json import NMDCGFFLoader


@click.group(help=f"""NMDC Tools v{__version__}.""")
def nmdccli():
    """
    NMDC command line tools.
    """
    pass


@nmdccli.command()
@click.argument('gff', type=click.File('r'))
@click.option('-of', help='output file name for genome feature json',
              required=True, type=click.File(mode='w'))
@click.option('-oa', help='output file name for functional annotation json',
              required=True, type=click.File(mode='w'))
@click.option('-ai', help='Activity index', required=True, type=str)
def gff2json(gff, of, oa, ai):
    """
    Convert GFF3 to NMDC JSON format.

    Writes the genome features to -of and the flattened functional
    annotations to -oa as pretty-printed JSON.
    """
    INDENT = 2
    converter = NMDCGFFLoader(gff, ai)
    jobj = converter.model
    features = []
    annotations = []
    # converter.model maps record -> feature -> entry dicts; collect the
    # feature sets and flatten each entry's annotation-set values.
    for record in jobj.keys():
        for feature in jobj[record].keys():
            entry = jobj[record][feature]
            features.append(entry['genome_feature_set'])
            annotations.extend(list(entry['functional_annotation_set'].values()))
    of.write(json.dumps({'genome_feature_set': features}, indent=INDENT))
    oa.write(json.dumps({'functional_annotation_set': annotations}, indent=INDENT))


@nmdccli.command('validate')
@click.option('--schema', help='The NMDC metadata JSON Schema',
              required=True, type=click.Path())
@click.option('--file', help='The file with JSON data to validate',
              required=True, type=click.Path())
def validate_json(schema, file):
    """
    Validate JSON as per NMDC Metadata schema.
    """
    # BUG FIX: the original passed json.load(open(...)) directly, leaking
    # both file handles; load the documents with context managers instead.
    with open(file) as data_fp:
        instance = json.load(data_fp)
    with open(schema) as schema_fp:
        schema_doc = json.load(schema_fp)
    try:
        validate(instance=instance, schema=schema_doc)
        print("Given JSON data is valid")
    except jsonschema.exceptions.ValidationError as err:
        print(err)
        print("Given JSON data is not valid")


if __name__ == '__main__':
    nmdccli()
""" CLI tools for NMDC """ import click import json import jsonschema from jsonschema import validate from nmdc import __version__ from nmdc.scripts.gff2json import NMDCGFFLoader @click.group(help=f"""NMDC Tools v{__version__}.""") def nmdccli(): """ NMDC command line tools. """ pass @nmdccli.command() @click.argument('gff', type=click.File('r')) @click.option('-of', help='output file name for genome feature json', required=True, type=click.File(mode='w')) @click.option('-oa', help='output file name for functioanl annotation json', required=True, type=click.File(mode='w')) @click.option('-ai', help='Activity index', required=True, type=str) def gff2json(gff, of, oa, ai): """ Convert GFF3 to NMDC JSON format. """ INDENT = 2 converter = NMDCGFFLoader(gff, ai) jobj = converter.model features = [] annotations = [] for record in jobj.keys(): for feature in jobj[record].keys(): entry = jobj[record][feature] features.append(entry['genome_feature_set']) annotations.extend(list(entry['functional_annotation_set'].values())) of.write(json.dumps({'genome_feature_set': features}, indent=INDENT)) oa.write(json.dumps({'functional_annotation_set': annotations}, indent=INDENT)) @nmdccli.command('validate') @click.option('--schema', help='The NMDC metadata JSON Schema', required=True, type=click.Path()) @click.option('--file', help='The file with JSON data to validate', required=True, type=click.Path()) def validate_json(schema, file): """ Validate JSON as per NMDC Metadata schema. """ try: validate(instance=json.load(open(file)), schema=json.load(open(schema))) print("Given JSON data is valid") except jsonschema.exceptions.ValidationError as err: print(err) print("Given JSON data is not valid") if __name__ == '__main__': nmdccli()
en
0.61103
CLI tools for NMDC NMDC Tools v{__version__}. NMDC command line tools. Convert GFF3 to NMDC JSON format. Validate JSON as per NMDC Metadata schema.
2.395115
2
modu/tests/slurm-test-shell.py
zamalchi/script-runner-bottle-app
0
6613277
<gh_stars>0 #!/usr/bin/env python # from __future__ import print_function import code import readline # context provides the slurm module # from context import slurm import modu.slurm as slurm import modu.color_printer as cp print("----------------------------------------------------------------------") cp.printWarn("import modu.slurm as slurm") cp.printWarn("import modu.color_printer as cp") cp.printWarn("states = slurm.Slurm.getNonEmptyStates()") print("----------------------------------------------------------------------") states = slurm.Slurm.getNonEmptyStates() vars = globals().copy() vars.update(locals()) shell = code.InteractiveConsole(vars) shell.interact()
#!/usr/bin/env python # from __future__ import print_function import code import readline # context provides the slurm module # from context import slurm import modu.slurm as slurm import modu.color_printer as cp print("----------------------------------------------------------------------") cp.printWarn("import modu.slurm as slurm") cp.printWarn("import modu.color_printer as cp") cp.printWarn("states = slurm.Slurm.getNonEmptyStates()") print("----------------------------------------------------------------------") states = slurm.Slurm.getNonEmptyStates() vars = globals().copy() vars.update(locals()) shell = code.InteractiveConsole(vars) shell.interact()
en
0.308232
#!/usr/bin/env python # from __future__ import print_function # context provides the slurm module # from context import slurm
2.205076
2
dscore/plot.py
thodson-usgs/dscore
1
6613278
import matplotlib.pyplot as plt import seaborn as sns tick_label_size = 9 def ilamb_card(scores, vmin=0, vmax=100, component_labels=None, score_label=None, cmap='RdYlBu', reverse_cmap=False, hlines=None, cbar=False, annot_kws={"size": 8}, ax=None): """ Parameters ---------- scores : pandas.DataFrame DataFrame with rows for each component vmin : float vmax : float score_labels : list hlines : list rows to place horizontal lines """ if ax is None: fig, ax = plt.subplots(1) if component_labels is None: component_labels = scores.index # if score_label is None and hasattr(scores, 'name'): # score_label = scores.name g = sns.heatmap(scores.round().astype(int), vmin=vmin, vmax=vmax, yticklabels=component_labels, annot=True, fmt='d', annot_kws=annot_kws, cmap=cmap, cbar=cbar, square=True, ax=ax) ax.set_xticklabels( ax.get_xticklabels(), rotation=90) if hlines is not None: ax.hlines(hlines, *ax.get_xlim(), colors='k') ax.xaxis.set_ticks_position('top') ax.xaxis.set_label_position('top') ax.set_xlabel(score_label) ax.set_ylabel('Component') ax.tick_params(axis='y', labelsize=tick_label_size) ax.tick_params(axis='x', labelsize=tick_label_size) return g def multi_panel_card(score_cards, score_labels, component_labels=None, hlines=None, figsize=None, tight_layout=True): """Helper function for plotting multiple score cards with the same components """ fig, ax = plt.subplots(1, len(score_cards), figsize=figsize) fig.subplots_adjust(hspace=0) for i, score in enumerate(score_cards): ilamb_card(score, score_label=score_labels[i], hlines=hlines, ax=ax[i]) #import pdb; pdb.set_trace() if not ax[i].get_subplotspec().is_first_col(): ax[i].set_yticklabels([]) ax[i].tick_params(left=False) ax[i].set_ylabel('') fig.tight_layout() return fig
import matplotlib.pyplot as plt import seaborn as sns tick_label_size = 9 def ilamb_card(scores, vmin=0, vmax=100, component_labels=None, score_label=None, cmap='RdYlBu', reverse_cmap=False, hlines=None, cbar=False, annot_kws={"size": 8}, ax=None): """ Parameters ---------- scores : pandas.DataFrame DataFrame with rows for each component vmin : float vmax : float score_labels : list hlines : list rows to place horizontal lines """ if ax is None: fig, ax = plt.subplots(1) if component_labels is None: component_labels = scores.index # if score_label is None and hasattr(scores, 'name'): # score_label = scores.name g = sns.heatmap(scores.round().astype(int), vmin=vmin, vmax=vmax, yticklabels=component_labels, annot=True, fmt='d', annot_kws=annot_kws, cmap=cmap, cbar=cbar, square=True, ax=ax) ax.set_xticklabels( ax.get_xticklabels(), rotation=90) if hlines is not None: ax.hlines(hlines, *ax.get_xlim(), colors='k') ax.xaxis.set_ticks_position('top') ax.xaxis.set_label_position('top') ax.set_xlabel(score_label) ax.set_ylabel('Component') ax.tick_params(axis='y', labelsize=tick_label_size) ax.tick_params(axis='x', labelsize=tick_label_size) return g def multi_panel_card(score_cards, score_labels, component_labels=None, hlines=None, figsize=None, tight_layout=True): """Helper function for plotting multiple score cards with the same components """ fig, ax = plt.subplots(1, len(score_cards), figsize=figsize) fig.subplots_adjust(hspace=0) for i, score in enumerate(score_cards): ilamb_card(score, score_label=score_labels[i], hlines=hlines, ax=ax[i]) #import pdb; pdb.set_trace() if not ax[i].get_subplotspec().is_first_col(): ax[i].set_yticklabels([]) ax[i].tick_params(left=False) ax[i].set_ylabel('') fig.tight_layout() return fig
en
0.496586
Parameters ---------- scores : pandas.DataFrame DataFrame with rows for each component vmin : float vmax : float score_labels : list hlines : list rows to place horizontal lines # if score_label is None and hasattr(scores, 'name'): # score_label = scores.name Helper function for plotting multiple score cards with the same components #import pdb; pdb.set_trace()
2.852977
3
Tornado/app.py
lowjack1/Bitmap-Tornado
4
6613279
__author__ = "lowjack" import views import tornado.ioloop import tornado.web import tornado.httpserver from settings import port, static_path, template_path class Application(tornado.web.Application): def __init__(self): urls = [ (r"/", views.MainHandler), (r"/home", views.MainHandler), (r"/contact", views.ContactHandler), (r"/about", views.AboutusHandler), (r"/more", views.MoreHandler), (r"/search", views.SearchHandler), ] setting = { "template_path": template_path, "static_path": static_path, "xsrf_cookies": True } tornado.web.Application.__init__(self, urls, **setting) def main(): app = Application() http_server = tornado.httpserver.HTTPServer(app) http_server.listen(port) print("Starting development server at http://127.0.0.1:%d" %(port)) print("Quit the server with CONTROL-C.") try: tornado.ioloop.IOLoop.instance().start() except KeyboardInterrupt: print(" Server Closed.") if __name__ == "__main__": main()
__author__ = "lowjack" import views import tornado.ioloop import tornado.web import tornado.httpserver from settings import port, static_path, template_path class Application(tornado.web.Application): def __init__(self): urls = [ (r"/", views.MainHandler), (r"/home", views.MainHandler), (r"/contact", views.ContactHandler), (r"/about", views.AboutusHandler), (r"/more", views.MoreHandler), (r"/search", views.SearchHandler), ] setting = { "template_path": template_path, "static_path": static_path, "xsrf_cookies": True } tornado.web.Application.__init__(self, urls, **setting) def main(): app = Application() http_server = tornado.httpserver.HTTPServer(app) http_server.listen(port) print("Starting development server at http://127.0.0.1:%d" %(port)) print("Quit the server with CONTROL-C.") try: tornado.ioloop.IOLoop.instance().start() except KeyboardInterrupt: print(" Server Closed.") if __name__ == "__main__": main()
none
1
2.404387
2
app.py
ezronis/fitmetrix-booking-tool
0
6613280
import requests import browsercookie import FitMetrixAPI as fmAPI import utils import data import creds from bs4 import BeautifulSoup # grabbing cookies from chrome browser cj = browsercookie.chrome() s = requests.Session() # setting cookies for this session #s.cookies = cj # logging in and printing title of html response l = fmAPI.login(s, creds.username, creds.password) print(utils.get_html_title(l.content)) # iterating through class names in classes array for keys, values in data.classes.items(): # getting schedule day = utils.get_vip_date(2018, 11, 12, keys) sched = fmAPI.get_schedule(s, day, day) ''' # creating record of schedule for requested date range Html_file = open("html/sched_test" + (day) + ".html","w") Html_file.write(str(sched.content)) Html_file.close() ''' # validating "title" of schedule, <span id="wmrhDateCurr">October 31, 2018</span> soup = BeautifulSoup(sched.content, 'html.parser') print(soup.find(id="wmrhDateCurr").next_element) # getting appointment ids for classes in sched.html apptids = utils.get_apptids(str(sched.content)) print(str(apptids)) for c in values: print('class: '+ c) class_index = [] class_index = utils.get_class_index(str(sched.content), c) print(class_index) for i in class_index: print('index of current class:' + str(i)) apptid = apptids[i] print('apptid:' + str(apptid)) booking = fmAPI.book_spot1(s, apptid, data.spot) print(utils.get_html_title(booking.content)) print('classes have been booked!')
import requests import browsercookie import FitMetrixAPI as fmAPI import utils import data import creds from bs4 import BeautifulSoup # grabbing cookies from chrome browser cj = browsercookie.chrome() s = requests.Session() # setting cookies for this session #s.cookies = cj # logging in and printing title of html response l = fmAPI.login(s, creds.username, creds.password) print(utils.get_html_title(l.content)) # iterating through class names in classes array for keys, values in data.classes.items(): # getting schedule day = utils.get_vip_date(2018, 11, 12, keys) sched = fmAPI.get_schedule(s, day, day) ''' # creating record of schedule for requested date range Html_file = open("html/sched_test" + (day) + ".html","w") Html_file.write(str(sched.content)) Html_file.close() ''' # validating "title" of schedule, <span id="wmrhDateCurr">October 31, 2018</span> soup = BeautifulSoup(sched.content, 'html.parser') print(soup.find(id="wmrhDateCurr").next_element) # getting appointment ids for classes in sched.html apptids = utils.get_apptids(str(sched.content)) print(str(apptids)) for c in values: print('class: '+ c) class_index = [] class_index = utils.get_class_index(str(sched.content), c) print(class_index) for i in class_index: print('index of current class:' + str(i)) apptid = apptids[i] print('apptid:' + str(apptid)) booking = fmAPI.book_spot1(s, apptid, data.spot) print(utils.get_html_title(booking.content)) print('classes have been booked!')
en
0.779951
# grabbing cookies from chrome browser # setting cookies for this session #s.cookies = cj # logging in and printing title of html response # iterating through class names in classes array # getting schedule # creating record of schedule for requested date range Html_file = open("html/sched_test" + (day) + ".html","w") Html_file.write(str(sched.content)) Html_file.close() # validating "title" of schedule, <span id="wmrhDateCurr">October 31, 2018</span> # getting appointment ids for classes in sched.html
2.657457
3
setup.py
consbio/awsecret
0
6613281
<reponame>consbio/awsecret from setuptools import setup from awsecret import VERSION setup( name='awsecret', description='Secure credential storage in S3.', keywords='password,aws,s3,credentials,encryption', version=VERSION, packages=['awsecret'], install_requires=['pycrypto', 'boto', 'click'], url='https://github.com/consbio/awsecret', license='BSD', entry_points={ 'console_scripts': ['awsec=awsecret.cli:main'] } )
from setuptools import setup from awsecret import VERSION setup( name='awsecret', description='Secure credential storage in S3.', keywords='password,aws,s3,credentials,encryption', version=VERSION, packages=['awsecret'], install_requires=['pycrypto', 'boto', 'click'], url='https://github.com/consbio/awsecret', license='BSD', entry_points={ 'console_scripts': ['awsec=awsecret.cli:main'] } )
none
1
1.472923
1
antarest/eventbus/business/interfaces.py
AntaresSimulatorTeam/antaREST
2
6613282
<reponame>AntaresSimulatorTeam/antaREST from abc import abstractmethod from typing import List from antarest.core.interfaces.eventbus import Event class IEventBusBackend: @abstractmethod def push_event(self, event: Event) -> None: pass @abstractmethod def get_events(self) -> List[Event]: pass @abstractmethod def clear_events(self) -> None: pass
from abc import abstractmethod from typing import List from antarest.core.interfaces.eventbus import Event class IEventBusBackend: @abstractmethod def push_event(self, event: Event) -> None: pass @abstractmethod def get_events(self) -> List[Event]: pass @abstractmethod def clear_events(self) -> None: pass
none
1
2.603846
3
launcher.py
Benny84/discord-music-bot-modis
0
6613283
import modis DISCORD_TOKEN = "<KEY>" CLIENT_ID = "380719766471442433" modis.console( discord_token=DISCORD_TOKEN, discord_client_id=CLIENT_ID )
import modis DISCORD_TOKEN = "<KEY>" CLIENT_ID = "380719766471442433" modis.console( discord_token=DISCORD_TOKEN, discord_client_id=CLIENT_ID )
none
1
1.380524
1
bead/archive.py
krisztianfekete/lib
1
6613284
import os import pathlib import re from cached_property import cached_property from tracelog import TRACELOG from .bead import UnpackableBead from . import meta from . import tech from .ziparchive import ZipArchive from .exceptions import InvalidArchive persistence = tech.persistence __all__ = ('Archive', 'InvalidArchive') CACHE_CONTENT_ID = 'content_id' CACHE_INPUT_MAP = 'input_map' def _cached_zip_attribute(cache_key: str, ziparchive_attribute): """Make a cache accessor @property with a self.ziparchive.attribute fallback raises InvalidArchive if the attribute is not cached and the backing ziparchive is not valid. """ def maybe_cached_attr(self): try: return self.cache[cache_key] except LookupError: return getattr(self.ziparchive, ziparchive_attribute) return property(maybe_cached_attr) class Archive(UnpackableBead): def __init__(self, filename, box_name=''): self.archive_filename = filename self.archive_path = pathlib.Path(filename) self.box_name = box_name self.name = bead_name_from_file_path(filename) self.cache = {} self.load_cache() # Check that we can get access to metadata # - either through the cache or through the archive # The resulting archive can still be invalid and die unexpectedly later with # InvalidArchive exception, as these are potentially cached values self.meta_version self.freeze_time self.kind def load_cache(self): try: try: self.cache = persistence.loads(self.cache_path.read_text()) except persistence.ReadError: TRACELOG(f"Ignoring existing, malformed bead meta cache {self.cache_path}") except FileNotFoundError: pass def save_cache(self): try: self.cache_path.write_text(persistence.dumps(self.cache)) except FileNotFoundError: pass @property def cache_path(self): if self.archive_path.suffix != '.zip': raise FileNotFoundError(f'Archive can not have cache {self.archive_path}') return self.archive_path.with_suffix('.xmeta') meta_version = _cached_zip_attribute(meta.META_VERSION, 'meta_version') content_id = 
_cached_zip_attribute(CACHE_CONTENT_ID, 'content_id') kind = _cached_zip_attribute(meta.KIND, 'kind') freeze_time_str = _cached_zip_attribute(meta.FREEZE_TIME, 'freeze_time_str') @property def input_map(self): try: return self.cache[CACHE_INPUT_MAP] except LookupError: return self.ziparchive.input_map @input_map.setter def input_map(self, input_map): self.cache[CACHE_INPUT_MAP] = input_map self.save_cache() @cached_property def ziparchive(self): ziparchive = ZipArchive(self.archive_filename, self.box_name) self._check_and_populate_cache(ziparchive) return ziparchive def _check_and_populate_cache(self, ziparchive): def ensure(cache_key, value): try: if self.cache[cache_key] != value: raise InvalidArchive( 'Cache disagrees with zip meta', self.archive_filename, cache_key) except KeyError: self.cache[cache_key] = value ensure(meta.META_VERSION, ziparchive.meta_version) ensure(CACHE_CONTENT_ID, ziparchive.content_id) ensure(meta.KIND, ziparchive.kind) ensure(meta.FREEZE_TIME, ziparchive.freeze_time_str) ensure(meta.INPUTS, ziparchive.meta[meta.INPUTS]) # need not match self.cache.setdefault(CACHE_INPUT_MAP, ziparchive.input_map) def validate(self): self.ziparchive.validate() @property def inputs(self): try: return tuple(meta.parse_inputs({meta.INPUTS: self.cache[meta.INPUTS]})) except LookupError: return self.ziparchive.inputs def extract_dir(self, zip_dir, fs_dir): return self.ziparchive.extract_dir(zip_dir, fs_dir) def extract_file(self, zip_path, fs_path): return self.ziparchive.extract_file(zip_path, fs_path) def unpack_code_to(self, fs_dir): self.ziparchive.unpack_code_to(fs_dir) def unpack_data_to(self, fs_dir): self.ziparchive.unpack_data_to(fs_dir) def unpack_meta_to(self, workspace): workspace.meta = self.ziparchive.meta workspace.input_map = self.input_map def bead_name_from_file_path(path): ''' Parse bead name from a file path. 
Might return a simpler name than intended ''' name_with_timestamp, ext = os.path.splitext(os.path.basename(path)) # assert ext == '.zip' # not enforced to allow having beads with different extensions name = re.sub('_[0-9]{8}(?:[tT][-+0-9]*)?$', '', name_with_timestamp) return name assert 'bead-2015v3' == bead_name_from_file_path('bead-2015v3.zip') assert 'bead-2015v3' == bead_name_from_file_path('bead-2015v3_20150923.zip') assert 'bead-2015v3' == bead_name_from_file_path('bead-2015v3_20150923T010203012345+0200.zip') assert 'bead-2015v3' == bead_name_from_file_path('bead-2015v3_20150923T010203012345-0200.zip') assert 'bead-2015v3' == bead_name_from_file_path('path/to/bead-2015v3_20150923.zip')
import os import pathlib import re from cached_property import cached_property from tracelog import TRACELOG from .bead import UnpackableBead from . import meta from . import tech from .ziparchive import ZipArchive from .exceptions import InvalidArchive persistence = tech.persistence __all__ = ('Archive', 'InvalidArchive') CACHE_CONTENT_ID = 'content_id' CACHE_INPUT_MAP = 'input_map' def _cached_zip_attribute(cache_key: str, ziparchive_attribute): """Make a cache accessor @property with a self.ziparchive.attribute fallback raises InvalidArchive if the attribute is not cached and the backing ziparchive is not valid. """ def maybe_cached_attr(self): try: return self.cache[cache_key] except LookupError: return getattr(self.ziparchive, ziparchive_attribute) return property(maybe_cached_attr) class Archive(UnpackableBead): def __init__(self, filename, box_name=''): self.archive_filename = filename self.archive_path = pathlib.Path(filename) self.box_name = box_name self.name = bead_name_from_file_path(filename) self.cache = {} self.load_cache() # Check that we can get access to metadata # - either through the cache or through the archive # The resulting archive can still be invalid and die unexpectedly later with # InvalidArchive exception, as these are potentially cached values self.meta_version self.freeze_time self.kind def load_cache(self): try: try: self.cache = persistence.loads(self.cache_path.read_text()) except persistence.ReadError: TRACELOG(f"Ignoring existing, malformed bead meta cache {self.cache_path}") except FileNotFoundError: pass def save_cache(self): try: self.cache_path.write_text(persistence.dumps(self.cache)) except FileNotFoundError: pass @property def cache_path(self): if self.archive_path.suffix != '.zip': raise FileNotFoundError(f'Archive can not have cache {self.archive_path}') return self.archive_path.with_suffix('.xmeta') meta_version = _cached_zip_attribute(meta.META_VERSION, 'meta_version') content_id = 
_cached_zip_attribute(CACHE_CONTENT_ID, 'content_id') kind = _cached_zip_attribute(meta.KIND, 'kind') freeze_time_str = _cached_zip_attribute(meta.FREEZE_TIME, 'freeze_time_str') @property def input_map(self): try: return self.cache[CACHE_INPUT_MAP] except LookupError: return self.ziparchive.input_map @input_map.setter def input_map(self, input_map): self.cache[CACHE_INPUT_MAP] = input_map self.save_cache() @cached_property def ziparchive(self): ziparchive = ZipArchive(self.archive_filename, self.box_name) self._check_and_populate_cache(ziparchive) return ziparchive def _check_and_populate_cache(self, ziparchive): def ensure(cache_key, value): try: if self.cache[cache_key] != value: raise InvalidArchive( 'Cache disagrees with zip meta', self.archive_filename, cache_key) except KeyError: self.cache[cache_key] = value ensure(meta.META_VERSION, ziparchive.meta_version) ensure(CACHE_CONTENT_ID, ziparchive.content_id) ensure(meta.KIND, ziparchive.kind) ensure(meta.FREEZE_TIME, ziparchive.freeze_time_str) ensure(meta.INPUTS, ziparchive.meta[meta.INPUTS]) # need not match self.cache.setdefault(CACHE_INPUT_MAP, ziparchive.input_map) def validate(self): self.ziparchive.validate() @property def inputs(self): try: return tuple(meta.parse_inputs({meta.INPUTS: self.cache[meta.INPUTS]})) except LookupError: return self.ziparchive.inputs def extract_dir(self, zip_dir, fs_dir): return self.ziparchive.extract_dir(zip_dir, fs_dir) def extract_file(self, zip_path, fs_path): return self.ziparchive.extract_file(zip_path, fs_path) def unpack_code_to(self, fs_dir): self.ziparchive.unpack_code_to(fs_dir) def unpack_data_to(self, fs_dir): self.ziparchive.unpack_data_to(fs_dir) def unpack_meta_to(self, workspace): workspace.meta = self.ziparchive.meta workspace.input_map = self.input_map def bead_name_from_file_path(path): ''' Parse bead name from a file path. 
Might return a simpler name than intended ''' name_with_timestamp, ext = os.path.splitext(os.path.basename(path)) # assert ext == '.zip' # not enforced to allow having beads with different extensions name = re.sub('_[0-9]{8}(?:[tT][-+0-9]*)?$', '', name_with_timestamp) return name assert 'bead-2015v3' == bead_name_from_file_path('bead-2015v3.zip') assert 'bead-2015v3' == bead_name_from_file_path('bead-2015v3_20150923.zip') assert 'bead-2015v3' == bead_name_from_file_path('bead-2015v3_20150923T010203012345+0200.zip') assert 'bead-2015v3' == bead_name_from_file_path('bead-2015v3_20150923T010203012345-0200.zip') assert 'bead-2015v3' == bead_name_from_file_path('path/to/bead-2015v3_20150923.zip')
en
0.829691
Make a cache accessor @property with a self.ziparchive.attribute fallback raises InvalidArchive if the attribute is not cached and the backing ziparchive is not valid. # Check that we can get access to metadata # - either through the cache or through the archive # The resulting archive can still be invalid and die unexpectedly later with # InvalidArchive exception, as these are potentially cached values # need not match Parse bead name from a file path. Might return a simpler name than intended # assert ext == '.zip' # not enforced to allow having beads with different extensions
2.126763
2
gfm/gfm.py
rcmdnk/gfm
0
6613285
#!/usr/bin/env python """ Gmail Filter Manager https://github.com/rcmdnk/gfm """ from __future__ import print_function import os import sys import xml.etree.ElementTree as ET import argparse import xml.dom.minidom import httplib2 from ruamel.yaml.scalarstring import DoubleQuotedScalarString import ruamel.yaml from oauth2client.file import Storage from oauth2client.tools import run_flow, argparser from oauth2client.client import OAuth2WebServerFlow from apiclient.discovery import build __prog__ = "gfm" __description__ = __doc__ __author__ = 'rcmdnk' __copyright__ = 'Copyright (c) 2018 rcmdnk' __credits__ = ['rcmdnk'] __license__ = 'MIT' __version__ = 'v0.0.1' __date__ = '14/Jul/2018' __maintainer__ = 'rcmdnk' __email__ = '<EMAIL>' __status__ = 'Prototype' AUTH_FILE = os.environ['HOME'] + '/.config/gmail_filter/auth' GOOGLE_CLIENT_ID = '937185253369-2er0fqahlnpn7tgou1i4mi2for07mhci.'\ 'apps.googleusercontent.com' GOOGLE_CLIENT_SECRET = '<KEY>' class GmailFilterManager(): def __init__(self, **kw): self.opt = {} for k, v in kw.items(): self.opt[k] = v # Set defaults if it is called w/o args (check dummy in argparser) if "dummy" not in self.opt: self.opt = vars(self.get_parser().parse_args()) if self.opt["client_id"] is None: self.opt["client_id"] = GOOGLE_CLIENT_ID if self.opt["client_secret"] is None: self.opt["client_secret"] = GOOGLE_CLIENT_SECRET self.service = None self.address = None self.filters = None self.filters_api = None self.filters_xml = None self.labels = None self.dic_xml2api = { "hasTheWord": "query", "doesNotHaveTheWord": "negatedQuery", "sizeOperator": "sizeComparison", } self.dic_api2xml = {v: k for k, v in self.dic_xml2api.items()} self.dic_label_xml2api = { "shouldArchive": ("removeLabelIds", "INBOX"), "shouldMarkAsRead": ("removeLabelIds", "UNREAD"), "shouldStar": ("addLabelIds", "STARRED"), "shouldTrash": ("addLabelIds", "TRASH"), "shouldNeverSpam": ("removeLabelIds", "SPAM"), "shouldAlwaysMarkAsImportant": ("addLabelIds", "IMPORTANT"), 
"shouldNeverMarkAsImportant": ("removeLabelIds", "IMPORTANT"), "smartLabelToApply": ("addLabelIds", "CATEGORY_"), } self.dic_label_api2xml = {v: k for k, v in self.dic_label_xml2api.items()} self.dic_size_xml2api = { "s_sl": "larger", "s_ss": "smaller", } self.dic_size_api2xml = {v: k for k, v in self.dic_size_xml2api.items()} if ((isinstance(self.opt["command"], str) and self.opt["command"] == "") or (isinstance(self.opt["command"], str) and not self.opt["command"]) or (self.opt["command"] is None)): return if isinstance(self.opt["command"], str): self.opt["command"] = [self.opt["command"]] for command in self.opt["command"]: if command == "xml2yaml": self.read_xml() self.write_yaml() elif command == "yaml2xml": self.read_yaml() self.yaml2xml() self.write_xml() elif command == "get": self.get() elif command == "put": self.put() elif command == "show_filters": self.show_filters() elif command == "show_filters_xml": self.show_filters_xml() elif command == "show_filters_api": self.show_filters_api() elif command == "show_labels_api": self.show_labels_api() else: raise ValueError("Invalid command: %s" % command) def clean(self): self.filters = None self.filters_api = None self.labels = None def authentication(self, storage): return run_flow( OAuth2WebServerFlow( client_id=self.opt["client_id"], client_secret=self.opt["client_secret"], scope=['https://www.googleapis.com/auth/gmail.modify']), storage, argparser.parse_args([])) def build_service(self, rebuild=False): conf_dir = os.path.dirname(self.opt["auth_file"]) if not os.path.isdir(conf_dir): os.makedirs(conf_dir) storage = Storage(self.opt["auth_file"]) credentials = storage.get() if rebuild or credentials is None or credentials.invalid: credentials = self.authentication(storage) http = httplib2.Http() http = credentials.authorize(http) service = build('gmail', 'v1', http=http) prof = service.users().getProfile(userId='me').execute() self.opt["address "] = prof['emailAddress'] if self.opt["debug"]: print("My 
address: %s" % self.opt["address"]) return service def get_service(self): if self.service is None: self.service = self.build_service() return self.service def dump_xml(self, stream=sys.stdout): my_filter = xml.dom.minidom.parseString( ET.tostring(self.filters_xml)).toprettyxml( indent=" ", encoding="utf-8") if sys.version_info.major > 2: my_filter = my_filter.decode() stream.write(my_filter) def write_xml(self): with open(self.opt["output_xml"], "w") as f: self.dump_xml(f) def read_xml(self): namespaces = {str(x[0]) if x[0] != "" else "atom": x[1] for _, x in ET.iterparse(self.opt["input_xml"], events=['start-ns'])} for k, v in namespaces.items(): if k == "atom": k = "" ET.register_namespace(k, v) tree = ET.parse(self.opt["input_xml"]) self.filters_xml = tree.getroot() for e in self.filters_xml.iter('*'): if e.text is not None: e.text = e.text.strip() if e.tail is not None: e.tail = e.tail.strip() filter_list = [] for e in self.filters_xml.findall("./atom:entry", namespaces): properties = {} for p in e.findall("./apps:property", namespaces): name = p.get("name") value = p.get("value") properties[name] = DoubleQuotedScalarString(value) if "size" not in properties: for noneed in ["sizeOperator", "sizeUnit"]: if noneed in properties: del properties[noneed] filter_list.append(properties) self.filters = {"namespaces": namespaces, "filter": filter_list} def show_filters_xml(self): self.read_xml() if self.opt["raw"]: self.dump_xml() else: self.dump_yaml() def dump_yaml(self, stream=sys.stdout): yaml = ruamel.yaml.YAML() yaml.indent(mapping=2, sequence=4, offset=2) yaml.dump(self.filters, stream=stream) def write_yaml(self): with open(self.opt["output_yaml"], "w") as stream: self.dump_yaml(stream) def read_yaml(self): yaml = ruamel.yaml.YAML() with open(self.opt["input_yaml"], "r") as f: self.filters = yaml.load(f) if "namespaces" in self.filters: if "" in self.filters["namespaces"]: self.filters["namespaces"]["atom"] =\ self.filters["namespaces"][""] del 
self.filters["namespaces"][""] else: self.filters["namespaces"] = { "atom": "http://www.w3.org/2005/Atom", "apps": "http://schemas.google.com/apps/2006" } def show_filters(self): self.read_yaml() self.dump_yaml() def yaml2xml(self): for k, v in self.filters["namespaces"].items(): if k == "atom": k = "" ET.register_namespace(k, v) self.filters_xml = ET.Element('feed') for f in self.filters["filters"]: if "label" in f: labels = f["label"] if isinstance( f["label"], list) else [f["label"]] del f["label"] else: labels = [None] for label in labels: entry = ET.SubElement( self.filters_xml, "{" + self.filters["namespaces"]["atom"] + "}" + 'entry') properties = f if label is not None: properties["label"] = label for k, v in properties.items(): ET.SubElement( entry, "{" + self.filters["namespaces"]["apps"] + "}property", attrib={"name": k, "value": v} ) def get_filters(self): if self.filters_api is not None: return self.filters_api = self.get_service().users().settings( ).filters().list(userId='me').execute() def show_filters_api(self): self.get_filters() if self.opt["raw"]: print(self.filters_api) return for f in self.filters_api["filter"]: print("criteria:") for a in f["criteria"]: print(" %s: %s" % (a, f["criteria"][a])) print("action:") for a in f["action"]: print(" %s: %s" % (a, f["action"][a])) print("") def get(self): self.get_filters() self.filters = { "filter": [], "namespaces": { "apps": "http://schemas.google.com/apps/2006", "atom": "http://www.w3.org/2005/Atom", } } for f in self.filters_api["filter"]: xml_filter = {} for k, v in f["criteria"].items(): key, value = self.criteria_api2xml(k, v) xml_filter[key] = value if key == "size": xml_filter["sizeUnit"] = "s_sb" for k, v in f["action"].items(): if k == "addLabelIds": for label in v: if ("addLabelIds", label) in self.dic_label_api2xml: xml_filter[self.dic_label_api2xml[ ("addLabelIds", label)]] = "true" continue if "label" not in xml_filter: xml_filter["label"] = [] 
xml_filter["label"].append(self.label_id2name(label)) elif k == "removeLabelIds": for label in v: if ("removeLabelIds", label) in self.dic_label_api2xml: xml_filter[self.dic_label_api2xml[ ("removeLabelIds", label)]] = "true" continue else: xml_filter[k] = v self.filters["filter"].append(xml_filter) self.write_yaml() def criteria_api2xml(self, key, value): key_out = key value_out = value if key in self.dic_api2xml: key_out = self.dic_api2xml[key] if key == "sizeComparison": value_out = self.dic_size_api2xml[value] return key_out, value_out def action_api2xml(self, key, value): key_out = key value_out = value if key in self.dic_label_api2xml: key_out = self.dic_api2xml[key] if key == "sizeComparison": value_out = self.dic_size_api2xml[value] if key == "query": key_out = "hasTheWord" elif key == "query": key_out = "hasTheWord" return key_out, value_out def get_labels(self): if self.labels is not None: return self.labels = self.get_service().users().labels().list( userId='me').execute()["labels"] def show_labels_api(self): self.get_labels() if self.opt["raw"]: for l in self.labels: print(l) return print("===User labels===") for l in sorted(filter(lambda x: x["type"] == "user", self.labels), key=lambda x: x["name"]): print("%s: %s" % (l["name"], l["id"])) print("\n===System labels===") for l in sorted(filter(lambda x: x["type"] != "user", self.labels), key=lambda x: x["name"]): print("%s: %s" % (l["name"], l["id"])) def label_id2name(self, label_id): self.get_labels() candidates = filter(lambda x: x["id"] == label_id, self.labels) if len(candidates) != 1: print("Wrong label id? id: %s, candidates: %s" % (label_id, str(candidates))) sys.exit(1) return candidates[0]["name"] def label_name2id(self, name): self.get_labels() candidates = filter(lambda x: x["name"] == name, self.labels) if len(candidates) != 1: print("Wrong label name? 
candidates: %s" % str(candidates)) sys.exit(1) return candidates[0]["id"] def put(self): pass @staticmethod def get_parser(): input_xml_parser = argparse.ArgumentParser(add_help=False) input_xml_parser.add_argument( "-x", "--input_xml", action="store", dest="input_xml", default="mailFilters.xml", help="Input XML file name") input_yaml_parser = argparse.ArgumentParser(add_help=False) input_yaml_parser.add_argument( "-y", "--input_yaml", action="store", dest="input_yaml", default="mailFilters.yaml", help="Input YAML file name") output_xml_parser = argparse.ArgumentParser(add_help=False) output_xml_parser.add_argument( "-X", "--output_xml", action="store", dest="output_xml", default="filters.xml", help="Output XML file name") output_yaml_parser = argparse.ArgumentParser(add_help=False) output_yaml_parser.add_argument( "-Y", "--output_yaml", action="store", dest="output_yaml", default="mailFilters.yaml", help="Output YAML file name") auth_file_parser = argparse.ArgumentParser(add_help=False) auth_file_parser.add_argument( "--auth_file", action="store", dest="auth_file", default=AUTH_FILE, help="Gmail API authentication file") client_id_parser = argparse.ArgumentParser(add_help=False) client_id_parser.add_argument( "--client_id", action="store", dest="client_id", default=None, help="Google Client ID") client_secret_parser = argparse.ArgumentParser(add_help=False) client_secret_parser.add_argument( "--client_secret", action="store", dest="client_secret", default=None, help="Google Client ID") raw_parser = argparse.ArgumentParser(add_help=False) raw_parser.add_argument( "-r", "--raw", action="store_true", dest="raw", default=False, help="Show raw output") debug_parser = argparse.ArgumentParser(add_help=False) debug_parser.add_argument( "-d", "--debug", action="store_true", dest="debug", default=False, help="Enable debug mode") dummy_parser = argparse.ArgumentParser(add_help=False) dummy_parser.add_argument( "--dummy", action="store_true", dest="dummy", default=True, 
help=argparse.SUPPRESS) parser = argparse.ArgumentParser( prog=__prog__, add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=__description__, parents=[input_xml_parser, input_yaml_parser, output_xml_parser, output_yaml_parser, auth_file_parser, client_id_parser, client_secret_parser, raw_parser, debug_parser, dummy_parser], ) subparsers = parser.add_subparsers( title="subcommands", metavar="[command]", help="", dest="command") desc = "Convert filters from XML to YAML" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [input_xml_parser, output_yaml_parser, debug_parser] } if sys.version_info.major > 2: kwargs["aliases"] = ["x2y"] subparsers.add_parser("xml2yaml", **kwargs) desc = "Convert filters from YAML to XML" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [input_yaml_parser, output_xml_parser, debug_parser] } if sys.version_info.major > 2: kwargs["aliases"] = ["y2x"] subparsers.add_parser("yaml2xml", **kwargs) desc = "Get filters by using API and make YAML" subparsers.add_parser( "get", description=desc, help=desc, formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=[output_yaml_parser, auth_file_parser, client_id_parser, client_secret_parser, debug_parser]) desc = "Put filters in YAML file to Gmail server by using API" subparsers.add_parser( "put", description=desc, help=desc, formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=[input_yaml_parser, auth_file_parser, client_id_parser, client_secret_parser, debug_parser]) desc = "Show filters in YAML" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [input_yaml_parser, debug_parser], } if sys.version_info.major > 2: kwargs["aliases"] = ["show", "s"] subparsers.add_parser("show_filters", **kwargs) desc = "Show filters in XML" kwargs = { "description": desc, 
"help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [input_xml_parser, raw_parser, debug_parser], } if sys.version_info.major > 2: kwargs["aliases"] = ["show_xml", "sx"] subparsers.add_parser("show_filter_xml", **kwargs) desc = "Show filters taken by API" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [auth_file_parser, client_id_parser, client_secret_parser, raw_parser, debug_parser], } if sys.version_info.major > 2: kwargs["aliases"] = ["show_api", "sa"] subparsers.add_parser("show_filterapi", **kwargs) desc = "Show labels taken by API" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [auth_file_parser, client_id_parser, client_secret_parser, raw_parser, debug_parser], } if sys.version_info.major > 2: kwargs["aliases"] = ["show_labels", "sl"] subparsers.add_parser("show_labels_api", **kwargs) return parser def main(): parser = GmailFilterManager.get_parser() if len(sys.argv) == 1: parser.print_help() return args = parser.parse_args() GmailFilterManager(**vars(args)) if __name__ == '__main__': main()
#!/usr/bin/env python """ Gmail Filter Manager https://github.com/rcmdnk/gfm """ from __future__ import print_function import os import sys import xml.etree.ElementTree as ET import argparse import xml.dom.minidom import httplib2 from ruamel.yaml.scalarstring import DoubleQuotedScalarString import ruamel.yaml from oauth2client.file import Storage from oauth2client.tools import run_flow, argparser from oauth2client.client import OAuth2WebServerFlow from apiclient.discovery import build __prog__ = "gfm" __description__ = __doc__ __author__ = 'rcmdnk' __copyright__ = 'Copyright (c) 2018 rcmdnk' __credits__ = ['rcmdnk'] __license__ = 'MIT' __version__ = 'v0.0.1' __date__ = '14/Jul/2018' __maintainer__ = 'rcmdnk' __email__ = '<EMAIL>' __status__ = 'Prototype' AUTH_FILE = os.environ['HOME'] + '/.config/gmail_filter/auth' GOOGLE_CLIENT_ID = '937185253369-2er0fqahlnpn7tgou1i4mi2for07mhci.'\ 'apps.googleusercontent.com' GOOGLE_CLIENT_SECRET = '<KEY>' class GmailFilterManager(): def __init__(self, **kw): self.opt = {} for k, v in kw.items(): self.opt[k] = v # Set defaults if it is called w/o args (check dummy in argparser) if "dummy" not in self.opt: self.opt = vars(self.get_parser().parse_args()) if self.opt["client_id"] is None: self.opt["client_id"] = GOOGLE_CLIENT_ID if self.opt["client_secret"] is None: self.opt["client_secret"] = GOOGLE_CLIENT_SECRET self.service = None self.address = None self.filters = None self.filters_api = None self.filters_xml = None self.labels = None self.dic_xml2api = { "hasTheWord": "query", "doesNotHaveTheWord": "negatedQuery", "sizeOperator": "sizeComparison", } self.dic_api2xml = {v: k for k, v in self.dic_xml2api.items()} self.dic_label_xml2api = { "shouldArchive": ("removeLabelIds", "INBOX"), "shouldMarkAsRead": ("removeLabelIds", "UNREAD"), "shouldStar": ("addLabelIds", "STARRED"), "shouldTrash": ("addLabelIds", "TRASH"), "shouldNeverSpam": ("removeLabelIds", "SPAM"), "shouldAlwaysMarkAsImportant": ("addLabelIds", "IMPORTANT"), 
"shouldNeverMarkAsImportant": ("removeLabelIds", "IMPORTANT"), "smartLabelToApply": ("addLabelIds", "CATEGORY_"), } self.dic_label_api2xml = {v: k for k, v in self.dic_label_xml2api.items()} self.dic_size_xml2api = { "s_sl": "larger", "s_ss": "smaller", } self.dic_size_api2xml = {v: k for k, v in self.dic_size_xml2api.items()} if ((isinstance(self.opt["command"], str) and self.opt["command"] == "") or (isinstance(self.opt["command"], str) and not self.opt["command"]) or (self.opt["command"] is None)): return if isinstance(self.opt["command"], str): self.opt["command"] = [self.opt["command"]] for command in self.opt["command"]: if command == "xml2yaml": self.read_xml() self.write_yaml() elif command == "yaml2xml": self.read_yaml() self.yaml2xml() self.write_xml() elif command == "get": self.get() elif command == "put": self.put() elif command == "show_filters": self.show_filters() elif command == "show_filters_xml": self.show_filters_xml() elif command == "show_filters_api": self.show_filters_api() elif command == "show_labels_api": self.show_labels_api() else: raise ValueError("Invalid command: %s" % command) def clean(self): self.filters = None self.filters_api = None self.labels = None def authentication(self, storage): return run_flow( OAuth2WebServerFlow( client_id=self.opt["client_id"], client_secret=self.opt["client_secret"], scope=['https://www.googleapis.com/auth/gmail.modify']), storage, argparser.parse_args([])) def build_service(self, rebuild=False): conf_dir = os.path.dirname(self.opt["auth_file"]) if not os.path.isdir(conf_dir): os.makedirs(conf_dir) storage = Storage(self.opt["auth_file"]) credentials = storage.get() if rebuild or credentials is None or credentials.invalid: credentials = self.authentication(storage) http = httplib2.Http() http = credentials.authorize(http) service = build('gmail', 'v1', http=http) prof = service.users().getProfile(userId='me').execute() self.opt["address "] = prof['emailAddress'] if self.opt["debug"]: print("My 
address: %s" % self.opt["address"]) return service def get_service(self): if self.service is None: self.service = self.build_service() return self.service def dump_xml(self, stream=sys.stdout): my_filter = xml.dom.minidom.parseString( ET.tostring(self.filters_xml)).toprettyxml( indent=" ", encoding="utf-8") if sys.version_info.major > 2: my_filter = my_filter.decode() stream.write(my_filter) def write_xml(self): with open(self.opt["output_xml"], "w") as f: self.dump_xml(f) def read_xml(self): namespaces = {str(x[0]) if x[0] != "" else "atom": x[1] for _, x in ET.iterparse(self.opt["input_xml"], events=['start-ns'])} for k, v in namespaces.items(): if k == "atom": k = "" ET.register_namespace(k, v) tree = ET.parse(self.opt["input_xml"]) self.filters_xml = tree.getroot() for e in self.filters_xml.iter('*'): if e.text is not None: e.text = e.text.strip() if e.tail is not None: e.tail = e.tail.strip() filter_list = [] for e in self.filters_xml.findall("./atom:entry", namespaces): properties = {} for p in e.findall("./apps:property", namespaces): name = p.get("name") value = p.get("value") properties[name] = DoubleQuotedScalarString(value) if "size" not in properties: for noneed in ["sizeOperator", "sizeUnit"]: if noneed in properties: del properties[noneed] filter_list.append(properties) self.filters = {"namespaces": namespaces, "filter": filter_list} def show_filters_xml(self): self.read_xml() if self.opt["raw"]: self.dump_xml() else: self.dump_yaml() def dump_yaml(self, stream=sys.stdout): yaml = ruamel.yaml.YAML() yaml.indent(mapping=2, sequence=4, offset=2) yaml.dump(self.filters, stream=stream) def write_yaml(self): with open(self.opt["output_yaml"], "w") as stream: self.dump_yaml(stream) def read_yaml(self): yaml = ruamel.yaml.YAML() with open(self.opt["input_yaml"], "r") as f: self.filters = yaml.load(f) if "namespaces" in self.filters: if "" in self.filters["namespaces"]: self.filters["namespaces"]["atom"] =\ self.filters["namespaces"][""] del 
self.filters["namespaces"][""] else: self.filters["namespaces"] = { "atom": "http://www.w3.org/2005/Atom", "apps": "http://schemas.google.com/apps/2006" } def show_filters(self): self.read_yaml() self.dump_yaml() def yaml2xml(self): for k, v in self.filters["namespaces"].items(): if k == "atom": k = "" ET.register_namespace(k, v) self.filters_xml = ET.Element('feed') for f in self.filters["filters"]: if "label" in f: labels = f["label"] if isinstance( f["label"], list) else [f["label"]] del f["label"] else: labels = [None] for label in labels: entry = ET.SubElement( self.filters_xml, "{" + self.filters["namespaces"]["atom"] + "}" + 'entry') properties = f if label is not None: properties["label"] = label for k, v in properties.items(): ET.SubElement( entry, "{" + self.filters["namespaces"]["apps"] + "}property", attrib={"name": k, "value": v} ) def get_filters(self): if self.filters_api is not None: return self.filters_api = self.get_service().users().settings( ).filters().list(userId='me').execute() def show_filters_api(self): self.get_filters() if self.opt["raw"]: print(self.filters_api) return for f in self.filters_api["filter"]: print("criteria:") for a in f["criteria"]: print(" %s: %s" % (a, f["criteria"][a])) print("action:") for a in f["action"]: print(" %s: %s" % (a, f["action"][a])) print("") def get(self): self.get_filters() self.filters = { "filter": [], "namespaces": { "apps": "http://schemas.google.com/apps/2006", "atom": "http://www.w3.org/2005/Atom", } } for f in self.filters_api["filter"]: xml_filter = {} for k, v in f["criteria"].items(): key, value = self.criteria_api2xml(k, v) xml_filter[key] = value if key == "size": xml_filter["sizeUnit"] = "s_sb" for k, v in f["action"].items(): if k == "addLabelIds": for label in v: if ("addLabelIds", label) in self.dic_label_api2xml: xml_filter[self.dic_label_api2xml[ ("addLabelIds", label)]] = "true" continue if "label" not in xml_filter: xml_filter["label"] = [] 
xml_filter["label"].append(self.label_id2name(label)) elif k == "removeLabelIds": for label in v: if ("removeLabelIds", label) in self.dic_label_api2xml: xml_filter[self.dic_label_api2xml[ ("removeLabelIds", label)]] = "true" continue else: xml_filter[k] = v self.filters["filter"].append(xml_filter) self.write_yaml() def criteria_api2xml(self, key, value): key_out = key value_out = value if key in self.dic_api2xml: key_out = self.dic_api2xml[key] if key == "sizeComparison": value_out = self.dic_size_api2xml[value] return key_out, value_out def action_api2xml(self, key, value): key_out = key value_out = value if key in self.dic_label_api2xml: key_out = self.dic_api2xml[key] if key == "sizeComparison": value_out = self.dic_size_api2xml[value] if key == "query": key_out = "hasTheWord" elif key == "query": key_out = "hasTheWord" return key_out, value_out def get_labels(self): if self.labels is not None: return self.labels = self.get_service().users().labels().list( userId='me').execute()["labels"] def show_labels_api(self): self.get_labels() if self.opt["raw"]: for l in self.labels: print(l) return print("===User labels===") for l in sorted(filter(lambda x: x["type"] == "user", self.labels), key=lambda x: x["name"]): print("%s: %s" % (l["name"], l["id"])) print("\n===System labels===") for l in sorted(filter(lambda x: x["type"] != "user", self.labels), key=lambda x: x["name"]): print("%s: %s" % (l["name"], l["id"])) def label_id2name(self, label_id): self.get_labels() candidates = filter(lambda x: x["id"] == label_id, self.labels) if len(candidates) != 1: print("Wrong label id? id: %s, candidates: %s" % (label_id, str(candidates))) sys.exit(1) return candidates[0]["name"] def label_name2id(self, name): self.get_labels() candidates = filter(lambda x: x["name"] == name, self.labels) if len(candidates) != 1: print("Wrong label name? 
candidates: %s" % str(candidates)) sys.exit(1) return candidates[0]["id"] def put(self): pass @staticmethod def get_parser(): input_xml_parser = argparse.ArgumentParser(add_help=False) input_xml_parser.add_argument( "-x", "--input_xml", action="store", dest="input_xml", default="mailFilters.xml", help="Input XML file name") input_yaml_parser = argparse.ArgumentParser(add_help=False) input_yaml_parser.add_argument( "-y", "--input_yaml", action="store", dest="input_yaml", default="mailFilters.yaml", help="Input YAML file name") output_xml_parser = argparse.ArgumentParser(add_help=False) output_xml_parser.add_argument( "-X", "--output_xml", action="store", dest="output_xml", default="filters.xml", help="Output XML file name") output_yaml_parser = argparse.ArgumentParser(add_help=False) output_yaml_parser.add_argument( "-Y", "--output_yaml", action="store", dest="output_yaml", default="mailFilters.yaml", help="Output YAML file name") auth_file_parser = argparse.ArgumentParser(add_help=False) auth_file_parser.add_argument( "--auth_file", action="store", dest="auth_file", default=AUTH_FILE, help="Gmail API authentication file") client_id_parser = argparse.ArgumentParser(add_help=False) client_id_parser.add_argument( "--client_id", action="store", dest="client_id", default=None, help="Google Client ID") client_secret_parser = argparse.ArgumentParser(add_help=False) client_secret_parser.add_argument( "--client_secret", action="store", dest="client_secret", default=None, help="Google Client ID") raw_parser = argparse.ArgumentParser(add_help=False) raw_parser.add_argument( "-r", "--raw", action="store_true", dest="raw", default=False, help="Show raw output") debug_parser = argparse.ArgumentParser(add_help=False) debug_parser.add_argument( "-d", "--debug", action="store_true", dest="debug", default=False, help="Enable debug mode") dummy_parser = argparse.ArgumentParser(add_help=False) dummy_parser.add_argument( "--dummy", action="store_true", dest="dummy", default=True, 
help=argparse.SUPPRESS) parser = argparse.ArgumentParser( prog=__prog__, add_help=True, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=__description__, parents=[input_xml_parser, input_yaml_parser, output_xml_parser, output_yaml_parser, auth_file_parser, client_id_parser, client_secret_parser, raw_parser, debug_parser, dummy_parser], ) subparsers = parser.add_subparsers( title="subcommands", metavar="[command]", help="", dest="command") desc = "Convert filters from XML to YAML" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [input_xml_parser, output_yaml_parser, debug_parser] } if sys.version_info.major > 2: kwargs["aliases"] = ["x2y"] subparsers.add_parser("xml2yaml", **kwargs) desc = "Convert filters from YAML to XML" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [input_yaml_parser, output_xml_parser, debug_parser] } if sys.version_info.major > 2: kwargs["aliases"] = ["y2x"] subparsers.add_parser("yaml2xml", **kwargs) desc = "Get filters by using API and make YAML" subparsers.add_parser( "get", description=desc, help=desc, formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=[output_yaml_parser, auth_file_parser, client_id_parser, client_secret_parser, debug_parser]) desc = "Put filters in YAML file to Gmail server by using API" subparsers.add_parser( "put", description=desc, help=desc, formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=[input_yaml_parser, auth_file_parser, client_id_parser, client_secret_parser, debug_parser]) desc = "Show filters in YAML" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [input_yaml_parser, debug_parser], } if sys.version_info.major > 2: kwargs["aliases"] = ["show", "s"] subparsers.add_parser("show_filters", **kwargs) desc = "Show filters in XML" kwargs = { "description": desc, 
"help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [input_xml_parser, raw_parser, debug_parser], } if sys.version_info.major > 2: kwargs["aliases"] = ["show_xml", "sx"] subparsers.add_parser("show_filter_xml", **kwargs) desc = "Show filters taken by API" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [auth_file_parser, client_id_parser, client_secret_parser, raw_parser, debug_parser], } if sys.version_info.major > 2: kwargs["aliases"] = ["show_api", "sa"] subparsers.add_parser("show_filterapi", **kwargs) desc = "Show labels taken by API" kwargs = { "description": desc, "help": desc, "formatter_class": argparse.ArgumentDefaultsHelpFormatter, "parents": [auth_file_parser, client_id_parser, client_secret_parser, raw_parser, debug_parser], } if sys.version_info.major > 2: kwargs["aliases"] = ["show_labels", "sl"] subparsers.add_parser("show_labels_api", **kwargs) return parser def main(): parser = GmailFilterManager.get_parser() if len(sys.argv) == 1: parser.print_help() return args = parser.parse_args() GmailFilterManager(**vars(args)) if __name__ == '__main__': main()
en
0.627759
#!/usr/bin/env python Gmail Filter Manager https://github.com/rcmdnk/gfm # Set defaults if it is called w/o args (check dummy in argparser)
2.286493
2
cybox/objects/pipe_object.py
Mattlk13/python-cybox
40
6613286
# Copyright (c) 2017, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. from mixbox import fields import cybox.bindings.pipe_object as pipe_binding from cybox.common import ObjectProperties, String class Pipe(ObjectProperties): _namespace = 'http://cybox.mitre.org/objects#PipeObject-2' _XSI_NS = "PipeObj" _XSI_TYPE = "PipeObjectType" _binding = pipe_binding _binding_class = pipe_binding.PipeObjectType name = fields.TypedField("Name", String) named = fields.TypedField("named")
# Copyright (c) 2017, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. from mixbox import fields import cybox.bindings.pipe_object as pipe_binding from cybox.common import ObjectProperties, String class Pipe(ObjectProperties): _namespace = 'http://cybox.mitre.org/objects#PipeObject-2' _XSI_NS = "PipeObj" _XSI_TYPE = "PipeObjectType" _binding = pipe_binding _binding_class = pipe_binding.PipeObjectType name = fields.TypedField("Name", String) named = fields.TypedField("named")
en
0.821898
# Copyright (c) 2017, The MITRE Corporation. All rights reserved. # See LICENSE.txt for complete terms. #PipeObject-2'
2.148387
2
exercism/python/armstrong-numbers/armstrong_numbers.py
TGITS/programming-workouts
0
6613287
<gh_stars>0 def is_armstrong_number(number): number_as_string = str(number) power = len(number_as_string) return number == sum([int(digit)**power for digit in number_as_string])
def is_armstrong_number(number): number_as_string = str(number) power = len(number_as_string) return number == sum([int(digit)**power for digit in number_as_string])
none
1
3.75345
4
flashCards.py
simonbaeuerle/image2txt
0
6613288
""" Read image files from ./imgs_flashCards Perform OCR scan on images Save text to txt file Created 01.05.2020 by <NAME> """ import pytesseract from PIL import Image import os # Repository root directory root = "C:\\Users\\Simon\\Git\\image2txt\\" # If you don't have tesseract executable in your PATH, include the following: pytesseract.pytesseract.tesseract_cmd = "C:\\Program Files\\Tesseract-OCR\\tesseract.exe" TESSDATA_PREFIX = "C:\\Program Files\\Tesseract-OCR" # Set working directory to repository root directory os.chdir(root) # Path to folder with images to be scanned path = os.path.join(root, "imgs_flashCards\\") # Loop over images result = "" for i in range(1,5): # Image name template imgfile = str(i) + ".jpeg" # Append images to result string result += "\n \n ---------------------------------------------------- \n \n" result += pytesseract.image_to_string(Image.open(os.path.join(path, imgfile))) # Output scanned text to txt file txtFile = open("Output.txt", "w") txtFile.write(result) txtFile.close()
""" Read image files from ./imgs_flashCards Perform OCR scan on images Save text to txt file Created 01.05.2020 by <NAME> """ import pytesseract from PIL import Image import os # Repository root directory root = "C:\\Users\\Simon\\Git\\image2txt\\" # If you don't have tesseract executable in your PATH, include the following: pytesseract.pytesseract.tesseract_cmd = "C:\\Program Files\\Tesseract-OCR\\tesseract.exe" TESSDATA_PREFIX = "C:\\Program Files\\Tesseract-OCR" # Set working directory to repository root directory os.chdir(root) # Path to folder with images to be scanned path = os.path.join(root, "imgs_flashCards\\") # Loop over images result = "" for i in range(1,5): # Image name template imgfile = str(i) + ".jpeg" # Append images to result string result += "\n \n ---------------------------------------------------- \n \n" result += pytesseract.image_to_string(Image.open(os.path.join(path, imgfile))) # Output scanned text to txt file txtFile = open("Output.txt", "w") txtFile.write(result) txtFile.close()
en
0.852235
Read image files from ./imgs_flashCards Perform OCR scan on images Save text to txt file Created 01.05.2020 by <NAME> # Repository root directory # If you don't have tesseract executable in your PATH, include the following: # Set working directory to repository root directory # Path to folder with images to be scanned # Loop over images # Image name template # Append images to result string # Output scanned text to txt file
3.433743
3
cards/models.py
zachtib/DraftingWithFriends
0
6613289
from django.db import models class MagicSet(models.Model): id = models.UUIDField(primary_key=True) code = models.CharField(max_length=5) name = models.CharField(max_length=100) class Card(models.Model): id = models.UUIDField(primary_key=True) name = models.CharField(max_length=200) mana_cost = models.CharField(max_length=20) class CardFace(models.Model): card = models.ForeignKey(Card, related_name='faces', on_delete=models.CASCADE) name = models.CharField(max_length=200) mana_cost = models.CharField(max_length=20) class Printing(models.Model): magic_set = models.ForeignKey(MagicSet, related_name='printings', on_delete=models.CASCADE) card = models.ForeignKey(Card, related_name='printings', on_delete=models.CASCADE)
from django.db import models class MagicSet(models.Model): id = models.UUIDField(primary_key=True) code = models.CharField(max_length=5) name = models.CharField(max_length=100) class Card(models.Model): id = models.UUIDField(primary_key=True) name = models.CharField(max_length=200) mana_cost = models.CharField(max_length=20) class CardFace(models.Model): card = models.ForeignKey(Card, related_name='faces', on_delete=models.CASCADE) name = models.CharField(max_length=200) mana_cost = models.CharField(max_length=20) class Printing(models.Model): magic_set = models.ForeignKey(MagicSet, related_name='printings', on_delete=models.CASCADE) card = models.ForeignKey(Card, related_name='printings', on_delete=models.CASCADE)
none
1
2.296904
2
python-codes/m2_curso_em_video_estruturas_de_controle/ex068.0.py
lucasportella/learning_repo
0
6613290
from random import randint print('=-'* 15) print('VAMOS JOGAR PAR OU ÍMPAR') print('=-'* 15) v = 0 while True: pc = randint(0,10) valor = int(input('Digite um valor: ')) if (valor + pc) % 2 == 0: resultadosoma = 'PAR' else: resultadosoma = 'ÍMPAR' escolha = str(input('Par ou ímpar? [P/I] ')).upper().strip() print('-' * 30) print(f'Você jogou {valor} e o computador jogou {pc}. Total de {valor + pc} deu {resultadosoma}') print('-' * 30) if escolha in 'PPAR': if (valor + pc) % 2 == 0: print('Você GANHOU!!') print('Vamos jogar novamente...') print('=-' * 15) v += 1 else: print('Voce PERDEU!!') print('-=' * 15) print(f'GAME OVER! Você venceu {v} vez(es).') break elif escolha in 'ÍIMPARÍMPAR': if (valor + pc) % 2 != 0: print('Você GANHOU!!') print('Vamos jogar novamente...') print('=-' * 15) v += 1 else: print('Você PERDEU!!') print('-=' * 15) print(f'GAME OVER! Você venceu {v} vez(es).') break
from random import randint print('=-'* 15) print('VAMOS JOGAR PAR OU ÍMPAR') print('=-'* 15) v = 0 while True: pc = randint(0,10) valor = int(input('Digite um valor: ')) if (valor + pc) % 2 == 0: resultadosoma = 'PAR' else: resultadosoma = 'ÍMPAR' escolha = str(input('Par ou ímpar? [P/I] ')).upper().strip() print('-' * 30) print(f'Você jogou {valor} e o computador jogou {pc}. Total de {valor + pc} deu {resultadosoma}') print('-' * 30) if escolha in 'PPAR': if (valor + pc) % 2 == 0: print('Você GANHOU!!') print('Vamos jogar novamente...') print('=-' * 15) v += 1 else: print('Voce PERDEU!!') print('-=' * 15) print(f'GAME OVER! Você venceu {v} vez(es).') break elif escolha in 'ÍIMPARÍMPAR': if (valor + pc) % 2 != 0: print('Você GANHOU!!') print('Vamos jogar novamente...') print('=-' * 15) v += 1 else: print('Você PERDEU!!') print('-=' * 15) print(f'GAME OVER! Você venceu {v} vez(es).') break
none
1
3.720107
4
datadrivenpdes/core/__init__.py
snes-chalmers/data-driven-advection
26
6613291
<filename>datadrivenpdes/core/__init__.py """Core functionality.""" from datadrivenpdes.core import builders from datadrivenpdes.core import equations from datadrivenpdes.core import geometry from datadrivenpdes.core import grids from datadrivenpdes.core import integrate from datadrivenpdes.core import models from datadrivenpdes.core import polynomials from datadrivenpdes.core import readers from datadrivenpdes.core import states from datadrivenpdes.core import tensor_ops from datadrivenpdes.core import utils
<filename>datadrivenpdes/core/__init__.py """Core functionality.""" from datadrivenpdes.core import builders from datadrivenpdes.core import equations from datadrivenpdes.core import geometry from datadrivenpdes.core import grids from datadrivenpdes.core import integrate from datadrivenpdes.core import models from datadrivenpdes.core import polynomials from datadrivenpdes.core import readers from datadrivenpdes.core import states from datadrivenpdes.core import tensor_ops from datadrivenpdes.core import utils
en
0.90823
Core functionality.
1.191905
1
synapse/rest/admin/background_updates.py
dsonck92/synapse
9,945
6613292
<filename>synapse/rest/admin/background_updates.py # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from http import HTTPStatus from typing import TYPE_CHECKING, Tuple from synapse.api.errors import SynapseError from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_json_object_from_request, ) from synapse.http.site import SynapseRequest from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) class BackgroundUpdateEnabledRestServlet(RestServlet): """Allows temporarily disabling background updates""" PATTERNS = admin_patterns("/background_updates/enabled$") def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() self._data_stores = hs.get_datastores() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) # We need to check that all configured databases have updates enabled. # (They *should* all be in sync.) 
enabled = all(db.updates.enabled for db in self._data_stores.databases) return HTTPStatus.OK, {"enabled": enabled} async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) body = parse_json_object_from_request(request) enabled = body.get("enabled", True) if not isinstance(enabled, bool): raise SynapseError( HTTPStatus.BAD_REQUEST, "'enabled' parameter must be a boolean" ) for db in self._data_stores.databases: db.updates.enabled = enabled # If we're re-enabling them ensure that we start the background # process again. if enabled: db.updates.start_doing_background_updates() return HTTPStatus.OK, {"enabled": enabled} class BackgroundUpdateRestServlet(RestServlet): """Fetch information about background updates""" PATTERNS = admin_patterns("/background_updates/status$") def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() self._data_stores = hs.get_datastores() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) # We need to check that all configured databases have updates enabled. # (They *should* all be in sync.) 
enabled = all(db.updates.enabled for db in self._data_stores.databases) current_updates = {} for db in self._data_stores.databases: update = db.updates.get_current_update() if not update: continue current_updates[db.name()] = { "name": update.name, "total_item_count": update.total_item_count, "total_duration_ms": update.total_duration_ms, "average_items_per_ms": update.average_items_per_ms(), } return HTTPStatus.OK, {"enabled": enabled, "current_updates": current_updates} class BackgroundUpdateStartJobRestServlet(RestServlet): """Allows to start specific background updates""" PATTERNS = admin_patterns("/background_updates/start_job$") def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() self._store = hs.get_datastore() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) body = parse_json_object_from_request(request) assert_params_in_dict(body, ["job_name"]) job_name = body["job_name"] if job_name == "populate_stats_process_rooms": jobs = [ { "update_name": "populate_stats_process_rooms", "progress_json": "{}", }, ] elif job_name == "regenerate_directory": jobs = [ { "update_name": "populate_user_directory_createtables", "progress_json": "{}", "depends_on": "", }, { "update_name": "populate_user_directory_process_rooms", "progress_json": "{}", "depends_on": "populate_user_directory_createtables", }, { "update_name": "populate_user_directory_process_users", "progress_json": "{}", "depends_on": "populate_user_directory_process_rooms", }, { "update_name": "populate_user_directory_cleanup", "progress_json": "{}", "depends_on": "populate_user_directory_process_users", }, ] else: raise SynapseError(HTTPStatus.BAD_REQUEST, "Invalid job_name") try: await self._store.db_pool.simple_insert_many( table="background_updates", values=jobs, desc=f"admin_api_run_{job_name}", ) except self._store.db_pool.engine.module.IntegrityError: raise SynapseError( HTTPStatus.BAD_REQUEST, "Job %s is already 
in queue of background updates." % (job_name,), ) self._store.db_pool.updates.start_doing_background_updates() return HTTPStatus.OK, {}
<filename>synapse/rest/admin/background_updates.py # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from http import HTTPStatus from typing import TYPE_CHECKING, Tuple from synapse.api.errors import SynapseError from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_json_object_from_request, ) from synapse.http.site import SynapseRequest from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) class BackgroundUpdateEnabledRestServlet(RestServlet): """Allows temporarily disabling background updates""" PATTERNS = admin_patterns("/background_updates/enabled$") def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() self._data_stores = hs.get_datastores() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) # We need to check that all configured databases have updates enabled. # (They *should* all be in sync.) 
enabled = all(db.updates.enabled for db in self._data_stores.databases) return HTTPStatus.OK, {"enabled": enabled} async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) body = parse_json_object_from_request(request) enabled = body.get("enabled", True) if not isinstance(enabled, bool): raise SynapseError( HTTPStatus.BAD_REQUEST, "'enabled' parameter must be a boolean" ) for db in self._data_stores.databases: db.updates.enabled = enabled # If we're re-enabling them ensure that we start the background # process again. if enabled: db.updates.start_doing_background_updates() return HTTPStatus.OK, {"enabled": enabled} class BackgroundUpdateRestServlet(RestServlet): """Fetch information about background updates""" PATTERNS = admin_patterns("/background_updates/status$") def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() self._data_stores = hs.get_datastores() async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) # We need to check that all configured databases have updates enabled. # (They *should* all be in sync.) 
enabled = all(db.updates.enabled for db in self._data_stores.databases) current_updates = {} for db in self._data_stores.databases: update = db.updates.get_current_update() if not update: continue current_updates[db.name()] = { "name": update.name, "total_item_count": update.total_item_count, "total_duration_ms": update.total_duration_ms, "average_items_per_ms": update.average_items_per_ms(), } return HTTPStatus.OK, {"enabled": enabled, "current_updates": current_updates} class BackgroundUpdateStartJobRestServlet(RestServlet): """Allows to start specific background updates""" PATTERNS = admin_patterns("/background_updates/start_job$") def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() self._store = hs.get_datastore() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self._auth, request) body = parse_json_object_from_request(request) assert_params_in_dict(body, ["job_name"]) job_name = body["job_name"] if job_name == "populate_stats_process_rooms": jobs = [ { "update_name": "populate_stats_process_rooms", "progress_json": "{}", }, ] elif job_name == "regenerate_directory": jobs = [ { "update_name": "populate_user_directory_createtables", "progress_json": "{}", "depends_on": "", }, { "update_name": "populate_user_directory_process_rooms", "progress_json": "{}", "depends_on": "populate_user_directory_createtables", }, { "update_name": "populate_user_directory_process_users", "progress_json": "{}", "depends_on": "populate_user_directory_process_rooms", }, { "update_name": "populate_user_directory_cleanup", "progress_json": "{}", "depends_on": "populate_user_directory_process_users", }, ] else: raise SynapseError(HTTPStatus.BAD_REQUEST, "Invalid job_name") try: await self._store.db_pool.simple_insert_many( table="background_updates", values=jobs, desc=f"admin_api_run_{job_name}", ) except self._store.db_pool.engine.module.IntegrityError: raise SynapseError( HTTPStatus.BAD_REQUEST, "Job %s is already 
in queue of background updates." % (job_name,), ) self._store.db_pool.updates.start_doing_background_updates() return HTTPStatus.OK, {}
en
0.819577
# Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Allows temporarily disabling background updates # We need to check that all configured databases have updates enabled. # (They *should* all be in sync.) # If we're re-enabling them ensure that we start the background # process again. Fetch information about background updates # We need to check that all configured databases have updates enabled. # (They *should* all be in sync.) Allows to start specific background updates
2.073012
2
machine_learning/torch_autoencoder/src/data/tqdmupto.py
iimuz/til
4
6613293
<filename>machine_learning/torch_autoencoder/src/data/tqdmupto.py """tqdm for downloader.""" import sys if "ipykernel" in sys.modules: from tqdm.notebook import tqdm else: from tqdm import tqdm class TqdmUpTo(tqdm): """Provides `update_to(n)` which uses `tqdm.update(delta_n)`. Args: tqdm (tqdm): tqdm """ def update_to(self, b: int = 1, bsize: int = 1, tsize: int = None): """ update function Args: b (int, optional): Number of blocks transferred. Defaults to 1. bsize (int, optional): Size of each block (in tqdm units). Defaults to 1. tsize ([type], optional): Total size (in tqdm units). Defaults to None. """ if tsize is not None: self.total = tsize self.update(b * bsize - self.n)
<filename>machine_learning/torch_autoencoder/src/data/tqdmupto.py """tqdm for downloader.""" import sys if "ipykernel" in sys.modules: from tqdm.notebook import tqdm else: from tqdm import tqdm class TqdmUpTo(tqdm): """Provides `update_to(n)` which uses `tqdm.update(delta_n)`. Args: tqdm (tqdm): tqdm """ def update_to(self, b: int = 1, bsize: int = 1, tsize: int = None): """ update function Args: b (int, optional): Number of blocks transferred. Defaults to 1. bsize (int, optional): Size of each block (in tqdm units). Defaults to 1. tsize ([type], optional): Total size (in tqdm units). Defaults to None. """ if tsize is not None: self.total = tsize self.update(b * bsize - self.n)
en
0.421154
tqdm for downloader. Provides `update_to(n)` which uses `tqdm.update(delta_n)`. Args: tqdm (tqdm): tqdm update function Args: b (int, optional): Number of blocks transferred. Defaults to 1. bsize (int, optional): Size of each block (in tqdm units). Defaults to 1. tsize ([type], optional): Total size (in tqdm units). Defaults to None.
2.647347
3
pyvod/vod.py
sixP-NaraKa/pyvod-chat
0
6613294
<reponame>sixP-NaraKa/pyvod-chat """ pyvod-chat - a simple tool to download a past Twitch.tv broadcasts (VOD) chat comments! Available on GitHub (+ documentation): https://github.com/sixP-NaraKa/pyvod-chat """ import os from collections import namedtuple import requests import dotenv from .vodchat import VODChat from .exceptions import TwitchApiException # check for a .env file and get the "twitch-client-id" which we need to identify the application for use with the API # this is NOT the same as the Client-Secret, which we do not need here # if there is no such Client-ID or it is empty, we use a default Client-ID dotenv.load_dotenv() _client_id = os.getenv("twitch-client-id") _client_id = _client_id if _client_id else "r52h1i1phlvyxs0sdi3ooam1b3w62g" # needed request headers _headers = {"client-id": _client_id, "accept": "application/vnd.twitchtv.v5+json"} # additional API url vod_url = "https://api.twitch.tv/v5/videos/{vod_id}" class VOD: """ Represents a Twitch.tv VOD (video-on-demand). The main entry point, responsible for getting the VODChat via `get_videochat()` as well as some basic information about the VOD itself and the channel the VOD belongs to (see below). Additional Class Attributes ----- The following are class attributes which contain basic information about the VOD and its associated channel. 
- `vod_title`: the title of the VOD - `vod_length`: the length of the VOD in hours - `vod_date`: the date when the broadcast has been streamed - `vod_views`: the total amount of VOD views - `channel`: the name of the channel associated with the VOD - `channel_id`: the channel ID - `channel_views`: total channel views - `channel_followers`: total channel followers - `channel_broadcaster_type`: whether the channel is partnered or a affiliate :param vod_id: the VOD ID to fetch the information for """ def __init__(self, vod_id): self.vod_id = str(vod_id) self._basic_data = self._get_basic_data() self.vod_title = self._basic_data.title self.vod_length = self._basic_data.vod_length self.vod_date = self._basic_data.created_at self.vod_game = self._basic_data.game self.vod_views = self._basic_data.views self.channel = self._basic_data.channel_name self.channel_id = self._basic_data.channel_id self.channel_views = self._basic_data.channel_views self.channel_followers = self._basic_data.channel_followers self.channel_broadcaster_type = self._basic_data.channel_type def __repr__(self): return "<VOD vod_title={0.vod_title!r} vod_length={0.vod_length!r} vod_date={0.vod_date!r} " \ "vod_game={0.vod_game!r} vod_views={0.vod_views!r} " \ "channel={0.channel!r} channel_id={0.channel_id!r} channel_views={0.channel_views!r} " \ "channel_followers={0.channel_followers!r} channel_broadcaster_type={0.channel_broadcaster_type!r}>"\ .format(self) def _get_basic_data(self) -> namedtuple: """ Gets some basic information in regards to the VOD and the channel associated with the VOD. :return: the basic data as a `namedtuple` """ response = requests.get(url=vod_url.format(vod_id=self.vod_id), headers=_headers) response_body = response.json() if response.status_code != 200: msg_from_twitch = response_body["message"] raise TwitchApiException( "Twitch API responded with '{1}' (status code {0}). Expected 200 (OK)." 
.format(response.status_code, msg_from_twitch) ) BasicData = namedtuple("BasicData", "title views created_at game vod_length " "channel_name channel_id channel_date " "channel_views channel_followers channel_type" ) data = BasicData( response_body["title"], # VOD title response_body["views"], # VOD views response_body["created_at"], # VOD stream date response_body["game"], # what game has been streamed response_body["length"], # VOD length in seconds (seconds / 3600 = hours) response_body["channel"]["display_name"], # channel name (streamer name) response_body["channel"]["_id"], # channel ID response_body["channel"]["created_at"], # channel creation date response_body["channel"]["views"], # total channel views response_body["channel"]["followers"], # total channel followers response_body["channel"]["broadcaster_type"] # broadcaster type (i.e. partner or affiliate, etc.) ) data = data._replace(vod_length=round(float(data.vod_length) / 3600, 2)) return data def get_vodchat(self) -> VODChat: """ Gets the VODChat associated with the `vod_id`. :return: the VODChat """ vod_chat = VODChat(vod_id=self.vod_id, _basic_vod_data=self._basic_data, _headers=_headers) return vod_chat
""" pyvod-chat - a simple tool to download a past Twitch.tv broadcasts (VOD) chat comments! Available on GitHub (+ documentation): https://github.com/sixP-NaraKa/pyvod-chat """ import os from collections import namedtuple import requests import dotenv from .vodchat import VODChat from .exceptions import TwitchApiException # check for a .env file and get the "twitch-client-id" which we need to identify the application for use with the API # this is NOT the same as the Client-Secret, which we do not need here # if there is no such Client-ID or it is empty, we use a default Client-ID dotenv.load_dotenv() _client_id = os.getenv("twitch-client-id") _client_id = _client_id if _client_id else "r52h1i1phlvyxs0sdi3ooam1b3w62g" # needed request headers _headers = {"client-id": _client_id, "accept": "application/vnd.twitchtv.v5+json"} # additional API url vod_url = "https://api.twitch.tv/v5/videos/{vod_id}" class VOD: """ Represents a Twitch.tv VOD (video-on-demand). The main entry point, responsible for getting the VODChat via `get_videochat()` as well as some basic information about the VOD itself and the channel the VOD belongs to (see below). Additional Class Attributes ----- The following are class attributes which contain basic information about the VOD and its associated channel. 
- `vod_title`: the title of the VOD - `vod_length`: the length of the VOD in hours - `vod_date`: the date when the broadcast has been streamed - `vod_views`: the total amount of VOD views - `channel`: the name of the channel associated with the VOD - `channel_id`: the channel ID - `channel_views`: total channel views - `channel_followers`: total channel followers - `channel_broadcaster_type`: whether the channel is partnered or a affiliate :param vod_id: the VOD ID to fetch the information for """ def __init__(self, vod_id): self.vod_id = str(vod_id) self._basic_data = self._get_basic_data() self.vod_title = self._basic_data.title self.vod_length = self._basic_data.vod_length self.vod_date = self._basic_data.created_at self.vod_game = self._basic_data.game self.vod_views = self._basic_data.views self.channel = self._basic_data.channel_name self.channel_id = self._basic_data.channel_id self.channel_views = self._basic_data.channel_views self.channel_followers = self._basic_data.channel_followers self.channel_broadcaster_type = self._basic_data.channel_type def __repr__(self): return "<VOD vod_title={0.vod_title!r} vod_length={0.vod_length!r} vod_date={0.vod_date!r} " \ "vod_game={0.vod_game!r} vod_views={0.vod_views!r} " \ "channel={0.channel!r} channel_id={0.channel_id!r} channel_views={0.channel_views!r} " \ "channel_followers={0.channel_followers!r} channel_broadcaster_type={0.channel_broadcaster_type!r}>"\ .format(self) def _get_basic_data(self) -> namedtuple: """ Gets some basic information in regards to the VOD and the channel associated with the VOD. :return: the basic data as a `namedtuple` """ response = requests.get(url=vod_url.format(vod_id=self.vod_id), headers=_headers) response_body = response.json() if response.status_code != 200: msg_from_twitch = response_body["message"] raise TwitchApiException( "Twitch API responded with '{1}' (status code {0}). Expected 200 (OK)." 
.format(response.status_code, msg_from_twitch) ) BasicData = namedtuple("BasicData", "title views created_at game vod_length " "channel_name channel_id channel_date " "channel_views channel_followers channel_type" ) data = BasicData( response_body["title"], # VOD title response_body["views"], # VOD views response_body["created_at"], # VOD stream date response_body["game"], # what game has been streamed response_body["length"], # VOD length in seconds (seconds / 3600 = hours) response_body["channel"]["display_name"], # channel name (streamer name) response_body["channel"]["_id"], # channel ID response_body["channel"]["created_at"], # channel creation date response_body["channel"]["views"], # total channel views response_body["channel"]["followers"], # total channel followers response_body["channel"]["broadcaster_type"] # broadcaster type (i.e. partner or affiliate, etc.) ) data = data._replace(vod_length=round(float(data.vod_length) / 3600, 2)) return data def get_vodchat(self) -> VODChat: """ Gets the VODChat associated with the `vod_id`. :return: the VODChat """ vod_chat = VODChat(vod_id=self.vod_id, _basic_vod_data=self._basic_data, _headers=_headers) return vod_chat
en
0.851669
pyvod-chat - a simple tool to download a past Twitch.tv broadcasts (VOD) chat comments! Available on GitHub (+ documentation): https://github.com/sixP-NaraKa/pyvod-chat # check for a .env file and get the "twitch-client-id" which we need to identify the application for use with the API # this is NOT the same as the Client-Secret, which we do not need here # if there is no such Client-ID or it is empty, we use a default Client-ID # needed request headers # additional API url Represents a Twitch.tv VOD (video-on-demand). The main entry point, responsible for getting the VODChat via `get_videochat()` as well as some basic information about the VOD itself and the channel the VOD belongs to (see below). Additional Class Attributes ----- The following are class attributes which contain basic information about the VOD and its associated channel. - `vod_title`: the title of the VOD - `vod_length`: the length of the VOD in hours - `vod_date`: the date when the broadcast has been streamed - `vod_views`: the total amount of VOD views - `channel`: the name of the channel associated with the VOD - `channel_id`: the channel ID - `channel_views`: total channel views - `channel_followers`: total channel followers - `channel_broadcaster_type`: whether the channel is partnered or a affiliate :param vod_id: the VOD ID to fetch the information for Gets some basic information in regards to the VOD and the channel associated with the VOD. :return: the basic data as a `namedtuple` # VOD title # VOD views # VOD stream date # what game has been streamed # VOD length in seconds (seconds / 3600 = hours) # channel name (streamer name) # channel ID # channel creation date # total channel views # total channel followers # broadcaster type (i.e. partner or affiliate, etc.) Gets the VODChat associated with the `vod_id`. :return: the VODChat
2.767197
3
test/test_histogram.py
zachjweiner/pystella
14
6613295
<gh_stars>10-100 __copyright__ = "Copyright (C) 2019 <NAME>" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import numpy as np import pyopencl as cl import pyopencl.clrandom as clr import pystella as ps import pytest from common import get_errs from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.filterwarnings( "ignore::pyopencl.characterize.CLCharacterizationWarning") @pytest.mark.filterwarnings("ignore::loopy.diagnostic.LoopyAdvisory") @pytest.mark.parametrize("dtype", ["float64"]) @pytest.mark.parametrize("num_bins", [123, 1024, 1493]) @pytest.mark.parametrize("_N", [256, 1200]) def test_trivial_histogram(ctx_factory, grid_shape, proc_shape, dtype, num_bins, _N, timing=False): ctx = ctx_factory() grid_shape = (_N,)*3 queue = cl.CommandQueue(ctx) h = 1 mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape) rank_shape, _ = mpi.get_rank_shape_start(grid_shape) histograms = { "a": (13., 1), "b": (10.3, 2), "c": (100.9, 3), } hist = ps.Histogrammer(mpi, histograms, num_bins, dtype, rank_shape=rank_shape) result = hist(queue) for key, (_b, weight) in histograms.items(): res = result[key] b = int(np.floor(_b)) expected = weight * np.product(grid_shape) assert res[b] == expected, \ f"{key}: result={res[b]}, {expected=}, ratio={res[b]/expected}" assert np.all(res[res != res[b]] == 0.) 
@pytest.mark.filterwarnings( "ignore::pyopencl.characterize.CLCharacterizationWarning") @pytest.mark.filterwarnings("ignore::loopy.diagnostic.LoopyAdvisory") @pytest.mark.parametrize("dtype", ["float64", "float32"]) @pytest.mark.parametrize("num_bins", [123, 1024, 1493]) def test_histogram(ctx_factory, grid_shape, proc_shape, dtype, num_bins, timing=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) h = 1 mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape) rank_shape, _ = mpi.get_rank_shape_start(grid_shape) if np.dtype(dtype) in (np.dtype("float64"), np.dtype("complex128")): max_rtol, avg_rtol = 1e-10, 1e-11 else: max_rtol, avg_rtol = 5e-4, 5e-5 from pymbolic import var _fx = ps.Field("fx") histograms = { "count": (var("abs")(_fx) * num_bins, 1), "squared": (var("abs")(_fx) * num_bins, _fx**2), } hist = ps.Histogrammer(mpi, histograms, num_bins, dtype, rank_shape=rank_shape) rng = clr.ThreefryGenerator(ctx, seed=12321) fx = rng.uniform(queue, rank_shape, dtype) fx_h = fx.get() result = hist(queue, fx=fx) res = result["count"] assert np.sum(res.astype("int64")) == np.product(grid_shape), \ f"Count histogram doesn't sum to grid_size ({np.sum(res)})" bins = np.linspace(0, 1, num_bins+1).astype(dtype) weights = np.ones_like(fx_h) np_res = np.histogram(fx_h, bins=bins, weights=weights)[0] np_res = mpi.allreduce(np_res) max_err, avg_err = get_errs(res, np_res) assert max_err < max_rtol and avg_err < avg_rtol, \ f"Histogrammer inaccurate for grid_shape={grid_shape}" \ f": {max_err=}, {avg_err=}" res = result["squared"] np_res = np.histogram(fx_h, bins=bins, weights=fx_h**2)[0] np_res = mpi.allreduce(np_res) max_err, avg_err = get_errs(res, np_res) assert max_err < max_rtol and avg_err < avg_rtol, \ f"Histogrammer with weights inaccurate for grid_shape={grid_shape}" \ f": {max_err=}, {avg_err=}" if timing: from common import timer t = timer(lambda: hist(queue, fx=fx)) print(f"histogram took {t:.3f} ms for {grid_shape=}, {dtype=}") 
@pytest.mark.filterwarnings( "ignore::pyopencl.characterize.CLCharacterizationWarning") @pytest.mark.filterwarnings("ignore::loopy.diagnostic.LoopyAdvisory") @pytest.mark.parametrize("dtype", ["float64", "float32"]) def test_field_histogram(ctx_factory, grid_shape, proc_shape, dtype, timing=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) h = 1 mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape) rank_shape, _ = mpi.get_rank_shape_start(grid_shape) pencil_shape = tuple(Ni + 2 * h for Ni in rank_shape) num_bins = 432 if np.dtype(dtype) in (np.dtype("float64"), np.dtype("complex128")): max_rtol, avg_rtol = 1e-10, 1e-11 else: max_rtol, avg_rtol = 5e-4, 5e-5 hist = ps.FieldHistogrammer(mpi, num_bins, dtype, rank_shape=rank_shape, halo_shape=h) rng = clr.ThreefryGenerator(ctx, seed=12321) fx = rng.uniform(queue, (2, 2)+pencil_shape, dtype, a=-1.2, b=3.) fx_h = fx.get()[..., h:-h, h:-h, h:-h] result = hist(fx) outer_shape = fx.shape[:-3] from itertools import product slices = list(product(*[range(n) for n in outer_shape])) for slc in slices: res = result["linear"][slc] np_res = np.histogram(fx_h[slc], bins=result["linear_bins"][slc])[0] np_res = mpi.allreduce(np_res) max_err, avg_err = get_errs(res, np_res) assert max_err < max_rtol and avg_err < avg_rtol, \ f"linear Histogrammer inaccurate for grid_shape={grid_shape}" \ f": {max_err=}, {avg_err=}" res = result["log"][slc] bins = result["log_bins"][slc] # avoid FPA comparison issues # numpy sometimes doesn't count the actual maximum/minimum eps = 1e-14 if np.dtype(dtype) == np.dtype("float64") else 1e-4 bins[0] *= (1 - eps) bins[-1] *= (1 + eps) np_res = np.histogram(np.abs(fx_h[slc]), bins=bins)[0] np_res = mpi.allreduce(np_res) norm = np.maximum(np.abs(res), np.abs(np_res)) norm[norm == 0.] = 1. 
max_err, avg_err = get_errs(res, np_res) assert max_err < max_rtol and avg_err < avg_rtol, \ f"log Histogrammer inaccurate for grid_shape={grid_shape}" \ f": {max_err=}, {avg_err=}" if timing: from common import timer t = timer(lambda: hist(fx[0, 0])) print(f"field histogram took {t:.3f} ms for {grid_shape=}, {dtype=}") if __name__ == "__main__": from common import parser args = parser.parse_args() test_trivial_histogram( ps.choose_device_and_make_context, grid_shape=args.grid_shape, proc_shape=args.proc_shape, dtype="float64", timing=args.timing, num_bins=1493, _N=1200, ) test_histogram( ps.choose_device_and_make_context, grid_shape=args.grid_shape, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing, num_bins=1001, ) test_field_histogram( ps.choose_device_and_make_context, grid_shape=args.grid_shape, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing )
__copyright__ = "Copyright (C) 2019 <NAME>" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import numpy as np import pyopencl as cl import pyopencl.clrandom as clr import pystella as ps import pytest from common import get_errs from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.filterwarnings( "ignore::pyopencl.characterize.CLCharacterizationWarning") @pytest.mark.filterwarnings("ignore::loopy.diagnostic.LoopyAdvisory") @pytest.mark.parametrize("dtype", ["float64"]) @pytest.mark.parametrize("num_bins", [123, 1024, 1493]) @pytest.mark.parametrize("_N", [256, 1200]) def test_trivial_histogram(ctx_factory, grid_shape, proc_shape, dtype, num_bins, _N, timing=False): ctx = ctx_factory() grid_shape = (_N,)*3 queue = cl.CommandQueue(ctx) h = 1 mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape) rank_shape, _ = mpi.get_rank_shape_start(grid_shape) histograms = { "a": (13., 1), "b": (10.3, 2), "c": (100.9, 3), } hist = ps.Histogrammer(mpi, histograms, num_bins, dtype, rank_shape=rank_shape) result = hist(queue) for key, (_b, weight) in histograms.items(): res = result[key] b = int(np.floor(_b)) expected = weight * np.product(grid_shape) assert res[b] == expected, \ f"{key}: result={res[b]}, {expected=}, ratio={res[b]/expected}" assert np.all(res[res != res[b]] == 0.) 
@pytest.mark.filterwarnings( "ignore::pyopencl.characterize.CLCharacterizationWarning") @pytest.mark.filterwarnings("ignore::loopy.diagnostic.LoopyAdvisory") @pytest.mark.parametrize("dtype", ["float64", "float32"]) @pytest.mark.parametrize("num_bins", [123, 1024, 1493]) def test_histogram(ctx_factory, grid_shape, proc_shape, dtype, num_bins, timing=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) h = 1 mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape) rank_shape, _ = mpi.get_rank_shape_start(grid_shape) if np.dtype(dtype) in (np.dtype("float64"), np.dtype("complex128")): max_rtol, avg_rtol = 1e-10, 1e-11 else: max_rtol, avg_rtol = 5e-4, 5e-5 from pymbolic import var _fx = ps.Field("fx") histograms = { "count": (var("abs")(_fx) * num_bins, 1), "squared": (var("abs")(_fx) * num_bins, _fx**2), } hist = ps.Histogrammer(mpi, histograms, num_bins, dtype, rank_shape=rank_shape) rng = clr.ThreefryGenerator(ctx, seed=12321) fx = rng.uniform(queue, rank_shape, dtype) fx_h = fx.get() result = hist(queue, fx=fx) res = result["count"] assert np.sum(res.astype("int64")) == np.product(grid_shape), \ f"Count histogram doesn't sum to grid_size ({np.sum(res)})" bins = np.linspace(0, 1, num_bins+1).astype(dtype) weights = np.ones_like(fx_h) np_res = np.histogram(fx_h, bins=bins, weights=weights)[0] np_res = mpi.allreduce(np_res) max_err, avg_err = get_errs(res, np_res) assert max_err < max_rtol and avg_err < avg_rtol, \ f"Histogrammer inaccurate for grid_shape={grid_shape}" \ f": {max_err=}, {avg_err=}" res = result["squared"] np_res = np.histogram(fx_h, bins=bins, weights=fx_h**2)[0] np_res = mpi.allreduce(np_res) max_err, avg_err = get_errs(res, np_res) assert max_err < max_rtol and avg_err < avg_rtol, \ f"Histogrammer with weights inaccurate for grid_shape={grid_shape}" \ f": {max_err=}, {avg_err=}" if timing: from common import timer t = timer(lambda: hist(queue, fx=fx)) print(f"histogram took {t:.3f} ms for {grid_shape=}, {dtype=}") 
@pytest.mark.filterwarnings( "ignore::pyopencl.characterize.CLCharacterizationWarning") @pytest.mark.filterwarnings("ignore::loopy.diagnostic.LoopyAdvisory") @pytest.mark.parametrize("dtype", ["float64", "float32"]) def test_field_histogram(ctx_factory, grid_shape, proc_shape, dtype, timing=False): ctx = ctx_factory() queue = cl.CommandQueue(ctx) h = 1 mpi = ps.DomainDecomposition(proc_shape, h, grid_shape=grid_shape) rank_shape, _ = mpi.get_rank_shape_start(grid_shape) pencil_shape = tuple(Ni + 2 * h for Ni in rank_shape) num_bins = 432 if np.dtype(dtype) in (np.dtype("float64"), np.dtype("complex128")): max_rtol, avg_rtol = 1e-10, 1e-11 else: max_rtol, avg_rtol = 5e-4, 5e-5 hist = ps.FieldHistogrammer(mpi, num_bins, dtype, rank_shape=rank_shape, halo_shape=h) rng = clr.ThreefryGenerator(ctx, seed=12321) fx = rng.uniform(queue, (2, 2)+pencil_shape, dtype, a=-1.2, b=3.) fx_h = fx.get()[..., h:-h, h:-h, h:-h] result = hist(fx) outer_shape = fx.shape[:-3] from itertools import product slices = list(product(*[range(n) for n in outer_shape])) for slc in slices: res = result["linear"][slc] np_res = np.histogram(fx_h[slc], bins=result["linear_bins"][slc])[0] np_res = mpi.allreduce(np_res) max_err, avg_err = get_errs(res, np_res) assert max_err < max_rtol and avg_err < avg_rtol, \ f"linear Histogrammer inaccurate for grid_shape={grid_shape}" \ f": {max_err=}, {avg_err=}" res = result["log"][slc] bins = result["log_bins"][slc] # avoid FPA comparison issues # numpy sometimes doesn't count the actual maximum/minimum eps = 1e-14 if np.dtype(dtype) == np.dtype("float64") else 1e-4 bins[0] *= (1 - eps) bins[-1] *= (1 + eps) np_res = np.histogram(np.abs(fx_h[slc]), bins=bins)[0] np_res = mpi.allreduce(np_res) norm = np.maximum(np.abs(res), np.abs(np_res)) norm[norm == 0.] = 1. 
max_err, avg_err = get_errs(res, np_res) assert max_err < max_rtol and avg_err < avg_rtol, \ f"log Histogrammer inaccurate for grid_shape={grid_shape}" \ f": {max_err=}, {avg_err=}" if timing: from common import timer t = timer(lambda: hist(fx[0, 0])) print(f"field histogram took {t:.3f} ms for {grid_shape=}, {dtype=}") if __name__ == "__main__": from common import parser args = parser.parse_args() test_trivial_histogram( ps.choose_device_and_make_context, grid_shape=args.grid_shape, proc_shape=args.proc_shape, dtype="float64", timing=args.timing, num_bins=1493, _N=1200, ) test_histogram( ps.choose_device_and_make_context, grid_shape=args.grid_shape, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing, num_bins=1001, ) test_field_histogram( ps.choose_device_and_make_context, grid_shape=args.grid_shape, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing )
en
0.765683
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # noqa # avoid FPA comparison issues # numpy sometimes doesn't count the actual maximum/minimum
1.802699
2
ilxutils/ilxutils/graph_edge_cases.py
tmsincomb/pyontutils
11
6613296
<reponame>tmsincomb/pyontutils from pyontutils.utils import * from pyontutils.core import * from pyontutils.closed_namespaces import * edge_cases = { #Definitions 'definition:': 'definition:', 'skos:definition': 'definition:', 'definition': 'definition:', 'NIFRID:birnlexDefinition': 'definition:', 'NIFRID:externallySourcedDefinition': 'definition:', #ExistingIds 'ilxtr:existingIds': 'ilxtr:existingIds', #LABELS 'rdfs:label': 'rdfs:label', 'skos:prefLabel': 'rdfs:label', #SUPERCLASSES 'rdfs:subClassOf': 'rdfs:subClassOf', #SYNONYMS 'oboInOwl:hasExactSynonym': 'NIFRID:synonym', #'oboInOwl:hasNarrowSynonym' : 'NIFRID:synonym', #'oboInOwl:hasBroadSynonym' : 'NIFRID:synonym', 'oboInOwl:hasRelatedSynonym': 'NIFRID:synonym', 'go:systematic_synonym': 'NIFRID:synonym', 'NIFRID:synonym': 'NIFRID:synonym', #TYPE 'rdf:type': 'rdf:type', } full = { #'':None, # safety (now managed directly in the curies file) #'EHDAA2':'http://purl.obolibrary.org/obo/EHDAA2_', # FIXME needs to go in curie map? 'hasRole': 'http://purl.obolibrary.org/obo/RO_0000087', 'inheresIn': 'http://purl.obolibrary.org/obo/RO_0000052', 'bearerOf': 'http://purl.obolibrary.org/obo/RO_0000053', 'participatesIn': 'http://purl.obolibrary.org/obo/RO_0000056', 'hasParticipant': 'http://purl.obolibrary.org/obo/RO_0000057', 'adjacentTo': 'http://purl.obolibrary.org/obo/RO_0002220', 'derivesFrom': 'http://purl.obolibrary.org/obo/RO_0001000', 'derivesInto': 'http://purl.obolibrary.org/obo/RO_0001001', 'agentIn': 'http://purl.obolibrary.org/obo/RO_0002217', 'hasAgent': 'http://purl.obolibrary.org/obo/RO_0002218', 'containedIn': 'http://purl.obolibrary.org/obo/RO_0001018', 'contains': 'http://purl.obolibrary.org/obo/RO_0001019', 'locatedIn': 'http://purl.obolibrary.org/obo/RO_0001025', 'locationOf': 'http://purl.obolibrary.org/obo/RO_0001015', 'toward': 'http://purl.obolibrary.org/obo/RO_0002503', 'replacedBy': 'http://purl.obolibrary.org/obo/IAO_0100001', 'hasCurStatus': 'http://purl.obolibrary.org/obo/IAO_0000114', 
'definition': 'http://purl.obolibrary.org/obo/IAO_0000115', 'editorNote': 'http://purl.obolibrary.org/obo/IAO_0000116', 'termEditor': 'http://purl.obolibrary.org/obo/IAO_0000117', 'altTerm': 'http://purl.obolibrary.org/obo/IAO_0000118', 'defSource': 'http://purl.obolibrary.org/obo/IAO_0000119', 'termsMerged': 'http://purl.obolibrary.org/obo/IAO_0000227', 'obsReason': 'http://purl.obolibrary.org/obo/IAO_0000231', 'curatorNote': 'http://purl.obolibrary.org/obo/IAO_0000232', 'importedFrom': 'http://purl.obolibrary.org/obo/IAO_0000412', 'partOf': 'http://purl.obolibrary.org/obo/BFO_0000050', 'hasPart': 'http://purl.obolibrary.org/obo/BFO_0000051', } normal = { 'ILX': 'http://uri.interlex.org/base/ilx_', 'ilx': 'http://uri.interlex.org/base/', 'ilxr': 'http://uri.interlex.org/base/readable/', 'ilxtr': 'http://uri.interlex.org/tgbugs/uris/readable/', # for obo files with 'fake' namespaces, http://uri.interlex.org/fakeobo/uris/ eqiv to purl.obolibrary.org/ 'fobo': 'http://uri.interlex.org/fakeobo/uris/obo/', 'PROTEGE': 'http://protege.stanford.edu/plugins/owl/protege#', 'ILXREPLACE': 'http://ILXREPLACE.org/', 'TEMP': interlex_namespace('temp/uris'), 'FIXME': 'http://FIXME.org/', 'NIFTTL': 'http://ontology.neuinfo.org/NIF/ttl/', 'NIFRET': 'http://ontology.neuinfo.org/NIF/Retired/NIF-Retired.owl#', 'NLXWIKI': 'http://neurolex.org/wiki/', 'dc': 'http://purl.org/dc/elements/1.1/', 'dcterms': 'http://purl.org/dc/terms/', 'dctypes': 'http://purl.org/dc/dcmitype/', # FIXME there is no agreement on qnames # FIXME a thought: was # intentionally used to increase user privacy? or is this just happenstance? 
'nsu': 'http://www.FIXME.org/nsupper#', 'oboInOwl': 'http://www.geneontology.org/formats/oboInOwl#', 'owl': 'http://www.w3.org/2002/07/owl#', 'ro': 'http://www.obofoundry.org/ro/ro.owl#', 'skos': 'http://www.w3.org/2004/02/skos/core#', 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'rdfs': 'http://www.w3.org/2000/01/rdf-schema#', 'prov': 'http://www.w3.org/ns/prov#', } expo = { 'ilxtr:existingId': 'ilxtr:identifier', 'oboInOwl:hasAlternativeId': 'ilxtr:identifier', 'NIFRID:isReplacedByClass': 'replacedBy:', 'skos:editorialNote': 'editorNote:', 'ncbitaxon:has_rank': 'NIFRID:hasTaxonRank', }
from pyontutils.utils import * from pyontutils.core import * from pyontutils.closed_namespaces import * edge_cases = { #Definitions 'definition:': 'definition:', 'skos:definition': 'definition:', 'definition': 'definition:', 'NIFRID:birnlexDefinition': 'definition:', 'NIFRID:externallySourcedDefinition': 'definition:', #ExistingIds 'ilxtr:existingIds': 'ilxtr:existingIds', #LABELS 'rdfs:label': 'rdfs:label', 'skos:prefLabel': 'rdfs:label', #SUPERCLASSES 'rdfs:subClassOf': 'rdfs:subClassOf', #SYNONYMS 'oboInOwl:hasExactSynonym': 'NIFRID:synonym', #'oboInOwl:hasNarrowSynonym' : 'NIFRID:synonym', #'oboInOwl:hasBroadSynonym' : 'NIFRID:synonym', 'oboInOwl:hasRelatedSynonym': 'NIFRID:synonym', 'go:systematic_synonym': 'NIFRID:synonym', 'NIFRID:synonym': 'NIFRID:synonym', #TYPE 'rdf:type': 'rdf:type', } full = { #'':None, # safety (now managed directly in the curies file) #'EHDAA2':'http://purl.obolibrary.org/obo/EHDAA2_', # FIXME needs to go in curie map? 'hasRole': 'http://purl.obolibrary.org/obo/RO_0000087', 'inheresIn': 'http://purl.obolibrary.org/obo/RO_0000052', 'bearerOf': 'http://purl.obolibrary.org/obo/RO_0000053', 'participatesIn': 'http://purl.obolibrary.org/obo/RO_0000056', 'hasParticipant': 'http://purl.obolibrary.org/obo/RO_0000057', 'adjacentTo': 'http://purl.obolibrary.org/obo/RO_0002220', 'derivesFrom': 'http://purl.obolibrary.org/obo/RO_0001000', 'derivesInto': 'http://purl.obolibrary.org/obo/RO_0001001', 'agentIn': 'http://purl.obolibrary.org/obo/RO_0002217', 'hasAgent': 'http://purl.obolibrary.org/obo/RO_0002218', 'containedIn': 'http://purl.obolibrary.org/obo/RO_0001018', 'contains': 'http://purl.obolibrary.org/obo/RO_0001019', 'locatedIn': 'http://purl.obolibrary.org/obo/RO_0001025', 'locationOf': 'http://purl.obolibrary.org/obo/RO_0001015', 'toward': 'http://purl.obolibrary.org/obo/RO_0002503', 'replacedBy': 'http://purl.obolibrary.org/obo/IAO_0100001', 'hasCurStatus': 'http://purl.obolibrary.org/obo/IAO_0000114', 'definition': 
'http://purl.obolibrary.org/obo/IAO_0000115', 'editorNote': 'http://purl.obolibrary.org/obo/IAO_0000116', 'termEditor': 'http://purl.obolibrary.org/obo/IAO_0000117', 'altTerm': 'http://purl.obolibrary.org/obo/IAO_0000118', 'defSource': 'http://purl.obolibrary.org/obo/IAO_0000119', 'termsMerged': 'http://purl.obolibrary.org/obo/IAO_0000227', 'obsReason': 'http://purl.obolibrary.org/obo/IAO_0000231', 'curatorNote': 'http://purl.obolibrary.org/obo/IAO_0000232', 'importedFrom': 'http://purl.obolibrary.org/obo/IAO_0000412', 'partOf': 'http://purl.obolibrary.org/obo/BFO_0000050', 'hasPart': 'http://purl.obolibrary.org/obo/BFO_0000051', } normal = { 'ILX': 'http://uri.interlex.org/base/ilx_', 'ilx': 'http://uri.interlex.org/base/', 'ilxr': 'http://uri.interlex.org/base/readable/', 'ilxtr': 'http://uri.interlex.org/tgbugs/uris/readable/', # for obo files with 'fake' namespaces, http://uri.interlex.org/fakeobo/uris/ eqiv to purl.obolibrary.org/ 'fobo': 'http://uri.interlex.org/fakeobo/uris/obo/', 'PROTEGE': 'http://protege.stanford.edu/plugins/owl/protege#', 'ILXREPLACE': 'http://ILXREPLACE.org/', 'TEMP': interlex_namespace('temp/uris'), 'FIXME': 'http://FIXME.org/', 'NIFTTL': 'http://ontology.neuinfo.org/NIF/ttl/', 'NIFRET': 'http://ontology.neuinfo.org/NIF/Retired/NIF-Retired.owl#', 'NLXWIKI': 'http://neurolex.org/wiki/', 'dc': 'http://purl.org/dc/elements/1.1/', 'dcterms': 'http://purl.org/dc/terms/', 'dctypes': 'http://purl.org/dc/dcmitype/', # FIXME there is no agreement on qnames # FIXME a thought: was # intentionally used to increase user privacy? or is this just happenstance? 
'nsu': 'http://www.FIXME.org/nsupper#', 'oboInOwl': 'http://www.geneontology.org/formats/oboInOwl#', 'owl': 'http://www.w3.org/2002/07/owl#', 'ro': 'http://www.obofoundry.org/ro/ro.owl#', 'skos': 'http://www.w3.org/2004/02/skos/core#', 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'rdfs': 'http://www.w3.org/2000/01/rdf-schema#', 'prov': 'http://www.w3.org/ns/prov#', } expo = { 'ilxtr:existingId': 'ilxtr:identifier', 'oboInOwl:hasAlternativeId': 'ilxtr:identifier', 'NIFRID:isReplacedByClass': 'replacedBy:', 'skos:editorialNote': 'editorNote:', 'ncbitaxon:has_rank': 'NIFRID:hasTaxonRank', }
en
0.596399
#Definitions #ExistingIds #LABELS #SUPERCLASSES #SYNONYMS #'oboInOwl:hasNarrowSynonym' : 'NIFRID:synonym', #'oboInOwl:hasBroadSynonym' : 'NIFRID:synonym', #TYPE #'':None, # safety (now managed directly in the curies file) #'EHDAA2':'http://purl.obolibrary.org/obo/EHDAA2_', # FIXME needs to go in curie map? # for obo files with 'fake' namespaces, http://uri.interlex.org/fakeobo/uris/ eqiv to purl.obolibrary.org/ #', #', # FIXME there is no agreement on qnames # FIXME a thought: was # intentionally used to increase user privacy? or is this just happenstance? #', #', #', #', #', #', #', #',
1.566781
2
main.py
code-s-witch/project2
0
6613297
from search import graph import networkx as nx def main(): adjlist_file = './data/tiny_network.adjlist' start = '<NAME>' #* CASE 1: If there's no end node, just return a list with the order of traversal # instantiate a graph object with a start node only since we only care about traversal here not shortest path bfs_traversal = graph.Graph(adjlist_file).bfs(start) networkx_graph = nx.read_adjlist(adjlist_file, create_using=nx.DiGraph, delimiter=";") #for testing using nx API # assert that all nodes are being traversed (ie. returns the right number of nodes) assert(nx.number_of_nodes(networkx_graph) == len(bfs_traversal)) assert all(node in networkx_graph for node in bfs_traversal) == True # assert that all nodes are being traversed in the right order nx_bfs_traversal_nodes = list(nx.bfs_tree(networkx_graph, start).nodes) assert all([True for node in range(len(bfs_traversal)) if bfs_traversal[node] == nx_bfs_traversal_nodes[node]]) == True #Case 2 adjlist_file = './data/small_file.adjlist' start = 'A' end = 'G' truth_set = ['A', 'C', 'G'] bfs_path = graph.Graph(adjlist_file).bfs(start, end) """Source: https://www.geeksforgeeks.org/python-check-if-two-lists-are-identical/""" assert(len(truth_set)== len(bfs_path) and len(truth_set) == sum([1 for i, j in zip(truth_set, bfs_path) if i == j])) print(bfs_path) #* CASE 3: If there is an end node and a path does not exist, return None #strategy - give the bfs search method a end node that does not exist to esnure no path exists (Brenda's suggestion) adjlist_file = './data/citation_network.adjlist' start = '<NAME>' end = '<NAME>' # instantiate a graph object with a start node only since we only care about traversal here not shortest path bfs_path = graph.Graph(adjlist_file).bfs(start, end) assert(bfs_path == None) if __name__ == "__main__": main()
from search import graph import networkx as nx def main(): adjlist_file = './data/tiny_network.adjlist' start = '<NAME>' #* CASE 1: If there's no end node, just return a list with the order of traversal # instantiate a graph object with a start node only since we only care about traversal here not shortest path bfs_traversal = graph.Graph(adjlist_file).bfs(start) networkx_graph = nx.read_adjlist(adjlist_file, create_using=nx.DiGraph, delimiter=";") #for testing using nx API # assert that all nodes are being traversed (ie. returns the right number of nodes) assert(nx.number_of_nodes(networkx_graph) == len(bfs_traversal)) assert all(node in networkx_graph for node in bfs_traversal) == True # assert that all nodes are being traversed in the right order nx_bfs_traversal_nodes = list(nx.bfs_tree(networkx_graph, start).nodes) assert all([True for node in range(len(bfs_traversal)) if bfs_traversal[node] == nx_bfs_traversal_nodes[node]]) == True #Case 2 adjlist_file = './data/small_file.adjlist' start = 'A' end = 'G' truth_set = ['A', 'C', 'G'] bfs_path = graph.Graph(adjlist_file).bfs(start, end) """Source: https://www.geeksforgeeks.org/python-check-if-two-lists-are-identical/""" assert(len(truth_set)== len(bfs_path) and len(truth_set) == sum([1 for i, j in zip(truth_set, bfs_path) if i == j])) print(bfs_path) #* CASE 3: If there is an end node and a path does not exist, return None #strategy - give the bfs search method a end node that does not exist to esnure no path exists (Brenda's suggestion) adjlist_file = './data/citation_network.adjlist' start = '<NAME>' end = '<NAME>' # instantiate a graph object with a start node only since we only care about traversal here not shortest path bfs_path = graph.Graph(adjlist_file).bfs(start, end) assert(bfs_path == None) if __name__ == "__main__": main()
en
0.841602
#* CASE 1: If there's no end node, just return a list with the order of traversal # instantiate a graph object with a start node only since we only care about traversal here not shortest path #for testing using nx API # assert that all nodes are being traversed (ie. returns the right number of nodes) # assert that all nodes are being traversed in the right order #Case 2 Source: https://www.geeksforgeeks.org/python-check-if-two-lists-are-identical/ #* CASE 3: If there is an end node and a path does not exist, return None #strategy - give the bfs search method a end node that does not exist to esnure no path exists (Brenda's suggestion) # instantiate a graph object with a start node only since we only care about traversal here not shortest path
3.751383
4
src/ab_testing/server_starter.py
JouniVatanen/NLP-and-Deep-Learning
1
6613298
<gh_stars>1-10 # From the course: Bayesin Machine Learning in Python: A/B Testing # https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing # https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future # sudo pip install -U future import numpy as np from flask import Flask, jsonify, request from scipy.stats import beta # create an app app = Flask(__name__) # define bandits # there's no "pull arm" here # since that's technically now the user/client class Bandit: def __init__(self, name): self.name = name def sample(self): # TODO return 1 # TODO - what else does the Bandit need to do? # initialize bandits banditA = Bandit('A') banditB = Bandit('B') @app.route('/get_ad') def get_ad(): # TODO return jsonify({'advertisement_id': 'A'}) @app.route('/click_ad', methods=['POST']) def click_ad(): result = 'OK' if request.form['advertisement_id'] == 'A': # TODO pass elif request.form['advertisement_id'] == 'B': # TODO pass else: result = 'Invalid Input.' # nothing to return really return jsonify({'result': result}) if __name__ == '__main__': app.run(host='127.0.0.1', port='8888')
# From the course: Bayesin Machine Learning in Python: A/B Testing # https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing # https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing from __future__ import print_function, division from builtins import range # Note: you may need to update your version of future # sudo pip install -U future import numpy as np from flask import Flask, jsonify, request from scipy.stats import beta # create an app app = Flask(__name__) # define bandits # there's no "pull arm" here # since that's technically now the user/client class Bandit: def __init__(self, name): self.name = name def sample(self): # TODO return 1 # TODO - what else does the Bandit need to do? # initialize bandits banditA = Bandit('A') banditB = Bandit('B') @app.route('/get_ad') def get_ad(): # TODO return jsonify({'advertisement_id': 'A'}) @app.route('/click_ad', methods=['POST']) def click_ad(): result = 'OK' if request.form['advertisement_id'] == 'A': # TODO pass elif request.form['advertisement_id'] == 'B': # TODO pass else: result = 'Invalid Input.' # nothing to return really return jsonify({'result': result}) if __name__ == '__main__': app.run(host='127.0.0.1', port='8888')
en
0.839657
# From the course: Bayesin Machine Learning in Python: A/B Testing # https://deeplearningcourses.com/c/bayesian-machine-learning-in-python-ab-testing # https://www.udemy.com/bayesian-machine-learning-in-python-ab-testing # Note: you may need to update your version of future # sudo pip install -U future # create an app # define bandits # there's no "pull arm" here # since that's technically now the user/client # TODO # TODO - what else does the Bandit need to do? # initialize bandits # TODO # TODO # TODO # nothing to return really
2.975045
3
backend/biz/migrations/0008_auto_20200504_0823.py
Critter-Inc/critter
0
6613299
<reponame>Critter-Inc/critter # Generated by Django 3.0.5 on 2020-05-04 08:23 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('biz', '0007_auto_20200502_1700'), ] operations = [ migrations.AlterUniqueTogether( name='hours', unique_together={('biz', 'weekday', 'from_hour')}, ), ]
# Generated by Django 3.0.5 on 2020-05-04 08:23 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('biz', '0007_auto_20200502_1700'), ] operations = [ migrations.AlterUniqueTogether( name='hours', unique_together={('biz', 'weekday', 'from_hour')}, ), ]
en
0.776979
# Generated by Django 3.0.5 on 2020-05-04 08:23
1.637087
2
openid_connect_op/decorators.py
rerobins/django-openid-op
2
6613300
<filename>openid_connect_op/decorators.py import base64 from functools import wraps from django.conf import settings from django.http.response import HttpResponseForbidden from django.utils import timezone from openid_connect_op.models import OpenIDToken def get_access_token_from_auth_header(auth_header): auth_header = auth_header.strip() if not auth_header.startswith('Bearer '): raise AttributeError('Not a Bearer token') return auth_header[7:] def get_access_token_from_post_data(request): post_data = request.POST if request.content_type != 'application/x-www-form-urlencoded': return None if 'access_token' not in post_data: return None return post_data['access_token'] def get_access_token_from_uri_query(get_data): if 'access_token' not in get_data: return None return get_data['access_token'] def extract_access_token(request, forbidden_on_not_present): auth_header = request.META.get('HTTP_AUTHORIZATION', None) access_token = None try: if auth_header is not None: access_token = get_access_token_from_auth_header(auth_header) if request.method == 'POST' and not access_token: access_token = get_access_token_from_post_data(request) if not access_token: access_token = get_access_token_from_uri_query(request.GET) if not access_token: if forbidden_on_not_present: return HttpResponseForbidden('No access token provided') else: return None try: db_access_token = OpenIDToken.objects.get(token_hash=OpenIDToken.get_token_hash(access_token)) if db_access_token.expiration < timezone.now(): return HttpResponseForbidden('Expired access token') return db_access_token except OpenIDToken.DoesNotExist: return HttpResponseForbidden('Provided access token %s not found' % access_token) except BaseException as e: return HttpResponseForbidden('Access error: %s' % e) def access_token_required(disabled_settings=None): def wrapper(func): """ Check that access token is present on the request and is valid. If not, returns HttpResponseForbidden. 
request is annotated with the database access token, i.e. isinstance(req.openid_access_token, OpenIDToken) == True """ @wraps(func) def inner(request, *args, **kwargs): if disabled_settings: if getattr(settings, disabled_settings, False): request.openid_access_token = None return func(request, *args, **kwargs) db_access_token = extract_access_token(request, True) if isinstance(db_access_token, HttpResponseForbidden): return db_access_token request.openid_access_token = db_access_token return func(request, *args, **kwargs) return inner return wrapper __all__ = ('access_token_required',)
<filename>openid_connect_op/decorators.py import base64 from functools import wraps from django.conf import settings from django.http.response import HttpResponseForbidden from django.utils import timezone from openid_connect_op.models import OpenIDToken def get_access_token_from_auth_header(auth_header): auth_header = auth_header.strip() if not auth_header.startswith('Bearer '): raise AttributeError('Not a Bearer token') return auth_header[7:] def get_access_token_from_post_data(request): post_data = request.POST if request.content_type != 'application/x-www-form-urlencoded': return None if 'access_token' not in post_data: return None return post_data['access_token'] def get_access_token_from_uri_query(get_data): if 'access_token' not in get_data: return None return get_data['access_token'] def extract_access_token(request, forbidden_on_not_present): auth_header = request.META.get('HTTP_AUTHORIZATION', None) access_token = None try: if auth_header is not None: access_token = get_access_token_from_auth_header(auth_header) if request.method == 'POST' and not access_token: access_token = get_access_token_from_post_data(request) if not access_token: access_token = get_access_token_from_uri_query(request.GET) if not access_token: if forbidden_on_not_present: return HttpResponseForbidden('No access token provided') else: return None try: db_access_token = OpenIDToken.objects.get(token_hash=OpenIDToken.get_token_hash(access_token)) if db_access_token.expiration < timezone.now(): return HttpResponseForbidden('Expired access token') return db_access_token except OpenIDToken.DoesNotExist: return HttpResponseForbidden('Provided access token %s not found' % access_token) except BaseException as e: return HttpResponseForbidden('Access error: %s' % e) def access_token_required(disabled_settings=None): def wrapper(func): """ Check that access token is present on the request and is valid. If not, returns HttpResponseForbidden. 
request is annotated with the database access token, i.e. isinstance(req.openid_access_token, OpenIDToken) == True """ @wraps(func) def inner(request, *args, **kwargs): if disabled_settings: if getattr(settings, disabled_settings, False): request.openid_access_token = None return func(request, *args, **kwargs) db_access_token = extract_access_token(request, True) if isinstance(db_access_token, HttpResponseForbidden): return db_access_token request.openid_access_token = db_access_token return func(request, *args, **kwargs) return inner return wrapper __all__ = ('access_token_required',)
en
0.771609
Check that access token is present on the request and is valid. If not, returns HttpResponseForbidden. request is annotated with the database access token, i.e. isinstance(req.openid_access_token, OpenIDToken) == True
2.157284
2
src/hyperparameters_exploration/HyperparametersExploration.py
philipco/structured_noise
0
6613301
<filename>src/hyperparameters_exploration/HyperparametersExploration.py """ Created by <NAME>, 18th January 2022. """ import matplotlib matplotlib.rcParams.update({ "pgf.texsystem": "pdflatex", 'font.family': 'serif', 'text.usetex': True, 'pgf.rcfonts': False, 'text.latex.preamble': r'\usepackage{amsfonts}' }) import hashlib import os import sys import numpy as np from matplotlib import pyplot as plt from src.PickleHandler import pickle_saver, pickle_loader from src.Utilities import create_folder_if_not_existing from src.hyperparameters_exploration import Explorer from src.hyperparameters_exploration.Hyperparameters import Hyperparameters from src.hyperparameters_exploration.Metric import Metric class Exploration: def __init__(self, name, hyperparameters: Hyperparameters, explorer: Explorer, metrics: Metric): # super().__init__() self.name = name self.hyperparameters = hyperparameters self.explorer = explorer self.metrics = metrics self.nb_runs = 2 self.results = np.zeros((self.explorer.nb_outputs, self.nb_runs, self.hyperparameters.nb_hyperparams)) self.string_before_hash = str(self.hyperparameters.range_hyperparameters) self.hash_string = self.explorer.function.__name__ + "-" + hashlib.shake_256(self.string_before_hash.encode()).hexdigest(4) # returns a hash value of length 2*4 self.pickle_folder = "./pickle/exploration/" self.pictures_folder = "./pictures/exploration/" create_folder_if_not_existing(self.pickle_folder) create_folder_if_not_existing(self.pictures_folder) def run_exploration(self): print("====> Starting exploration : ", self.name) for idx_param in range(self.hyperparameters.nb_hyperparams): param = self.hyperparameters.range_hyperparameters[idx_param] print("Hyperparameter's value:", param) # self.blockPrint() for idx_run in range(self.nb_runs): output = self.explorer.explore(param) for i in range(len(output)): self.results[i, idx_run, idx_param] = self.metrics.compute(output[i]) pickle_saver(self, self.pickle_folder + self.hash_string) 
self.enablePrint() def load(self): self.results = pickle_loader(self.pickle_folder + self.hash_string).results[:,:,:-1] self.hyperparameters.range_hyperparameters = self.hyperparameters.range_hyperparameters[:-1] self.hyperparameters.nb_hyperparams -= 1 def plot_exploration(self): fig, ax = plt.subplots(figsize=(8, 7)) for i in range(len(self.explorer.outputs_label)): plt.errorbar(range(self.hyperparameters.nb_hyperparams), np.mean(self.results[i], axis=0), yerr=np.std(self.results[i], axis=0), label=self.explorer.outputs_label[i], lw=4) plt.xticks([i for i in range(0, len(self.hyperparameters.range_hyperparameters))], self.hyperparameters.range_hyperparameters, rotation=30, fontsize=15) plt.yticks(fontsize=15) ax.set_xlabel(self.hyperparameters.x_axis_label, fontsize=15) ax.set_ylabel(self.metrics.y_axis_label, fontsize=15) plt.title(self.hyperparameters.name, fontsize=15) plt.legend(loc='best', fontsize=15) ax.grid() plt.savefig('{0}.eps'.format(self.pictures_folder + self.hash_string), format='eps') plt.close() # Disable def blockPrint(self): sys.stdout = open(os.devnull, 'w') # Restore def enablePrint(self): sys.stdout = sys.__stdout__
<filename>src/hyperparameters_exploration/HyperparametersExploration.py """ Created by <NAME>, 18th January 2022. """ import matplotlib matplotlib.rcParams.update({ "pgf.texsystem": "pdflatex", 'font.family': 'serif', 'text.usetex': True, 'pgf.rcfonts': False, 'text.latex.preamble': r'\usepackage{amsfonts}' }) import hashlib import os import sys import numpy as np from matplotlib import pyplot as plt from src.PickleHandler import pickle_saver, pickle_loader from src.Utilities import create_folder_if_not_existing from src.hyperparameters_exploration import Explorer from src.hyperparameters_exploration.Hyperparameters import Hyperparameters from src.hyperparameters_exploration.Metric import Metric class Exploration: def __init__(self, name, hyperparameters: Hyperparameters, explorer: Explorer, metrics: Metric): # super().__init__() self.name = name self.hyperparameters = hyperparameters self.explorer = explorer self.metrics = metrics self.nb_runs = 2 self.results = np.zeros((self.explorer.nb_outputs, self.nb_runs, self.hyperparameters.nb_hyperparams)) self.string_before_hash = str(self.hyperparameters.range_hyperparameters) self.hash_string = self.explorer.function.__name__ + "-" + hashlib.shake_256(self.string_before_hash.encode()).hexdigest(4) # returns a hash value of length 2*4 self.pickle_folder = "./pickle/exploration/" self.pictures_folder = "./pictures/exploration/" create_folder_if_not_existing(self.pickle_folder) create_folder_if_not_existing(self.pictures_folder) def run_exploration(self): print("====> Starting exploration : ", self.name) for idx_param in range(self.hyperparameters.nb_hyperparams): param = self.hyperparameters.range_hyperparameters[idx_param] print("Hyperparameter's value:", param) # self.blockPrint() for idx_run in range(self.nb_runs): output = self.explorer.explore(param) for i in range(len(output)): self.results[i, idx_run, idx_param] = self.metrics.compute(output[i]) pickle_saver(self, self.pickle_folder + self.hash_string) 
self.enablePrint() def load(self): self.results = pickle_loader(self.pickle_folder + self.hash_string).results[:,:,:-1] self.hyperparameters.range_hyperparameters = self.hyperparameters.range_hyperparameters[:-1] self.hyperparameters.nb_hyperparams -= 1 def plot_exploration(self): fig, ax = plt.subplots(figsize=(8, 7)) for i in range(len(self.explorer.outputs_label)): plt.errorbar(range(self.hyperparameters.nb_hyperparams), np.mean(self.results[i], axis=0), yerr=np.std(self.results[i], axis=0), label=self.explorer.outputs_label[i], lw=4) plt.xticks([i for i in range(0, len(self.hyperparameters.range_hyperparameters))], self.hyperparameters.range_hyperparameters, rotation=30, fontsize=15) plt.yticks(fontsize=15) ax.set_xlabel(self.hyperparameters.x_axis_label, fontsize=15) ax.set_ylabel(self.metrics.y_axis_label, fontsize=15) plt.title(self.hyperparameters.name, fontsize=15) plt.legend(loc='best', fontsize=15) ax.grid() plt.savefig('{0}.eps'.format(self.pictures_folder + self.hash_string), format='eps') plt.close() # Disable def blockPrint(self): sys.stdout = open(os.devnull, 'w') # Restore def enablePrint(self): sys.stdout = sys.__stdout__
en
0.686406
Created by <NAME>, 18th January 2022. # super().__init__() # returns a hash value of length 2*4 # self.blockPrint() # Disable # Restore
2.773563
3
code/genetic_algorithm/callbacks.py
ahillbs/minimum_scan_cover
0
6613302
import pathlib import numpy as np from . import GeneticAlgorithm from database import Task class SaveCallback: def __init__(self, iterations, population_amount, task: Task, session): #self.generations = np.empty( # (iterations, population_amount), dtype=object) self.session = session self.task = task return def __call__(self, gen_algo: GeneticAlgorithm): if not self.session or not self.task: return try: self.session.add_all(gen_algo.genomes.tolist()) self.session.commit() #self.generations[gen_algo.generation][:] = gen_algo.genomes[:] #with open(self.savePath, 'wb') as fd_instances: # pickle.dump(self.generations[:gen_algo.generation+1], fd_instances) except Exception as e: print("Exception while calling the iteration end callback:", e) def update_callback(gen_algo: GeneticAlgorithm): mean = np.mean(gen_algo.fitness_val) max_fitness = np.max(gen_algo.fitness_val) min_fitness = np.min(gen_algo.fitness_val) gen_algo.termCon.tqdm.set_description_str( "Evolve feature genomes. Fitness values: Mean: {0}, max: {1}, min: {2}".format(mean, max_fitness, min_fitness) )
import pathlib import numpy as np from . import GeneticAlgorithm from database import Task class SaveCallback: def __init__(self, iterations, population_amount, task: Task, session): #self.generations = np.empty( # (iterations, population_amount), dtype=object) self.session = session self.task = task return def __call__(self, gen_algo: GeneticAlgorithm): if not self.session or not self.task: return try: self.session.add_all(gen_algo.genomes.tolist()) self.session.commit() #self.generations[gen_algo.generation][:] = gen_algo.genomes[:] #with open(self.savePath, 'wb') as fd_instances: # pickle.dump(self.generations[:gen_algo.generation+1], fd_instances) except Exception as e: print("Exception while calling the iteration end callback:", e) def update_callback(gen_algo: GeneticAlgorithm): mean = np.mean(gen_algo.fitness_val) max_fitness = np.max(gen_algo.fitness_val) min_fitness = np.min(gen_algo.fitness_val) gen_algo.termCon.tqdm.set_description_str( "Evolve feature genomes. Fitness values: Mean: {0}, max: {1}, min: {2}".format(mean, max_fitness, min_fitness) )
en
0.40184
#self.generations = np.empty( # (iterations, population_amount), dtype=object) #self.generations[gen_algo.generation][:] = gen_algo.genomes[:] #with open(self.savePath, 'wb') as fd_instances: # pickle.dump(self.generations[:gen_algo.generation+1], fd_instances)
2.329611
2
examples/src/ActiveX/LinkingVideoActiveXControl.py
aspose-slides/Aspose.Slides-for-Python-via-.NET
0
6613303
import aspose.slides as slides def activex_linking_video_activex_control(): #ExStart:LinkingVideoActiveXControl # The path to the documents directory. dataDir = "./examples/data/" outDir = "./examples/out/" # Instantiate Presentation class that represents PPTX file with slides.Presentation(dataDir + "activex_template.pptx") as presentation: # Create empty presentation instance with slides.Presentation() as newPresentation: # Remove default slide newPresentation.slides.remove_at(0) # Clone slide with Media Player ActiveX Control newPresentation.slides.insert_clone(0, presentation.slides[0]) # Access the Media Player ActiveX control and set the video path control = newPresentation.slides[0].controls[0] control.properties.remove("URL") control.properties.add("URL", dataDir + "video.mp4") # Save the Presentation newPresentation.save(outDir + "activex_linking_video_activex_control_out.pptx", slides.export.SaveFormat.PPTX) #ExEnd:LinkingVideoActiveXControl
import aspose.slides as slides def activex_linking_video_activex_control(): #ExStart:LinkingVideoActiveXControl # The path to the documents directory. dataDir = "./examples/data/" outDir = "./examples/out/" # Instantiate Presentation class that represents PPTX file with slides.Presentation(dataDir + "activex_template.pptx") as presentation: # Create empty presentation instance with slides.Presentation() as newPresentation: # Remove default slide newPresentation.slides.remove_at(0) # Clone slide with Media Player ActiveX Control newPresentation.slides.insert_clone(0, presentation.slides[0]) # Access the Media Player ActiveX control and set the video path control = newPresentation.slides[0].controls[0] control.properties.remove("URL") control.properties.add("URL", dataDir + "video.mp4") # Save the Presentation newPresentation.save(outDir + "activex_linking_video_activex_control_out.pptx", slides.export.SaveFormat.PPTX) #ExEnd:LinkingVideoActiveXControl
en
0.67752
#ExStart:LinkingVideoActiveXControl # The path to the documents directory. # Instantiate Presentation class that represents PPTX file # Create empty presentation instance # Remove default slide # Clone slide with Media Player ActiveX Control # Access the Media Player ActiveX control and set the video path # Save the Presentation #ExEnd:LinkingVideoActiveXControl
2.913956
3
Day-009/01-dictionary-comprehensions.py
arvimal/100DaysofCode-Python
1
6613304
#!/usr/bin/env python3 # Dictionary comprehensions originated in Python v3, # but were backported to v2. # Example 1 # This creates a key and value from a range print({i: str(i) for i in range(5)}) # {0: '0', 1: '1', 2: '2', 3: '3', 4: '4'} # Example 2 # Swap the key and value pairs in a dict dict_1 = {0: "Hello", 1: "World!"} print({value: key for key, value in dict_1.items()})
#!/usr/bin/env python3 # Dictionary comprehensions originated in Python v3, # but were backported to v2. # Example 1 # This creates a key and value from a range print({i: str(i) for i in range(5)}) # {0: '0', 1: '1', 2: '2', 3: '3', 4: '4'} # Example 2 # Swap the key and value pairs in a dict dict_1 = {0: "Hello", 1: "World!"} print({value: key for key, value in dict_1.items()})
en
0.677107
#!/usr/bin/env python3 # Dictionary comprehensions originated in Python v3, # but were backported to v2. # Example 1 # This creates a key and value from a range # {0: '0', 1: '1', 2: '2', 3: '3', 4: '4'} # Example 2 # Swap the key and value pairs in a dict
4.252643
4
TopicExtractor/src/utils/database.py
npnkbabu/mymlproject
0
6613305
<reponame>npnkbabu/mymlproject import psycopg2 import os import json import pandas as pd BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) CONFIG_PATH = os.path.join(BASE_DIR,'config') CONFIG_FILE = 'database.json' class NewsDatabase(): def __init__(self): print('database instantiated') @staticmethod def getConnection(): with open(os.path.join(CONFIG_PATH,CONFIG_FILE), 'r') as file: config = json.load(file) return psycopg2.connect( user = os.getenv('MYMLPROJECT_NEWSDB_USER'), password = os.getenv('<PASSWORD>'), host = config['host'], port = config['port'], database = os.getenv('MYMLPROJECT_NEWSDB_DATABASE') ) @staticmethod def dumpSources(sources): try: connection = NewsDatabase.getConnection() cursor = connection.cursor() sql = '''TRUNCATE TABLE newsdata.source_details RESTART IDENTITY CASCADE''' cursor.execute(sql) print('removed {} rows from source_details'.format(cursor.rowcount)) sql = '''insert into newsdata.source_details (source_id,content) values (%s,%s)''' record_to_insert=[] for src in sources: record_to_insert.append((1,json.dumps(src))) cursor.executemany(sql,record_to_insert) connection.commit() print('{} sources inserted into database'.format(cursor.rowcount)) except(Exception, psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed') @staticmethod def dumpNewsData(data): try: connection = NewsDatabase.getConnection() cursor = connection.cursor() #truncate news_data sql = '''TRUNCATE TABLE newsdata.news_data RESTART IDENTITY CASCADE''' cursor.execute(sql) print('removed {} rows from news_data'.format(cursor.rowcount)) #truncate article sql = '''TRUNCATE TABLE newsdata.article RESTART IDENTITY CASCADE''' cursor.execute(sql) print('removed {} rows from article'.format(cursor.rowcount)) sql = '''insert into 
newsdata.news_data (content) values (%s) RETURNING news_data_id''' sqlArticle = '''insert into newsdata.article (news_data_id,content) values (%s,%s)''' for newsdata in data: cursor.execute(sql,(json.dumps(newsdata),)) news_data_id = cursor.fetchone()[0] for article in newsdata: cursor.execute(sqlArticle,(news_data_id,json.dumps(article))) connection.commit() print('{} news_data inserted into database'.format(cursor.rowcount)) except(Exception, psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed') @staticmethod def getArticlesData(): print('getting articles data') try: connection = NewsDatabase.getConnection() cursor = connection.cursor() sqlArticle = '''select article_id,content->>'content' as content from newsdata.article order by article_id ''' cursor.execute(sqlArticle) tuples = cursor.fetchall() print('{} articles retrieved'.format(len(tuples))) return pd.DataFrame(tuples,columns=['article_id','content']) except(Exception, psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed') @staticmethod def dumpFeatures(lstData): try: connection = NewsDatabase.getConnection() cursor = connection.cursor() #drop existing features sql = '''TRUNCATE TABLE newsdata.features RESTART IDENTITY CASCADE''' cursor.execute(sql) print('removed {} rows from features'.format(cursor.rowcount)) #store tokens and tfidf into files sql = '''insert into newsdata.features (id2word_file,corpus_file,model_file,processeddata_file) values (%s,%s,%s,%s)''' cursor.execute(sql,(lstData[0],lstData[1],lstData[2],lstData[3])) connection.commit() print('{} features inserted into 
database'.format(cursor.rowcount)) except(Exception, psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed') @staticmethod def GetFeatures(): try: connection = NewsDatabase.getConnection() cursor = connection.cursor() #load tokens and tfidf into files sql = '''select id2word_file, corpus_file, model_file,processeddata_file from newsdata.features''' cursor.execute(sql) tuples = cursor.fetchall() return [x for x in tuples[0]] except(Exception, psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed')
import psycopg2 import os import json import pandas as pd BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) CONFIG_PATH = os.path.join(BASE_DIR,'config') CONFIG_FILE = 'database.json' class NewsDatabase(): def __init__(self): print('database instantiated') @staticmethod def getConnection(): with open(os.path.join(CONFIG_PATH,CONFIG_FILE), 'r') as file: config = json.load(file) return psycopg2.connect( user = os.getenv('MYMLPROJECT_NEWSDB_USER'), password = os.getenv('<PASSWORD>'), host = config['host'], port = config['port'], database = os.getenv('MYMLPROJECT_NEWSDB_DATABASE') ) @staticmethod def dumpSources(sources): try: connection = NewsDatabase.getConnection() cursor = connection.cursor() sql = '''TRUNCATE TABLE newsdata.source_details RESTART IDENTITY CASCADE''' cursor.execute(sql) print('removed {} rows from source_details'.format(cursor.rowcount)) sql = '''insert into newsdata.source_details (source_id,content) values (%s,%s)''' record_to_insert=[] for src in sources: record_to_insert.append((1,json.dumps(src))) cursor.executemany(sql,record_to_insert) connection.commit() print('{} sources inserted into database'.format(cursor.rowcount)) except(Exception, psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed') @staticmethod def dumpNewsData(data): try: connection = NewsDatabase.getConnection() cursor = connection.cursor() #truncate news_data sql = '''TRUNCATE TABLE newsdata.news_data RESTART IDENTITY CASCADE''' cursor.execute(sql) print('removed {} rows from news_data'.format(cursor.rowcount)) #truncate article sql = '''TRUNCATE TABLE newsdata.article RESTART IDENTITY CASCADE''' cursor.execute(sql) print('removed {} rows from article'.format(cursor.rowcount)) sql = '''insert into newsdata.news_data (content) values (%s) 
RETURNING news_data_id''' sqlArticle = '''insert into newsdata.article (news_data_id,content) values (%s,%s)''' for newsdata in data: cursor.execute(sql,(json.dumps(newsdata),)) news_data_id = cursor.fetchone()[0] for article in newsdata: cursor.execute(sqlArticle,(news_data_id,json.dumps(article))) connection.commit() print('{} news_data inserted into database'.format(cursor.rowcount)) except(Exception, psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed') @staticmethod def getArticlesData(): print('getting articles data') try: connection = NewsDatabase.getConnection() cursor = connection.cursor() sqlArticle = '''select article_id,content->>'content' as content from newsdata.article order by article_id ''' cursor.execute(sqlArticle) tuples = cursor.fetchall() print('{} articles retrieved'.format(len(tuples))) return pd.DataFrame(tuples,columns=['article_id','content']) except(Exception, psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed') @staticmethod def dumpFeatures(lstData): try: connection = NewsDatabase.getConnection() cursor = connection.cursor() #drop existing features sql = '''TRUNCATE TABLE newsdata.features RESTART IDENTITY CASCADE''' cursor.execute(sql) print('removed {} rows from features'.format(cursor.rowcount)) #store tokens and tfidf into files sql = '''insert into newsdata.features (id2word_file,corpus_file,model_file,processeddata_file) values (%s,%s,%s,%s)''' cursor.execute(sql,(lstData[0],lstData[1],lstData[2],lstData[3])) connection.commit() print('{} features inserted into database'.format(cursor.rowcount)) except(Exception, 
psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed') @staticmethod def GetFeatures(): try: connection = NewsDatabase.getConnection() cursor = connection.cursor() #load tokens and tfidf into files sql = '''select id2word_file, corpus_file, model_file,processeddata_file from newsdata.features''' cursor.execute(sql) tuples = cursor.fetchall() return [x for x in tuples[0]] except(Exception, psycopg2.Error) as error: print('Error connecting to PostgreSQL database', error) connection = None except: print('error occured') connection = None finally: if connection != None: cursor.close() connection.close() print('PostgreSQL connection is now closed')
en
0.391369
TRUNCATE TABLE newsdata.source_details RESTART IDENTITY CASCADE insert into newsdata.source_details (source_id,content) values (%s,%s) #truncate news_data TRUNCATE TABLE newsdata.news_data RESTART IDENTITY CASCADE #truncate article TRUNCATE TABLE newsdata.article RESTART IDENTITY CASCADE insert into newsdata.news_data (content) values (%s) RETURNING news_data_id insert into newsdata.article (news_data_id,content) values (%s,%s) select article_id,content->>'content' as content from newsdata.article order by article_id #drop existing features TRUNCATE TABLE newsdata.features RESTART IDENTITY CASCADE #store tokens and tfidf into files insert into newsdata.features (id2word_file,corpus_file,model_file,processeddata_file) values (%s,%s,%s,%s) #load tokens and tfidf into files select id2word_file, corpus_file, model_file,processeddata_file from newsdata.features
2.713269
3
posicao.py
Alisson-tech/automacao_email
0
6613306
<filename>posicao.py import pyautogui import time time.sleep(5) a = pyautogui.position() print(a)
<filename>posicao.py import pyautogui import time time.sleep(5) a = pyautogui.position() print(a)
none
1
2.358453
2
bethelper/__main__.py
charstnut/Betrayal-helper
0
6613307
<reponame>charstnut/Betrayal-helper<filename>bethelper/__main__.py """ This file is the main script for the helper module """ class Game: ### Game class def __init__(self, num_players: int): if num_players < 3 or num_players > 5: raise ValueError("Number of players can only be 3 to 5.") self._num_player = num_players self._families = [] # A list of families/players in this game def init_game(self): """ Initialize the game """ pass def status(self): """ Print the status of the game """ self.print_families() self.print_map() print("Currently it's {}'s turn.".format(self.current_player())) def print_families(self): """ Print all the families in order and list their attributes """ for f in self._families: print("Family member: {}".format(f.name)) if __name__ == '__main__': game = Game(num_players=3)
""" This file is the main script for the helper module """ class Game: ### Game class def __init__(self, num_players: int): if num_players < 3 or num_players > 5: raise ValueError("Number of players can only be 3 to 5.") self._num_player = num_players self._families = [] # A list of families/players in this game def init_game(self): """ Initialize the game """ pass def status(self): """ Print the status of the game """ self.print_families() self.print_map() print("Currently it's {}'s turn.".format(self.current_player())) def print_families(self): """ Print all the families in order and list their attributes """ for f in self._families: print("Family member: {}".format(f.name)) if __name__ == '__main__': game = Game(num_players=3)
en
0.874943
This file is the main script for the helper module ### Game class # A list of families/players in this game Initialize the game Print the status of the game Print all the families in order and list their attributes
4.089895
4
FullDatasetFeatures.py
altuwairqi-s/MVS-using-CNN-and-LSTM
14
6613308
<reponame>altuwairqi-s/MVS-using-CNN-and-LSTM from __future__ import print_function import os, sys, numpy as np import argparse from scipy import misc import caffe import tempfile from math import ceil import cv2 import scipy.io as sio from matplotlib import pyplot as plt DatasetFolder = '/media/imlab/IMLab Server Data/Ubuntu/TanveerHussain/MVS/Training' proto = '/media/imlab/IMLab Server Data/Ubuntu/TanveerHussain/MVS/Models/alexnet.prototxt' model = '/media/imlab/IMLab Server Data/Ubuntu/TanveerHussain/MVS/Models/alexnet-model.caffemodel' caffe.set_mode_cpu() net = caffe.Net(proto, model, caffe.TEST) img_mean = np.array([103.94, 116.78, 123.68], dtype=np.float32) DatabaseFeautres = [] DatabaseLabel = [] for folderName in os.listdir(DatasetFolder): print(folderName) subFolder = DatasetFolder+'/'+ folderName for filename in os.listdir(subFolder): vidcap = cv2.VideoCapture(DatasetFolder+'/'+ folderName +'/'+filename) print('Feature Extraction of : ',filename) videolength = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) videoFeatures=[] frame_no=-1; while (frame_no < videolength-1): #(videolength%30) frame_no = frame_no + 1 vidcap.set(1,frame_no) ret0,img0 = vidcap.read() if(ret0 == 1): resized_image = caffe.io.resize_image(img0,[224,224]) transformer = caffe.io.Transformer({'data':net.blobs['data'].data.shape}) transformer.set_transpose('data',(2, 0, 1)) transformer.set_channel_swap('data', (2, 1, 0)) transformer.set_raw_scale('data', 255) transformer.set_mean('data',img_mean) net.blobs['data'].reshape(1, 3, 224, 224) net.blobs['data'].data[...] 
= transformer.preprocess('data', resized_image) net.forward() features = net.blobs['fc1000'].data[0].reshape(1,1000) bb = np.matrix(features) features = bb.max(0) videoFeatures.append(features) print(frame_no % 15) if frame_no % 15 == 14: aa = np.asarray(videoFeatures) DatabaseFeautres.append(aa) DatabaseLabel.append(folderName) videoFeatures=[] #np.save('DatabaseFeaturesList',DatabaseFeautres) #np.save('DatabaseLabelList',DatabaseLabel) ##################### One Hot and Train Test spilt TotalFeatures= [] for sample in DatabaseFeautres: TotalFeatures.append(sample.reshape([1,15000])) TotalFeatures = np.asarray(TotalFeatures) TotalFeatures = TotalFeatures.reshape([len(DatabaseFeautres),15000]) OneHotArray = [] kk=1; for i in range(len(DatabaseFeautres)-1): OneHotArray.append(kk) if (DatabaseLabel[i] != DatabaseLabel[i+1]): kk=kk+1; OneHot= np.zeros([len(DatabaseFeautres),2], dtype='int'); for i in range(len(DatabaseFeautres)-1): print(i) OneHot[i,OneHotArray[i]-1] = 1 np.save('MVS_TotalFeatures',TotalFeatures) sio.savemat('MVS_Labels.mat', mdict={'DatabaseLabel': OneHot}) sio.savemat('MVS_TotalFeatures.mat', mdict={'TotalFeatures': TotalFeatures},appendmat=True, format='5', long_field_names=False, do_compression=True, oned_as='row') #import random #list=[] #for i in range(1500): # r=random.randint(1,7999) # if r not in list: list.append(r) # # #import os, sys, numpy as np # # # #DatasetFolder = '/media/imlab/IMLab Server Data/Datasets/UCF101/UCF-101' # # # #for folderName in os.listdir(DatasetFolder): # print(folderName) #
from __future__ import print_function import os, sys, numpy as np import argparse from scipy import misc import caffe import tempfile from math import ceil import cv2 import scipy.io as sio from matplotlib import pyplot as plt DatasetFolder = '/media/imlab/IMLab Server Data/Ubuntu/TanveerHussain/MVS/Training' proto = '/media/imlab/IMLab Server Data/Ubuntu/TanveerHussain/MVS/Models/alexnet.prototxt' model = '/media/imlab/IMLab Server Data/Ubuntu/TanveerHussain/MVS/Models/alexnet-model.caffemodel' caffe.set_mode_cpu() net = caffe.Net(proto, model, caffe.TEST) img_mean = np.array([103.94, 116.78, 123.68], dtype=np.float32) DatabaseFeautres = [] DatabaseLabel = [] for folderName in os.listdir(DatasetFolder): print(folderName) subFolder = DatasetFolder+'/'+ folderName for filename in os.listdir(subFolder): vidcap = cv2.VideoCapture(DatasetFolder+'/'+ folderName +'/'+filename) print('Feature Extraction of : ',filename) videolength = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) videoFeatures=[] frame_no=-1; while (frame_no < videolength-1): #(videolength%30) frame_no = frame_no + 1 vidcap.set(1,frame_no) ret0,img0 = vidcap.read() if(ret0 == 1): resized_image = caffe.io.resize_image(img0,[224,224]) transformer = caffe.io.Transformer({'data':net.blobs['data'].data.shape}) transformer.set_transpose('data',(2, 0, 1)) transformer.set_channel_swap('data', (2, 1, 0)) transformer.set_raw_scale('data', 255) transformer.set_mean('data',img_mean) net.blobs['data'].reshape(1, 3, 224, 224) net.blobs['data'].data[...] 
= transformer.preprocess('data', resized_image) net.forward() features = net.blobs['fc1000'].data[0].reshape(1,1000) bb = np.matrix(features) features = bb.max(0) videoFeatures.append(features) print(frame_no % 15) if frame_no % 15 == 14: aa = np.asarray(videoFeatures) DatabaseFeautres.append(aa) DatabaseLabel.append(folderName) videoFeatures=[] #np.save('DatabaseFeaturesList',DatabaseFeautres) #np.save('DatabaseLabelList',DatabaseLabel) ##################### One Hot and Train Test spilt TotalFeatures= [] for sample in DatabaseFeautres: TotalFeatures.append(sample.reshape([1,15000])) TotalFeatures = np.asarray(TotalFeatures) TotalFeatures = TotalFeatures.reshape([len(DatabaseFeautres),15000]) OneHotArray = [] kk=1; for i in range(len(DatabaseFeautres)-1): OneHotArray.append(kk) if (DatabaseLabel[i] != DatabaseLabel[i+1]): kk=kk+1; OneHot= np.zeros([len(DatabaseFeautres),2], dtype='int'); for i in range(len(DatabaseFeautres)-1): print(i) OneHot[i,OneHotArray[i]-1] = 1 np.save('MVS_TotalFeatures',TotalFeatures) sio.savemat('MVS_Labels.mat', mdict={'DatabaseLabel': OneHot}) sio.savemat('MVS_TotalFeatures.mat', mdict={'TotalFeatures': TotalFeatures},appendmat=True, format='5', long_field_names=False, do_compression=True, oned_as='row') #import random #list=[] #for i in range(1500): # r=random.randint(1,7999) # if r not in list: list.append(r) # # #import os, sys, numpy as np # # # #DatasetFolder = '/media/imlab/IMLab Server Data/Datasets/UCF101/UCF-101' # # # #for folderName in os.listdir(DatasetFolder): # print(folderName) #
en
0.348264
#(videolength%30) #np.save('DatabaseFeaturesList',DatabaseFeautres) #np.save('DatabaseLabelList',DatabaseLabel) ##################### One Hot and Train Test spilt #import random #list=[] #for i in range(1500): # r=random.randint(1,7999) # if r not in list: list.append(r) # # #import os, sys, numpy as np # # # #DatasetFolder = '/media/imlab/IMLab Server Data/Datasets/UCF101/UCF-101' # # # #for folderName in os.listdir(DatasetFolder): # print(folderName) #
2.298062
2
pyfuppes/filter.py
MrFuppes/pyFuppes
1
6613309
# -*- coding: utf-8 -*- r""" Created on Mon Aug 20 11:23:26 2018 @author: <NAME>, florian\obersteiner\\kit\edu """ import numpy as np from numba import njit from scipy.interpolate import interp1d ############################################################################### def mask_repeated(a, N, atol=1e-6): """ given an array a that consists of sections of repeated elements, mask those elements in a section that repeat more than N times on SO: https://stackoverflow.com/a/58482894/10197418 Parameters ---------- a : 1d array N : int mask element if it repeats more than n times atol : float, optional absolute tolerance to check for equality. The default is 1e-6. Returns ------- boolean mask """ mask = np.ones(a.size, np.bool_) mask[N:] = ~np.isclose(a[N:], a[:-N], atol=atol, equal_nan=True) return mask ############################################################################### @njit def mask_repeated_nb(arr, n, atol=1e-6): """ numba version of mask_repeated(). Also works with input of type float. Parameters ---------- arr : 1d array n : int mask element if it repeats more than n times atol : float, optional absolute tolerance to check for equality. The default is 1e-6. Returns ------- boolean mask """ mask = np.ones(arr.shape, np.bool_) current = arr[0] count = 0 for idx, item in enumerate(arr): if abs(item-current) < atol: count += 1 else: current = item count = 1 mask[idx] = count <= n return mask ############################################################################### @njit def mask_jumps(arr, thrsh, look_ahead, abs_delta=False): """ check the elements of array "arr" if the delta between element and following element(s) exceed a threshold "trsh". 
How many elements to look ahead is defined by "look_ahead" """ n_el = arr.shape[0] mask = np.ones(arr.shape).astype(np.bool_) i = 0 while i < n_el-1: cur, nxt = arr[i], arr[i+1] delta_0 = np.absolute(nxt-cur) if abs_delta else nxt-cur if delta_0 > thrsh: for value in arr[i+1:i+look_ahead+1]: delta_1 = np.absolute(value-cur) if abs_delta else value-cur if delta_1 > thrsh: mask[i+1] = False i += 1 else: break i += 1 return mask ############################################################################### def filter_jumps(arr, thrsh, look_ahead, abs_delta=False, vmiss=np.nan, remove_repeated=False, interpol_jumps=False, interpol_kind='linear'): """ wrapper around mask_jumps() ! interpolation assumes equidistant spacing of the independent variable of which arr depends ! """ if not isinstance(arr, np.ndarray): raise ValueError("input array must be of class numpy ndarray.") if arr.ndim > 1: raise ValueError("input array must be numpy 1d array.") if not isinstance(look_ahead, int): raise ValueError("parameter look_ahead must be an integer.") if look_ahead >= arr.shape[0] or look_ahead < 1: raise ValueError(f"parameter look_ahead must be >=1 and <{arr.shape[0]}.") result = arr.copy() # do not touch the input... if not np.isnan(vmiss): result[vmiss] = np.nan if remove_repeated: result[~mask_repeated(result)] = np.nan mask = mask_jumps(result, thrsh, look_ahead, abs_delta=abs_delta) result[~mask] = np.nan if interpol_jumps: f_ip = interp1d(np.arange(0, result.shape[0])[mask], result[mask], kind=interpol_kind, fill_value='extrapolate') result = f_ip(np.arange(0, result.shape[0])) return (result, mask) return (result, mask) ############################################################################### def filter_jumps_np(v, max_delta, no_val=np.nan, use_abs_delta=True, reset_buffer_after=3, remove_doubles=False, interpol_jumps=False, interpol_kind='linear'): """ if v is dependent on another variable x (e.g. time) and if that x is not equidistant, do NOT use interpolation. 
Parameters ---------- v : np 1d array data to filter. max_delta : float defines "jump". no_val : float, optional missing value placeholder. The default is np.nan. use_abs_delta : boolean, optional use the absolute delta to identify jumps. The default is True. reset_buffer_after : int, optional how many elements to wait until reset. The default is 3. remove_doubles : boolean, optional remove elements that are repeated once. The default is False. interpol_jumps : boolean, optional decide to interpolate filtered values. The default is False. interpol_kind : string, optional how to interpolate, see scipy.interpolate.interp1d. The default is 'linear'. Returns ------- dict. 'filtered': filtered data 'ix_del': idices of deleted elements 'ix_rem': indices of remaining elements """ ix_del = np.full(v.shape[0], -1, dtype=int) # deletion index ix_rem = np.full(v.shape[0], -1, dtype=int) # remaining index buffer = [False, 0] for ix, v_ix in enumerate(v): if any([~np.isfinite(v_ix), v_ix == no_val, np.isnan(v_ix)]): ix_rem[ix] = ix continue # skip line if value is np.nan if not buffer[0]: buffer[0] = v_ix ix_rem[ix] = ix continue # fill buffer if not done so yet if use_abs_delta: delta = abs(v_ix-buffer[0]) else: delta = v_ix-buffer[0] if delta > max_delta: # jump found! v[ix] = no_val ix_del[ix] = ix buffer[1] += 1 if reset_buffer_after: if buffer[1] == reset_buffer_after: buffer = [v_ix, 0] else: # no jump,... buffer[0] = v_ix if remove_doubles: # check for double values... if delta == 0.: # double found! 
v[ix] = no_val ix_del[ix] = ix else: # no double ix_rem[ix] = ix else: ix_rem[ix] = ix w_valid = np.where(ix_del != -1) ix_del = ix_del[w_valid] w_valid = np.where(ix_rem != -1) ix_rem = ix_rem[w_valid] if interpol_jumps: tmp_x = (np.arange(0, v.shape[0]))[ix_rem] tmp_y = v[ix_rem] f_ip = interp1d(tmp_x, tmp_y, kind=interpol_kind, fill_value='extrapolate') filtered = f_ip(np.arange(0, v.shape[0])) else: w_valid = np.where(v != no_val) filtered = v[w_valid] return {'filtered': filtered, 'ix_del': ix_del, 'ix_rem': ix_rem} ############################################################################### def del_at_edge(v, n_cut, add=2, out_len='same'): """ assume v to be a 1D array which contains blocks of NaNs. returns: v with "more NaNs", i.e. range of NaN-blocks is extended by n_cut. """ tf = np.isfinite(v)*1. mask = np.convolve(tf, np.ones((int(n_cut+add),))/int(n_cut+add), mode=out_len) if tf[0] > 0.9: mask[0] = 1. if tf[-1] > 0.9: mask[-1] = 1. mask[np.where(mask < 0.999)] = np.nan return v * mask ###############################################################################
# -*- coding: utf-8 -*- r""" Created on Mon Aug 20 11:23:26 2018 @author: <NAME>, florian\obersteiner\\kit\edu """ import numpy as np from numba import njit from scipy.interpolate import interp1d ############################################################################### def mask_repeated(a, N, atol=1e-6): """ given an array a that consists of sections of repeated elements, mask those elements in a section that repeat more than N times on SO: https://stackoverflow.com/a/58482894/10197418 Parameters ---------- a : 1d array N : int mask element if it repeats more than n times atol : float, optional absolute tolerance to check for equality. The default is 1e-6. Returns ------- boolean mask """ mask = np.ones(a.size, np.bool_) mask[N:] = ~np.isclose(a[N:], a[:-N], atol=atol, equal_nan=True) return mask ############################################################################### @njit def mask_repeated_nb(arr, n, atol=1e-6): """ numba version of mask_repeated(). Also works with input of type float. Parameters ---------- arr : 1d array n : int mask element if it repeats more than n times atol : float, optional absolute tolerance to check for equality. The default is 1e-6. Returns ------- boolean mask """ mask = np.ones(arr.shape, np.bool_) current = arr[0] count = 0 for idx, item in enumerate(arr): if abs(item-current) < atol: count += 1 else: current = item count = 1 mask[idx] = count <= n return mask ############################################################################### @njit def mask_jumps(arr, thrsh, look_ahead, abs_delta=False): """ check the elements of array "arr" if the delta between element and following element(s) exceed a threshold "trsh". 
How many elements to look ahead is defined by "look_ahead" """ n_el = arr.shape[0] mask = np.ones(arr.shape).astype(np.bool_) i = 0 while i < n_el-1: cur, nxt = arr[i], arr[i+1] delta_0 = np.absolute(nxt-cur) if abs_delta else nxt-cur if delta_0 > thrsh: for value in arr[i+1:i+look_ahead+1]: delta_1 = np.absolute(value-cur) if abs_delta else value-cur if delta_1 > thrsh: mask[i+1] = False i += 1 else: break i += 1 return mask ############################################################################### def filter_jumps(arr, thrsh, look_ahead, abs_delta=False, vmiss=np.nan, remove_repeated=False, interpol_jumps=False, interpol_kind='linear'): """ wrapper around mask_jumps() ! interpolation assumes equidistant spacing of the independent variable of which arr depends ! """ if not isinstance(arr, np.ndarray): raise ValueError("input array must be of class numpy ndarray.") if arr.ndim > 1: raise ValueError("input array must be numpy 1d array.") if not isinstance(look_ahead, int): raise ValueError("parameter look_ahead must be an integer.") if look_ahead >= arr.shape[0] or look_ahead < 1: raise ValueError(f"parameter look_ahead must be >=1 and <{arr.shape[0]}.") result = arr.copy() # do not touch the input... if not np.isnan(vmiss): result[vmiss] = np.nan if remove_repeated: result[~mask_repeated(result)] = np.nan mask = mask_jumps(result, thrsh, look_ahead, abs_delta=abs_delta) result[~mask] = np.nan if interpol_jumps: f_ip = interp1d(np.arange(0, result.shape[0])[mask], result[mask], kind=interpol_kind, fill_value='extrapolate') result = f_ip(np.arange(0, result.shape[0])) return (result, mask) return (result, mask) ############################################################################### def filter_jumps_np(v, max_delta, no_val=np.nan, use_abs_delta=True, reset_buffer_after=3, remove_doubles=False, interpol_jumps=False, interpol_kind='linear'): """ if v is dependent on another variable x (e.g. time) and if that x is not equidistant, do NOT use interpolation. 
Parameters ---------- v : np 1d array data to filter. max_delta : float defines "jump". no_val : float, optional missing value placeholder. The default is np.nan. use_abs_delta : boolean, optional use the absolute delta to identify jumps. The default is True. reset_buffer_after : int, optional how many elements to wait until reset. The default is 3. remove_doubles : boolean, optional remove elements that are repeated once. The default is False. interpol_jumps : boolean, optional decide to interpolate filtered values. The default is False. interpol_kind : string, optional how to interpolate, see scipy.interpolate.interp1d. The default is 'linear'. Returns ------- dict. 'filtered': filtered data 'ix_del': idices of deleted elements 'ix_rem': indices of remaining elements """ ix_del = np.full(v.shape[0], -1, dtype=int) # deletion index ix_rem = np.full(v.shape[0], -1, dtype=int) # remaining index buffer = [False, 0] for ix, v_ix in enumerate(v): if any([~np.isfinite(v_ix), v_ix == no_val, np.isnan(v_ix)]): ix_rem[ix] = ix continue # skip line if value is np.nan if not buffer[0]: buffer[0] = v_ix ix_rem[ix] = ix continue # fill buffer if not done so yet if use_abs_delta: delta = abs(v_ix-buffer[0]) else: delta = v_ix-buffer[0] if delta > max_delta: # jump found! v[ix] = no_val ix_del[ix] = ix buffer[1] += 1 if reset_buffer_after: if buffer[1] == reset_buffer_after: buffer = [v_ix, 0] else: # no jump,... buffer[0] = v_ix if remove_doubles: # check for double values... if delta == 0.: # double found! 
v[ix] = no_val ix_del[ix] = ix else: # no double ix_rem[ix] = ix else: ix_rem[ix] = ix w_valid = np.where(ix_del != -1) ix_del = ix_del[w_valid] w_valid = np.where(ix_rem != -1) ix_rem = ix_rem[w_valid] if interpol_jumps: tmp_x = (np.arange(0, v.shape[0]))[ix_rem] tmp_y = v[ix_rem] f_ip = interp1d(tmp_x, tmp_y, kind=interpol_kind, fill_value='extrapolate') filtered = f_ip(np.arange(0, v.shape[0])) else: w_valid = np.where(v != no_val) filtered = v[w_valid] return {'filtered': filtered, 'ix_del': ix_del, 'ix_rem': ix_rem} ############################################################################### def del_at_edge(v, n_cut, add=2, out_len='same'): """ assume v to be a 1D array which contains blocks of NaNs. returns: v with "more NaNs", i.e. range of NaN-blocks is extended by n_cut. """ tf = np.isfinite(v)*1. mask = np.convolve(tf, np.ones((int(n_cut+add),))/int(n_cut+add), mode=out_len) if tf[0] > 0.9: mask[0] = 1. if tf[-1] > 0.9: mask[-1] = 1. mask[np.where(mask < 0.999)] = np.nan return v * mask ###############################################################################
en
0.400724
# -*- coding: utf-8 -*- Created on Mon Aug 20 11:23:26 2018 @author: <NAME>, florian\obersteiner\\kit\edu ############################################################################### given an array a that consists of sections of repeated elements, mask those elements in a section that repeat more than N times on SO: https://stackoverflow.com/a/58482894/10197418 Parameters ---------- a : 1d array N : int mask element if it repeats more than n times atol : float, optional absolute tolerance to check for equality. The default is 1e-6. Returns ------- boolean mask ############################################################################### numba version of mask_repeated(). Also works with input of type float. Parameters ---------- arr : 1d array n : int mask element if it repeats more than n times atol : float, optional absolute tolerance to check for equality. The default is 1e-6. Returns ------- boolean mask ############################################################################### check the elements of array "arr" if the delta between element and following element(s) exceed a threshold "trsh". How many elements to look ahead is defined by "look_ahead" ############################################################################### wrapper around mask_jumps() ! interpolation assumes equidistant spacing of the independent variable of which arr depends ! # do not touch the input... ############################################################################### if v is dependent on another variable x (e.g. time) and if that x is not equidistant, do NOT use interpolation. Parameters ---------- v : np 1d array data to filter. max_delta : float defines "jump". no_val : float, optional missing value placeholder. The default is np.nan. use_abs_delta : boolean, optional use the absolute delta to identify jumps. The default is True. reset_buffer_after : int, optional how many elements to wait until reset. The default is 3. 
remove_doubles : boolean, optional remove elements that are repeated once. The default is False. interpol_jumps : boolean, optional decide to interpolate filtered values. The default is False. interpol_kind : string, optional how to interpolate, see scipy.interpolate.interp1d. The default is 'linear'. Returns ------- dict. 'filtered': filtered data 'ix_del': idices of deleted elements 'ix_rem': indices of remaining elements # deletion index # remaining index # skip line if value is np.nan # fill buffer if not done so yet # jump found! # no jump,... # check for double values... # double found! # no double ############################################################################### assume v to be a 1D array which contains blocks of NaNs. returns: v with "more NaNs", i.e. range of NaN-blocks is extended by n_cut. ###############################################################################
3.300761
3
uplink/converters/standard.py
SakornW/uplink
0
6613310
<reponame>SakornW/uplink # Standard library imports import json # Local imports from uplink.converters import interfaces, register_default_converter_factory class Cast(interfaces.Converter): def __init__(self, caster, converter): self._cast = caster self._converter = converter def set_chain(self, chain): self._converter.set_chain(chain) def convert(self, value): if callable(self._cast): value = self._cast(value) return self._converter(value) class RequestBodyConverter(interfaces.Converter): @staticmethod def _default_json_dumper(obj): return obj.__dict__ # pragma: no cover def convert(self, value): if isinstance(value, str): return value dumped = json.dumps(value, default=self._default_json_dumper) return json.loads(dumped) class StringConverter(interfaces.Converter): def convert(self, value): return str(value) @register_default_converter_factory class StandardConverter(interfaces.Factory): """ The default converter, this class seeks to provide sane alternatives for (de)serialization when all else fails -- e.g., no other converters could handle a particular type. """ def create_response_body_converter(self, type_, *args, **kwargs): if isinstance(type_, interfaces.Converter): return type_ def create_request_body_converter(self, type_, *args, **kwargs): return Cast(type_, RequestBodyConverter()) # pragma: no cover def create_string_converter(self, type_, *args, **kwargs): return Cast(type_, StringConverter()) # pragma: no cover
# Standard library imports import json # Local imports from uplink.converters import interfaces, register_default_converter_factory class Cast(interfaces.Converter): def __init__(self, caster, converter): self._cast = caster self._converter = converter def set_chain(self, chain): self._converter.set_chain(chain) def convert(self, value): if callable(self._cast): value = self._cast(value) return self._converter(value) class RequestBodyConverter(interfaces.Converter): @staticmethod def _default_json_dumper(obj): return obj.__dict__ # pragma: no cover def convert(self, value): if isinstance(value, str): return value dumped = json.dumps(value, default=self._default_json_dumper) return json.loads(dumped) class StringConverter(interfaces.Converter): def convert(self, value): return str(value) @register_default_converter_factory class StandardConverter(interfaces.Factory): """ The default converter, this class seeks to provide sane alternatives for (de)serialization when all else fails -- e.g., no other converters could handle a particular type. """ def create_response_body_converter(self, type_, *args, **kwargs): if isinstance(type_, interfaces.Converter): return type_ def create_request_body_converter(self, type_, *args, **kwargs): return Cast(type_, RequestBodyConverter()) # pragma: no cover def create_string_converter(self, type_, *args, **kwargs): return Cast(type_, StringConverter()) # pragma: no cover
en
0.638023
# Standard library imports # Local imports # pragma: no cover The default converter, this class seeks to provide sane alternatives for (de)serialization when all else fails -- e.g., no other converters could handle a particular type. # pragma: no cover # pragma: no cover
2.314465
2
simple_racing/racing/pitstop_race.py
sebanie15/simple_racing
0
6613311
<reponame>sebanie15/simple_racing # !/usr/bin/env python3 # -*- coding: utf-8 -*- """ @author: sebanie15 """ from random import randint from .race import Race from .race_data import RaceData class PitstopRace(Race): def __init__(self, track, laps, cars, pitstop=10): super().__init__(track, laps, cars) self._pitstop_time = pitstop @property def pitstop_time(self): return self.pitstop_time @pitstop_time.setter def pitstop_time(self, minutes=10): self.pitstop_time = minutes / 60 def __car_timestamp(self, car): car_speed = randint(car.max_speed // 2, car.max_speed + 1) calc_distance = (self.__time_scale / 60) * car_speed loop_time = 0.0 fuel_consumption = 0.0 tanked = False if car.odometer + calc_distance > self.track.total_length: calc_distance = self.track.total_length - car.odometer if car.fuel_level > 0 and car.odometer < self.track.total_length: fuel_consumption, loop_time = car.drive(distance=calc_distance, speed=car_speed) if self.__is_end_of_fuel(car): car.tank_fuel(car.tank_capacity) tanked = True result = RaceData(self.__racing_time, car.name, car_speed, car.odometer, fuel_consumption, self.__calc_lap(car), loop_time, tanked) return result, self.__is_finished(car)
# !/usr/bin/env python3 # -*- coding: utf-8 -*- """ @author: sebanie15 """ from random import randint from .race import Race from .race_data import RaceData class PitstopRace(Race): def __init__(self, track, laps, cars, pitstop=10): super().__init__(track, laps, cars) self._pitstop_time = pitstop @property def pitstop_time(self): return self.pitstop_time @pitstop_time.setter def pitstop_time(self, minutes=10): self.pitstop_time = minutes / 60 def __car_timestamp(self, car): car_speed = randint(car.max_speed // 2, car.max_speed + 1) calc_distance = (self.__time_scale / 60) * car_speed loop_time = 0.0 fuel_consumption = 0.0 tanked = False if car.odometer + calc_distance > self.track.total_length: calc_distance = self.track.total_length - car.odometer if car.fuel_level > 0 and car.odometer < self.track.total_length: fuel_consumption, loop_time = car.drive(distance=calc_distance, speed=car_speed) if self.__is_end_of_fuel(car): car.tank_fuel(car.tank_capacity) tanked = True result = RaceData(self.__racing_time, car.name, car_speed, car.odometer, fuel_consumption, self.__calc_lap(car), loop_time, tanked) return result, self.__is_finished(car)
en
0.267253
# !/usr/bin/env python3 # -*- coding: utf-8 -*- @author: sebanie15
3.301518
3
csc work/final/w15/Q8.py
mdnu/snake
1
6613312
def last_phonemes(phoneme_list): ''' (list of str) -> list of str Return the last vowel phoneme and any subequent consonant phoneme(s) from phoneme_list, in the same order as they appear in phoneme_list. >>> last_phonemes(['AE1', 'B', 'S', 'IH0', 'N', 'TH']) ['IH0', 'N', 'TH'] >>> last_phonemes(['IH0', 'N']) ['IH0', 'N'] ''' vowels = 'AEIOU' last_phonemes_list = [] candidate_phoneme = '' for i in range(len(phoneme_list)): if phoneme_list[i][0] in vowels: if phoneme_list[i] > candidate_phoneme: candidate_phoneme = phoneme_list[i] last_phonemes_list = phoneme_list[phoneme_list.index(candidate_phoneme):] return last_phonemes_list def build_rhyming_dict(words_to_phonemes): # complete the function body (7 MARKS) ''' (dict of {str: list of str}) -> dict of {str: list of str} Return a dict where the keys are the same as the keys in word_to_phonemes and the value for each key is a list of all words that rhyme with the key. Two words rhyme if and only if they are different and their last vowel phonemes and all subsequent consonant phoneme(s) after the last vowel phonemes match. >>> words_to_phonemes = read_pronunciation(open('dictionary.txt')) >>> words_to_rhyming_words = build_rhyming_dict(words_to_phonemes) >>> words_to_rhyming_words['CRAIG'] ['BAIG', 'BEGUE', 'FLAIG', 'HAGUE', 'HAIG', 'LAPHROAIG', 'MACIAG', 'MCCAGUE', 'MCCAIG', 'MCKAIG', 'MCQUAIG', 'MCTAGUE', 'NEST-EGG', 'O'LAGUE', 'PLAGUE', 'RAGUE', 'SPRAGUE', 'VAGUE'] >>> # Notice that 'CRAIG' is not in the list of words that rhyme with 'CRAIG' ''' # recall that two words rhyme iff their last vowel phonemes and # all subsequent consonant phonemes matched. 
words_to_rhyming_words = {} for word in words_to_phonemes: rhyming_words = [] for potential_rhyme in words_to_phonemes: if (last_phonemes(words_to_phonemes[word]) == last_phonemes(words_to_phonemes[potential_rhyme])): rhyming_words.append(potential_rhyme) rhyming_words.remove(word) words_to_rhyming_words[word] = rhyming_words return words_to_rhyming_words # suppose we had a solution which had a runtime which was quadratic in the # length of the words_to_phonemes pythoin dict. If it took 1 second for # build_rhyming_dict to run for a words_to_phonemes dict with 1000 words, # then if we doubled the length to 2000 words, then we expect it to take # 4 seconds, since if a = 1000, f(a) = 1 second, then f(2a) = (2^2) = 4
def last_phonemes(phoneme_list): ''' (list of str) -> list of str Return the last vowel phoneme and any subequent consonant phoneme(s) from phoneme_list, in the same order as they appear in phoneme_list. >>> last_phonemes(['AE1', 'B', 'S', 'IH0', 'N', 'TH']) ['IH0', 'N', 'TH'] >>> last_phonemes(['IH0', 'N']) ['IH0', 'N'] ''' vowels = 'AEIOU' last_phonemes_list = [] candidate_phoneme = '' for i in range(len(phoneme_list)): if phoneme_list[i][0] in vowels: if phoneme_list[i] > candidate_phoneme: candidate_phoneme = phoneme_list[i] last_phonemes_list = phoneme_list[phoneme_list.index(candidate_phoneme):] return last_phonemes_list def build_rhyming_dict(words_to_phonemes): # complete the function body (7 MARKS) ''' (dict of {str: list of str}) -> dict of {str: list of str} Return a dict where the keys are the same as the keys in word_to_phonemes and the value for each key is a list of all words that rhyme with the key. Two words rhyme if and only if they are different and their last vowel phonemes and all subsequent consonant phoneme(s) after the last vowel phonemes match. >>> words_to_phonemes = read_pronunciation(open('dictionary.txt')) >>> words_to_rhyming_words = build_rhyming_dict(words_to_phonemes) >>> words_to_rhyming_words['CRAIG'] ['BAIG', 'BEGUE', 'FLAIG', 'HAGUE', 'HAIG', 'LAPHROAIG', 'MACIAG', 'MCCAGUE', 'MCCAIG', 'MCKAIG', 'MCQUAIG', 'MCTAGUE', 'NEST-EGG', 'O'LAGUE', 'PLAGUE', 'RAGUE', 'SPRAGUE', 'VAGUE'] >>> # Notice that 'CRAIG' is not in the list of words that rhyme with 'CRAIG' ''' # recall that two words rhyme iff their last vowel phonemes and # all subsequent consonant phonemes matched. 
words_to_rhyming_words = {} for word in words_to_phonemes: rhyming_words = [] for potential_rhyme in words_to_phonemes: if (last_phonemes(words_to_phonemes[word]) == last_phonemes(words_to_phonemes[potential_rhyme])): rhyming_words.append(potential_rhyme) rhyming_words.remove(word) words_to_rhyming_words[word] = rhyming_words return words_to_rhyming_words # suppose we had a solution which had a runtime which was quadratic in the # length of the words_to_phonemes pythoin dict. If it took 1 second for # build_rhyming_dict to run for a words_to_phonemes dict with 1000 words, # then if we doubled the length to 2000 words, then we expect it to take # 4 seconds, since if a = 1000, f(a) = 1 second, then f(2a) = (2^2) = 4
en
0.846239
(list of str) -> list of str Return the last vowel phoneme and any subequent consonant phoneme(s) from phoneme_list, in the same order as they appear in phoneme_list. >>> last_phonemes(['AE1', 'B', 'S', 'IH0', 'N', 'TH']) ['IH0', 'N', 'TH'] >>> last_phonemes(['IH0', 'N']) ['IH0', 'N'] # complete the function body (7 MARKS) (dict of {str: list of str}) -> dict of {str: list of str} Return a dict where the keys are the same as the keys in word_to_phonemes and the value for each key is a list of all words that rhyme with the key. Two words rhyme if and only if they are different and their last vowel phonemes and all subsequent consonant phoneme(s) after the last vowel phonemes match. >>> words_to_phonemes = read_pronunciation(open('dictionary.txt')) >>> words_to_rhyming_words = build_rhyming_dict(words_to_phonemes) >>> words_to_rhyming_words['CRAIG'] ['BAIG', 'BEGUE', 'FLAIG', 'HAGUE', 'HAIG', 'LAPHROAIG', 'MACIAG', 'MCCAGUE', 'MCCAIG', 'MCKAIG', 'MCQUAIG', 'MCTAGUE', 'NEST-EGG', 'O'LAGUE', 'PLAGUE', 'RAGUE', 'SPRAGUE', 'VAGUE'] >>> # Notice that 'CRAIG' is not in the list of words that rhyme with 'CRAIG' # recall that two words rhyme iff their last vowel phonemes and # all subsequent consonant phonemes matched. # suppose we had a solution which had a runtime which was quadratic in the # length of the words_to_phonemes pythoin dict. If it took 1 second for # build_rhyming_dict to run for a words_to_phonemes dict with 1000 words, # then if we doubled the length to 2000 words, then we expect it to take # 4 seconds, since if a = 1000, f(a) = 1 second, then f(2a) = (2^2) = 4
3.781206
4
plotting/plot_pub_pk.py
sjforeman/RadioFisher
3
6613313
<reponame>sjforeman/RadioFisher #!/usr/bin/python """ Process EOS Fisher matrices and plot P(k). """ import numpy as np import pylab as P from rfwrapper import rf import matplotlib.patches import matplotlib.cm from units import * from mpi4py import MPI import os import euclid cosmo = rf.experiments.cosmo #names = ["GBT", "BINGO", "WSRT", "APERTIF", "JVLA", "ASKAP", "KAT7", "MeerKAT", "SKA1mid", "SKA1MK", "iSKA1MK", "aSKA1MK", "SKA1MK_A0"] names = ["SKA1MK",] #["SKA1mid",] ["MeerKAT",] colours = ['#22AD1A', '#3399FF', '#ED7624'] # Fiducial value and plotting fig = P.figure() ax = fig.add_subplot(111) for k in range(len(names)): root = "output/" + names[k] # Load cosmo fns. dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T zc, Hc, dAc, Dc, fc = dat z, H, dA, D, f = np.genfromtxt(root+"-cosmofns-smooth.dat").T kc = np.genfromtxt(root+"-fisher-kc.dat").T # Load Fisher matrices as fn. of z Nbins = zc.size F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)] # Save P(k) rebinning info #np.savetxt(root+"-rebin-Fbase-%d.dat" % i, np.array(binning_info['F_base']) ) #np.savetxt(root+"-rebin-cumul-%d.dat" % i, np.array(binning_info['cumul']) ) #np.savetxt(root+"-rebin-kgrid-%d.dat" % i, np.array(binning_info['kgrid']) ) #np.savetxt(root+"-rebin-Vfac-%d.dat" % i, np.array([binning_info['Vfac'],]) ) # EOS FISHER MATRIX # Actually, (aperp, apar) are (D_A, H) pnames = ['A', 'b_HI', 'Tb', 'sigma_NL', 'sigma8', 'n_s', 'f', 'aperp', 'apar', 'omegak', 'omegaDE', 'w0', 'wa', 'h', 'gamma'] pnames += ["pk%d" % i for i in range(kc.size)] zfns = [1,] F, lbls = rf.combined_fisher_matrix( F_list, expand=zfns, names=pnames, exclude=[2,4,5,6,7,8 ] ) # Remove elements with zero diagonal (completely unconstrained) zero_idxs = np.where(np.diag(F) == 0.)[0] print "Zero idxs:", zero_idxs F = rf.fisher_with_excluded_params(F, excl=zero_idxs) lbls = lbls[:-zero_idxs.size] #rf.plot_corrmat(F, lbls) # Overlay error ellipses as a fn. 
of z p1 = rf.indexes_for_sampled_fns(4, zc.size, zfns) #p2 = rf.indexes_for_sampled_fns(5, zc.size, zfns) # Full covmat cov = np.linalg.inv(F) diags = np.sqrt(np.diag(cov)) # Reduced covmat #F2 = rf.fisher_with_excluded_params(F, excl=[l for l in range(19, 55)]) #cov2 = np.linalg.inv(F2) #diags2 = np.sqrt(np.diag(cov2)) # Print diags. for i in range(diags.size): #if i < diags2.size: # print "%2d %10s %3.4f %3.4f" % (i, lbls[i], diags[i], diags2[i]) #else: print "%2d %10s %3.4f" % (i, lbls[i], diags[i]) exit() P.subplot(111) idxs = np.array([l for l in range(7, 43)]) print idxs, idxs.size P.plot(kc[:idxs.size], diags[idxs], lw=1.5) P.xscale('log') P.ylim((0., 0.2)) P.show() exit() print "Cond.", np.linalg.cond(F) F[-1,:] *= 1e5; F[:,-1] *= 1e5 F[-2,:] *= 1e5; F[:,-2] *= 1e5 F[-3,:] *= 1e5; F[:,-3] *= 1e5 print "Cond.", np.linalg.cond(F) #print np.diag(F) #print F[-1,:] rf.plot_corrmat(F, lbls) P.show() exit() #ax.legend((l for l in lines), (lbl for lbl in labels), prop={'size':'x-large'}) #ax.set_xlim((-1.31, -0.70)) #ax.set_ylim((-0.7, 0.7)) P.ylim((0., 1.)) P.xscale('log') ax.set_xlabel(r"$k$", fontdict={'fontsize':'20'}) ax.set_ylabel(r"$P(k)$", fontdict={'fontsize':'20'}) fontsize = 16. for tick in ax.yaxis.get_major_ticks(): tick.label1.set_fontsize(fontsize) for tick in ax.xaxis.get_major_ticks(): tick.label1.set_fontsize(fontsize) P.tight_layout() P.show()
#!/usr/bin/python """ Process EOS Fisher matrices and plot P(k). """ import numpy as np import pylab as P from rfwrapper import rf import matplotlib.patches import matplotlib.cm from units import * from mpi4py import MPI import os import euclid cosmo = rf.experiments.cosmo #names = ["GBT", "BINGO", "WSRT", "APERTIF", "JVLA", "ASKAP", "KAT7", "MeerKAT", "SKA1mid", "SKA1MK", "iSKA1MK", "aSKA1MK", "SKA1MK_A0"] names = ["SKA1MK",] #["SKA1mid",] ["MeerKAT",] colours = ['#22AD1A', '#3399FF', '#ED7624'] # Fiducial value and plotting fig = P.figure() ax = fig.add_subplot(111) for k in range(len(names)): root = "output/" + names[k] # Load cosmo fns. dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T zc, Hc, dAc, Dc, fc = dat z, H, dA, D, f = np.genfromtxt(root+"-cosmofns-smooth.dat").T kc = np.genfromtxt(root+"-fisher-kc.dat").T # Load Fisher matrices as fn. of z Nbins = zc.size F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)] # Save P(k) rebinning info #np.savetxt(root+"-rebin-Fbase-%d.dat" % i, np.array(binning_info['F_base']) ) #np.savetxt(root+"-rebin-cumul-%d.dat" % i, np.array(binning_info['cumul']) ) #np.savetxt(root+"-rebin-kgrid-%d.dat" % i, np.array(binning_info['kgrid']) ) #np.savetxt(root+"-rebin-Vfac-%d.dat" % i, np.array([binning_info['Vfac'],]) ) # EOS FISHER MATRIX # Actually, (aperp, apar) are (D_A, H) pnames = ['A', 'b_HI', 'Tb', 'sigma_NL', 'sigma8', 'n_s', 'f', 'aperp', 'apar', 'omegak', 'omegaDE', 'w0', 'wa', 'h', 'gamma'] pnames += ["pk%d" % i for i in range(kc.size)] zfns = [1,] F, lbls = rf.combined_fisher_matrix( F_list, expand=zfns, names=pnames, exclude=[2,4,5,6,7,8 ] ) # Remove elements with zero diagonal (completely unconstrained) zero_idxs = np.where(np.diag(F) == 0.)[0] print "Zero idxs:", zero_idxs F = rf.fisher_with_excluded_params(F, excl=zero_idxs) lbls = lbls[:-zero_idxs.size] #rf.plot_corrmat(F, lbls) # Overlay error ellipses as a fn. 
of z p1 = rf.indexes_for_sampled_fns(4, zc.size, zfns) #p2 = rf.indexes_for_sampled_fns(5, zc.size, zfns) # Full covmat cov = np.linalg.inv(F) diags = np.sqrt(np.diag(cov)) # Reduced covmat #F2 = rf.fisher_with_excluded_params(F, excl=[l for l in range(19, 55)]) #cov2 = np.linalg.inv(F2) #diags2 = np.sqrt(np.diag(cov2)) # Print diags. for i in range(diags.size): #if i < diags2.size: # print "%2d %10s %3.4f %3.4f" % (i, lbls[i], diags[i], diags2[i]) #else: print "%2d %10s %3.4f" % (i, lbls[i], diags[i]) exit() P.subplot(111) idxs = np.array([l for l in range(7, 43)]) print idxs, idxs.size P.plot(kc[:idxs.size], diags[idxs], lw=1.5) P.xscale('log') P.ylim((0., 0.2)) P.show() exit() print "Cond.", np.linalg.cond(F) F[-1,:] *= 1e5; F[:,-1] *= 1e5 F[-2,:] *= 1e5; F[:,-2] *= 1e5 F[-3,:] *= 1e5; F[:,-3] *= 1e5 print "Cond.", np.linalg.cond(F) #print np.diag(F) #print F[-1,:] rf.plot_corrmat(F, lbls) P.show() exit() #ax.legend((l for l in lines), (lbl for lbl in labels), prop={'size':'x-large'}) #ax.set_xlim((-1.31, -0.70)) #ax.set_ylim((-0.7, 0.7)) P.ylim((0., 1.)) P.xscale('log') ax.set_xlabel(r"$k$", fontdict={'fontsize':'20'}) ax.set_ylabel(r"$P(k)$", fontdict={'fontsize':'20'}) fontsize = 16. for tick in ax.yaxis.get_major_ticks(): tick.label1.set_fontsize(fontsize) for tick in ax.xaxis.get_major_ticks(): tick.label1.set_fontsize(fontsize) P.tight_layout() P.show()
en
0.47903
#!/usr/bin/python Process EOS Fisher matrices and plot P(k). #names = ["GBT", "BINGO", "WSRT", "APERTIF", "JVLA", "ASKAP", "KAT7", "MeerKAT", "SKA1mid", "SKA1MK", "iSKA1MK", "aSKA1MK", "SKA1MK_A0"] #["SKA1mid",] ["MeerKAT",] # Fiducial value and plotting # Load cosmo fns. # Load Fisher matrices as fn. of z # Save P(k) rebinning info #np.savetxt(root+"-rebin-Fbase-%d.dat" % i, np.array(binning_info['F_base']) ) #np.savetxt(root+"-rebin-cumul-%d.dat" % i, np.array(binning_info['cumul']) ) #np.savetxt(root+"-rebin-kgrid-%d.dat" % i, np.array(binning_info['kgrid']) ) #np.savetxt(root+"-rebin-Vfac-%d.dat" % i, np.array([binning_info['Vfac'],]) ) # EOS FISHER MATRIX # Actually, (aperp, apar) are (D_A, H) # Remove elements with zero diagonal (completely unconstrained) #rf.plot_corrmat(F, lbls) # Overlay error ellipses as a fn. of z #p2 = rf.indexes_for_sampled_fns(5, zc.size, zfns) # Full covmat # Reduced covmat #F2 = rf.fisher_with_excluded_params(F, excl=[l for l in range(19, 55)]) #cov2 = np.linalg.inv(F2) #diags2 = np.sqrt(np.diag(cov2)) # Print diags. #if i < diags2.size: # print "%2d %10s %3.4f %3.4f" % (i, lbls[i], diags[i], diags2[i]) #else: #print np.diag(F) #print F[-1,:] #ax.legend((l for l in lines), (lbl for lbl in labels), prop={'size':'x-large'}) #ax.set_xlim((-1.31, -0.70)) #ax.set_ylim((-0.7, 0.7))
2.055647
2
src/sqlencode/__utilfuncs__.py
mtaboun/sqlencode
0
6613314
<filename>src/sqlencode/__utilfuncs__.py def combine_queries(query1: str, query2: str) -> str: return "Hello"
<filename>src/sqlencode/__utilfuncs__.py def combine_queries(query1: str, query2: str) -> str: return "Hello"
none
1
1.37021
1
utils/commonUtil.py
xia-deng/lawerWeb
0
6613315
<gh_stars>0 import re from django.db.models import QuerySet from xpinyin import Pinyin class CommonUtil: @staticmethod def toString(Obj): objtype=type(Obj) print(objtype) @staticmethod def querySetToList(querySet,field=""): if(type(querySet)==QuerySet): list=[] list=[', '.join(x) for x in querySet] return list return None @staticmethod def cn_to_pinyin(cn_words): pinyin = Pinyin().get_pinyin(cn_words).replace('-','').lower() pattern = re.compile('\\w*') result1 = pattern.findall(pinyin) result1 = ''.join(result1) return (result1.replace("_",''))
import re from django.db.models import QuerySet from xpinyin import Pinyin class CommonUtil: @staticmethod def toString(Obj): objtype=type(Obj) print(objtype) @staticmethod def querySetToList(querySet,field=""): if(type(querySet)==QuerySet): list=[] list=[', '.join(x) for x in querySet] return list return None @staticmethod def cn_to_pinyin(cn_words): pinyin = Pinyin().get_pinyin(cn_words).replace('-','').lower() pattern = re.compile('\\w*') result1 = pattern.findall(pinyin) result1 = ''.join(result1) return (result1.replace("_",''))
none
1
2.421498
2
weasyl/searchtag.py
hyena/weasyl
0
6613316
# searchtag.py import re import sqlalchemy as sa from libweasyl import staff from weasyl import define as d from weasyl import files from weasyl import ignoreuser from weasyl import macro as m from weasyl import orm from weasyl import welcome from weasyl.cache import region from weasyl.error import WeasylError _TAG_DELIMITER = re.compile(r"[\s,]+") def select(submitid=None, charid=None, journalid=None): return d.execute("SELECT st.title FROM searchtag st" " INNER JOIN searchmap%s sm USING (tagid)" " WHERE sm.targetid = %i" " ORDER BY st.title", [ "submit" if submitid else "char" if charid else "journal", submitid if submitid else charid if charid else journalid ], options="within") def select_with_artist_tags(submitid): db = d.connect() tags = ( db.query(orm.Tag.title, orm.SubmissionTag.is_artist_tag) .join(orm.SubmissionTag) .filter_by(targetid=submitid) .order_by(orm.Tag.title) .all()) ret = [] artist_tags = set() for tag, is_artist_tag in tags: ret.append(tag) if is_artist_tag: artist_tags.add(tag) return ret, artist_tags def can_remove_tags(userid, ownerid): return userid == ownerid or userid in staff.MODS or 'k' not in d.get_config(ownerid) def removable_tags(userid, ownerid, tags, artist_tags): if not can_remove_tags(userid, ownerid): return [tag for tag in tags if tag not in artist_tags] else: return tags def select_list(map_table, targetids): if not targetids: return {} mt = map_table q = ( d.sa .select([mt.c.targetid, d.sa.func.array_agg(mt.c.tagid)]) .select_from(mt) .where(mt.c.targetid.in_(targetids)) .group_by(mt.c.targetid)) db = d.connect() return dict(list(db.execute(q))) @region.cache_on_arguments() def get_or_create(name): name = d.get_search_tag(name) tag = d.engine.execute( 'INSERT INTO searchtag (title) VALUES (%(name)s) ON CONFLICT (title) DO NOTHING RETURNING tagid', name=name).scalar() if tag is not None: return tag return d.engine.execute( 'SELECT tagid FROM searchtag WHERE title = %(name)s', name=name).scalar() def get_ids(names): result = 
d.engine.execute( "SELECT tagid, title FROM searchtag WHERE title = ANY (%(names)s)", names=list(names)) return {row.title: row.tagid for row in result} def suggest(userid, target): if not target: return [] if userid: block = d.execute("SELECT tagid FROM blocktag WHERE userid = %i", [userid], options="within") query = list() target = d.get_search_tag(target) statement = ["SELECT title FROM searchtag WHERE title LIKE '%s%%'"] if userid and block: statement.append(" AND tagid NOT IN %s" % (d.sql_number_list(block),)) for i in d.execute("".join(statement + [" ORDER BY title LIMIT 10"]), [target], options="within"): query.append(i) statement = ["SELECT title FROM searchtag WHERE title LIKE '%%%s%%' AND title NOT LIKE '%s%%'"] if userid and block: statement.append(" AND tagid NOT IN %s" % (d.sql_number_list(block),)) for i in d.execute("".join(statement + [" ORDER BY title LIMIT 5"]), [target, target], options="within"): query.append(i) return query def tag_array(tagids): if not tagids: return None st = d.meta.tables['searchtag'] return sa.func.array( sa.select([st.c.title]) .where(st.c.tagid.in_(tagids)) .as_scalar()) def parse_tags(text): tags = set() for i in _TAG_DELIMITER.split(text): tag = d.get_search_tag(i) if tag: tags.add(tag) return tags def associate(userid, tags, submitid=None, charid=None, journalid=None): targetid = d.get_targetid(submitid, charid, journalid) # Assign table, feature, ownerid if submitid: table, feature = "searchmapsubmit", "submit" ownerid = d.get_ownerid(submitid=targetid) elif charid: table, feature = "searchmapchar", "char" ownerid = d.get_ownerid(charid=targetid) else: table, feature = "searchmapjournal", "journal" ownerid = d.get_ownerid(journalid=targetid) # Check permissions and invalid target if not ownerid: raise WeasylError("TargetRecordMissing") elif userid != ownerid and "g" in d.get_config(userid): raise WeasylError("InsufficientPermissions") elif ignoreuser.check(ownerid, userid): raise WeasylError("contentOwnerIgnoredYou") 
# Determine previous tags existing = d.engine.execute( "SELECT tagid, settings FROM {} WHERE targetid = %(target)s".format(table), target=targetid).fetchall() # Determine tag titles and tagids query = d.engine.execute( "SELECT tagid, title FROM searchtag WHERE title = ANY (%(tags)s)", tags=list(tags)).fetchall() newtags = list(tags - {x.title for x in query}) if newtags: query.extend( d.engine.execute( "INSERT INTO searchtag (title) SELECT * FROM UNNEST (%(newtags)s) AS title RETURNING tagid, title", newtags=newtags ).fetchall()) existing_tagids = {t.tagid for t in existing} entered_tagids = {t.tagid for t in query} # Assign added and removed added = entered_tagids - existing_tagids removed = existing_tagids - entered_tagids # Check removed artist tags if not can_remove_tags(userid, ownerid): existing_artist_tags = {t.tagid for t in existing if 'a' in t.settings} removed.difference_update(existing_artist_tags) entered_tagids.update(existing_artist_tags) # Remove tags if removed: d.engine.execute( "DELETE FROM {} WHERE targetid = %(target)s AND tagid = ANY (%(removed)s)".format(table), target=targetid, removed=list(removed)) if added: d.engine.execute( "INSERT INTO {} SELECT tag, %(target)s FROM UNNEST (%(added)s) AS tag".format(table), target=targetid, added=list(added)) if userid == ownerid: d.execute( "UPDATE %s SET settings = settings || 'a' WHERE targetid = %i AND tagid IN %s", [table, targetid, d.sql_number_list(list(added))]) if submitid: d.engine.execute( 'INSERT INTO submission_tags (submitid, tags) VALUES (%(submission)s, %(tags)s) ' 'ON CONFLICT (submitid) DO UPDATE SET tags = %(tags)s', submission=submitid, tags=list(entered_tagids)) db = d.connect() db.execute( d.meta.tables['tag_updates'].insert() .values(submitid=submitid, userid=userid, added=tag_array(added), removed=tag_array(removed))) if userid != ownerid: welcome.tag_update_insert(ownerid, submitid) files.append( "%stag.%s.%s.log" % (m.MACRO_SYS_LOG_PATH, feature, d.get_timestamp()), "-%sID %i 
-T %i -UID %i -X %s\n" % (feature[0].upper(), targetid, d.get_time(), userid, " ".join(tags))) def tag_history(submitid): db = d.connect() tu = d.meta.tables['tag_updates'] pr = d.meta.tables['profile'] return db.execute( sa.select([pr.c.username, tu.c.updated_at, tu.c.added, tu.c.removed]) .select_from(tu.join(pr, tu.c.userid == pr.c.userid)) .where(tu.c.submitid == submitid) .order_by(tu.c.updated_at.desc()))
# searchtag.py import re import sqlalchemy as sa from libweasyl import staff from weasyl import define as d from weasyl import files from weasyl import ignoreuser from weasyl import macro as m from weasyl import orm from weasyl import welcome from weasyl.cache import region from weasyl.error import WeasylError _TAG_DELIMITER = re.compile(r"[\s,]+") def select(submitid=None, charid=None, journalid=None): return d.execute("SELECT st.title FROM searchtag st" " INNER JOIN searchmap%s sm USING (tagid)" " WHERE sm.targetid = %i" " ORDER BY st.title", [ "submit" if submitid else "char" if charid else "journal", submitid if submitid else charid if charid else journalid ], options="within") def select_with_artist_tags(submitid): db = d.connect() tags = ( db.query(orm.Tag.title, orm.SubmissionTag.is_artist_tag) .join(orm.SubmissionTag) .filter_by(targetid=submitid) .order_by(orm.Tag.title) .all()) ret = [] artist_tags = set() for tag, is_artist_tag in tags: ret.append(tag) if is_artist_tag: artist_tags.add(tag) return ret, artist_tags def can_remove_tags(userid, ownerid): return userid == ownerid or userid in staff.MODS or 'k' not in d.get_config(ownerid) def removable_tags(userid, ownerid, tags, artist_tags): if not can_remove_tags(userid, ownerid): return [tag for tag in tags if tag not in artist_tags] else: return tags def select_list(map_table, targetids): if not targetids: return {} mt = map_table q = ( d.sa .select([mt.c.targetid, d.sa.func.array_agg(mt.c.tagid)]) .select_from(mt) .where(mt.c.targetid.in_(targetids)) .group_by(mt.c.targetid)) db = d.connect() return dict(list(db.execute(q))) @region.cache_on_arguments() def get_or_create(name): name = d.get_search_tag(name) tag = d.engine.execute( 'INSERT INTO searchtag (title) VALUES (%(name)s) ON CONFLICT (title) DO NOTHING RETURNING tagid', name=name).scalar() if tag is not None: return tag return d.engine.execute( 'SELECT tagid FROM searchtag WHERE title = %(name)s', name=name).scalar() def get_ids(names): result = 
d.engine.execute( "SELECT tagid, title FROM searchtag WHERE title = ANY (%(names)s)", names=list(names)) return {row.title: row.tagid for row in result} def suggest(userid, target): if not target: return [] if userid: block = d.execute("SELECT tagid FROM blocktag WHERE userid = %i", [userid], options="within") query = list() target = d.get_search_tag(target) statement = ["SELECT title FROM searchtag WHERE title LIKE '%s%%'"] if userid and block: statement.append(" AND tagid NOT IN %s" % (d.sql_number_list(block),)) for i in d.execute("".join(statement + [" ORDER BY title LIMIT 10"]), [target], options="within"): query.append(i) statement = ["SELECT title FROM searchtag WHERE title LIKE '%%%s%%' AND title NOT LIKE '%s%%'"] if userid and block: statement.append(" AND tagid NOT IN %s" % (d.sql_number_list(block),)) for i in d.execute("".join(statement + [" ORDER BY title LIMIT 5"]), [target, target], options="within"): query.append(i) return query def tag_array(tagids): if not tagids: return None st = d.meta.tables['searchtag'] return sa.func.array( sa.select([st.c.title]) .where(st.c.tagid.in_(tagids)) .as_scalar()) def parse_tags(text): tags = set() for i in _TAG_DELIMITER.split(text): tag = d.get_search_tag(i) if tag: tags.add(tag) return tags def associate(userid, tags, submitid=None, charid=None, journalid=None): targetid = d.get_targetid(submitid, charid, journalid) # Assign table, feature, ownerid if submitid: table, feature = "searchmapsubmit", "submit" ownerid = d.get_ownerid(submitid=targetid) elif charid: table, feature = "searchmapchar", "char" ownerid = d.get_ownerid(charid=targetid) else: table, feature = "searchmapjournal", "journal" ownerid = d.get_ownerid(journalid=targetid) # Check permissions and invalid target if not ownerid: raise WeasylError("TargetRecordMissing") elif userid != ownerid and "g" in d.get_config(userid): raise WeasylError("InsufficientPermissions") elif ignoreuser.check(ownerid, userid): raise WeasylError("contentOwnerIgnoredYou") 
# Determine previous tags existing = d.engine.execute( "SELECT tagid, settings FROM {} WHERE targetid = %(target)s".format(table), target=targetid).fetchall() # Determine tag titles and tagids query = d.engine.execute( "SELECT tagid, title FROM searchtag WHERE title = ANY (%(tags)s)", tags=list(tags)).fetchall() newtags = list(tags - {x.title for x in query}) if newtags: query.extend( d.engine.execute( "INSERT INTO searchtag (title) SELECT * FROM UNNEST (%(newtags)s) AS title RETURNING tagid, title", newtags=newtags ).fetchall()) existing_tagids = {t.tagid for t in existing} entered_tagids = {t.tagid for t in query} # Assign added and removed added = entered_tagids - existing_tagids removed = existing_tagids - entered_tagids # Check removed artist tags if not can_remove_tags(userid, ownerid): existing_artist_tags = {t.tagid for t in existing if 'a' in t.settings} removed.difference_update(existing_artist_tags) entered_tagids.update(existing_artist_tags) # Remove tags if removed: d.engine.execute( "DELETE FROM {} WHERE targetid = %(target)s AND tagid = ANY (%(removed)s)".format(table), target=targetid, removed=list(removed)) if added: d.engine.execute( "INSERT INTO {} SELECT tag, %(target)s FROM UNNEST (%(added)s) AS tag".format(table), target=targetid, added=list(added)) if userid == ownerid: d.execute( "UPDATE %s SET settings = settings || 'a' WHERE targetid = %i AND tagid IN %s", [table, targetid, d.sql_number_list(list(added))]) if submitid: d.engine.execute( 'INSERT INTO submission_tags (submitid, tags) VALUES (%(submission)s, %(tags)s) ' 'ON CONFLICT (submitid) DO UPDATE SET tags = %(tags)s', submission=submitid, tags=list(entered_tagids)) db = d.connect() db.execute( d.meta.tables['tag_updates'].insert() .values(submitid=submitid, userid=userid, added=tag_array(added), removed=tag_array(removed))) if userid != ownerid: welcome.tag_update_insert(ownerid, submitid) files.append( "%stag.%s.%s.log" % (m.MACRO_SYS_LOG_PATH, feature, d.get_timestamp()), "-%sID %i 
-T %i -UID %i -X %s\n" % (feature[0].upper(), targetid, d.get_time(), userid, " ".join(tags))) def tag_history(submitid): db = d.connect() tu = d.meta.tables['tag_updates'] pr = d.meta.tables['profile'] return db.execute( sa.select([pr.c.username, tu.c.updated_at, tu.c.added, tu.c.removed]) .select_from(tu.join(pr, tu.c.userid == pr.c.userid)) .where(tu.c.submitid == submitid) .order_by(tu.c.updated_at.desc()))
en
0.619622
# searchtag.py # Assign table, feature, ownerid # Check permissions and invalid target # Determine previous tags # Determine tag titles and tagids # Assign added and removed # Check removed artist tags # Remove tags
2.255568
2
tests/__init__.py
nullp0tr/Bluew
17
6613317
<filename>tests/__init__.py from bluew import *
<filename>tests/__init__.py from bluew import *
none
1
1.017169
1
rbac/tests/test_review.py
shawnmckinney/py-fortress
16
6613318
''' @copyright: 2022 - Symas Corporation ''' import unittest from rbac import review from rbac.model import User, Role, Perm from rbac.cli.utils import print_ln, print_entity class BasicTestSuite(unittest.TestCase): """These tests the py-fortress review functions.""" class TestReivewMgr(unittest.TestCase): """ Test the review funcs """ def test01_assigned_users(self): """ Test the assigned users method """ print_ln('test_assigned_users') try: rList = review.find_roles(Role(name='py-role*')) for rle in rList: print_ln("Assigned users role=" + rle.name) uList = review.assigned_users(rle) for user in uList: print_ln("Assigned user=" + user, 1) except Exception as e: self.fail('test_assigned_users failed, exception=' + e.msg) def test02_assigned_roles(self): """ Test the assigned roles method """ print_ln('test_assigned_roles') try: uList = review.find_users(User(uid='py-user*')) for usr in uList: print_ln("Assigned roles user=" + usr.uid) rList = review.assigned_roles(usr) for role in rList: print_entity(role, "Assigned role", 1) except Exception as e: self.fail('test_assigned_roles failed, exception=' + e.msg) def test03_perm_roles(self): """ Test the perm roles method """ print_ln('test16_perm_roles') try: pList = review.find_perms(Perm(obj_name='py-obj*', op_name='*')) for perm in pList: print_ln("Role Perm obj name=" + perm.obj_name + ', op=' + perm.op_name + ', id=' + perm.obj_id) rList = review.perm_roles(perm) for role in rList: print_ln("Assigned role=" + role, 1) except Exception as e: self.fail('test16_perm_roles failed, exception=' + e.msg) def test04_role_perms(self): """ Test the role perms method """ print_ln('test_role_perms') try: rList = review.find_roles(Role(name='py-role*')) for rle in rList: print_ln("Perm Roles name=" + rle.name) pList = review.role_perms(rle) for perm in pList: print_ln("Assigned perm obj name=" + perm.obj_name + ', op name=' + perm.op_name + ', obj id=' + perm.obj_id, 1) except Exception as e: self.fail('test_role_perms failed, 
exception=' + e.msg) def test05_user_perms(self): """ Test the user perms method """ print_ln('test_user_perms') try: uList = review.find_users(User(uid='py-user*')) for usr in uList: print_ln("Assigned perms user=" + usr.uid) pList = review.user_perms(usr) for perm in pList: print_ln("Assigned perm obj name=" + perm.obj_name + ', op name=' + perm.op_name + ', obj id=' + perm.obj_id, 1) except Exception as e: self.fail('test_user_perms failed, exception=' + e.msg) def test06_perm_users(self): """ Test the perm users method """ print_ln('test_perm_users') try: pList = review.find_perms(Perm(obj_name='py-obj*', op_name='*')) for perm in pList: print_ln("Perm obj name=" + perm.obj_name + ', op=' + perm.op_name + ', id=' + perm.obj_id) uList = review.perm_users(perm) for user in uList: print_ln("Assigned user=" + user.uid, 1) except Exception as e: self.fail('test_perm_users failed, exception=' + e.msg) def suite(): suite = unittest.TestSuite() suite.addTest(TestReivewMgr('test01_assigned_users')) suite.addTest(TestReivewMgr('test02_assigned_roles')) suite.addTest(TestReivewMgr('test03_perm_roles')) suite.addTest(TestReivewMgr('test04_role_perms')) suite.addTest(TestReivewMgr('test05_user_perms')) suite.addTest(TestReivewMgr('test06_perm_users')) return suite if __name__ == '__main__': runner = unittest.TextTestRunner(failfast=True) runner.run(suite())
''' @copyright: 2022 - Symas Corporation ''' import unittest from rbac import review from rbac.model import User, Role, Perm from rbac.cli.utils import print_ln, print_entity class BasicTestSuite(unittest.TestCase): """These tests the py-fortress review functions.""" class TestReivewMgr(unittest.TestCase): """ Test the review funcs """ def test01_assigned_users(self): """ Test the assigned users method """ print_ln('test_assigned_users') try: rList = review.find_roles(Role(name='py-role*')) for rle in rList: print_ln("Assigned users role=" + rle.name) uList = review.assigned_users(rle) for user in uList: print_ln("Assigned user=" + user, 1) except Exception as e: self.fail('test_assigned_users failed, exception=' + e.msg) def test02_assigned_roles(self): """ Test the assigned roles method """ print_ln('test_assigned_roles') try: uList = review.find_users(User(uid='py-user*')) for usr in uList: print_ln("Assigned roles user=" + usr.uid) rList = review.assigned_roles(usr) for role in rList: print_entity(role, "Assigned role", 1) except Exception as e: self.fail('test_assigned_roles failed, exception=' + e.msg) def test03_perm_roles(self): """ Test the perm roles method """ print_ln('test16_perm_roles') try: pList = review.find_perms(Perm(obj_name='py-obj*', op_name='*')) for perm in pList: print_ln("Role Perm obj name=" + perm.obj_name + ', op=' + perm.op_name + ', id=' + perm.obj_id) rList = review.perm_roles(perm) for role in rList: print_ln("Assigned role=" + role, 1) except Exception as e: self.fail('test16_perm_roles failed, exception=' + e.msg) def test04_role_perms(self): """ Test the role perms method """ print_ln('test_role_perms') try: rList = review.find_roles(Role(name='py-role*')) for rle in rList: print_ln("Perm Roles name=" + rle.name) pList = review.role_perms(rle) for perm in pList: print_ln("Assigned perm obj name=" + perm.obj_name + ', op name=' + perm.op_name + ', obj id=' + perm.obj_id, 1) except Exception as e: self.fail('test_role_perms failed, 
exception=' + e.msg) def test05_user_perms(self): """ Test the user perms method """ print_ln('test_user_perms') try: uList = review.find_users(User(uid='py-user*')) for usr in uList: print_ln("Assigned perms user=" + usr.uid) pList = review.user_perms(usr) for perm in pList: print_ln("Assigned perm obj name=" + perm.obj_name + ', op name=' + perm.op_name + ', obj id=' + perm.obj_id, 1) except Exception as e: self.fail('test_user_perms failed, exception=' + e.msg) def test06_perm_users(self): """ Test the perm users method """ print_ln('test_perm_users') try: pList = review.find_perms(Perm(obj_name='py-obj*', op_name='*')) for perm in pList: print_ln("Perm obj name=" + perm.obj_name + ', op=' + perm.op_name + ', id=' + perm.obj_id) uList = review.perm_users(perm) for user in uList: print_ln("Assigned user=" + user.uid, 1) except Exception as e: self.fail('test_perm_users failed, exception=' + e.msg) def suite(): suite = unittest.TestSuite() suite.addTest(TestReivewMgr('test01_assigned_users')) suite.addTest(TestReivewMgr('test02_assigned_roles')) suite.addTest(TestReivewMgr('test03_perm_roles')) suite.addTest(TestReivewMgr('test04_role_perms')) suite.addTest(TestReivewMgr('test05_user_perms')) suite.addTest(TestReivewMgr('test06_perm_users')) return suite if __name__ == '__main__': runner = unittest.TextTestRunner(failfast=True) runner.run(suite())
en
0.682593
@copyright: 2022 - Symas Corporation These tests the py-fortress review functions. Test the review funcs Test the assigned users method Test the assigned roles method Test the perm roles method Test the role perms method Test the user perms method Test the perm users method
2.671585
3
main.py
Kanata-Bifang/Nerual-networks-for-video-transfer
2
6613319
<reponame>Kanata-Bifang/Nerual-networks-for-video-transfer from models.videonet import * from models.fast_style_neural import * from options import get_options import os def get_model(opt): if opt.model == 'videonet': return videoNet(opt) elif opt.model == 'style': return styleNet(opt) else: raise NotImplementedError('Such model doesn\'t exist.') def train(opt): opt.is_train = True if not os.path.exists(opt.save_path): os.mkdir(opt.save_path) model = get_model(opt) model.train() def test(opt): opt.is_train = False if not os.path.exists(opt.save_path): os.mkdir(opt.save_path) model = get_model(opt) model.test(opt.load_epoch) if __name__ == '__main__': opt = get_options() if not opt.eval: train(opt) else: test(opt)
from models.videonet import * from models.fast_style_neural import * from options import get_options import os def get_model(opt): if opt.model == 'videonet': return videoNet(opt) elif opt.model == 'style': return styleNet(opt) else: raise NotImplementedError('Such model doesn\'t exist.') def train(opt): opt.is_train = True if not os.path.exists(opt.save_path): os.mkdir(opt.save_path) model = get_model(opt) model.train() def test(opt): opt.is_train = False if not os.path.exists(opt.save_path): os.mkdir(opt.save_path) model = get_model(opt) model.test(opt.load_epoch) if __name__ == '__main__': opt = get_options() if not opt.eval: train(opt) else: test(opt)
none
1
2.350088
2
src/timeatlas/models/NN/dataloader/prediction_dataloader.py
fredmontet/timeatlas
10
6613320
<reponame>fredmontet/timeatlas try: from torch.utils.data import Subset except ModuleNotFoundError: raise ModuleNotFoundError("Pytorch not found. Install with pip install torch") from .base_dataloader import BaseDataLoader class PredictionDataLoader(BaseDataLoader): def __init__(self, dataset, *args, **kwargs): super(PredictionDataLoader, self).__init__(dataset=dataset, *args, **kwargs) def split(self, train: float, test: float or None = None, validation: float or None = None): """Split Dataset into train, test and validation Splitting a dataset into train, test and validation by percentages. Args: train: percentage of train split test: percentage of test split validation: percentage of validation split Returns: Subset of the DataSet """ idx = list(range(len(self.dataset))) n = len(self.dataset) if validation is None: n_train = int(n * train) train_idx = idx[:n_train] test_idx = idx[n_train:] train_set = Subset(self.dataset, train_idx) test_set = Subset(self.dataset, test_idx) return train_set, test_set else: if test is None: test = 1 - train - validation n_train = int(n * train) n_test = int(n * test) train_idx = idx[:n_train] val_idx = idx[n_train:(n_train + n_test)] test_idx = idx[(n_train + n_test):] train_set = Subset(self.dataset, train_idx) val_set = Subset(self.dataset, val_idx) test_set = Subset(self.dataset, test_idx) return train_set, val_set, test_set
try: from torch.utils.data import Subset except ModuleNotFoundError: raise ModuleNotFoundError("Pytorch not found. Install with pip install torch") from .base_dataloader import BaseDataLoader class PredictionDataLoader(BaseDataLoader): def __init__(self, dataset, *args, **kwargs): super(PredictionDataLoader, self).__init__(dataset=dataset, *args, **kwargs) def split(self, train: float, test: float or None = None, validation: float or None = None): """Split Dataset into train, test and validation Splitting a dataset into train, test and validation by percentages. Args: train: percentage of train split test: percentage of test split validation: percentage of validation split Returns: Subset of the DataSet """ idx = list(range(len(self.dataset))) n = len(self.dataset) if validation is None: n_train = int(n * train) train_idx = idx[:n_train] test_idx = idx[n_train:] train_set = Subset(self.dataset, train_idx) test_set = Subset(self.dataset, test_idx) return train_set, test_set else: if test is None: test = 1 - train - validation n_train = int(n * train) n_test = int(n * test) train_idx = idx[:n_train] val_idx = idx[n_train:(n_train + n_test)] test_idx = idx[(n_train + n_test):] train_set = Subset(self.dataset, train_idx) val_set = Subset(self.dataset, val_idx) test_set = Subset(self.dataset, test_idx) return train_set, val_set, test_set
en
0.820326
Split Dataset into train, test and validation Splitting a dataset into train, test and validation by percentages. Args: train: percentage of train split test: percentage of test split validation: percentage of validation split Returns: Subset of the DataSet
3.109659
3
polog/handlers/file/locks/abstract_single_lock.py
pomponchik/polog
30
6613321
class AbstractSingleLock: """ Все единичные классы блокировок, унаследованные от данного класса: 1. Должны переопределить методы .acquire() и .release(). 2. Являются отключаемыми. То есть, после вызова метода .off() у их экземпляров они просто не работают. """ active = True def off(self): """ Отключение блокировки. После вызова данного метода методы .acquire() и .release() перестают работать. Откатить это нельзя, операция одноразовая, поэтому рекомендуется применять при инициализации экземпляра отнаследованного класса. """ self.acquire = self.empty_acquire self.release = self.empty_release self.active = False def acquire(self): """ Взять лок. Должно быть переопределено наследником. """ raise NotImplementedError('The basic action for the blocking class is not spelled out.') def release(self): """ Отпустить лок. Должно быть переопределено наследником. """ raise NotImplementedError('The basic action for the blocking class is not spelled out.') def empty_acquire(self): """ Сделать вид, что взял лок. """ pass def empty_release(self): """ Сделать вид, что отпустил лок. """ pass
class AbstractSingleLock: """ Все единичные классы блокировок, унаследованные от данного класса: 1. Должны переопределить методы .acquire() и .release(). 2. Являются отключаемыми. То есть, после вызова метода .off() у их экземпляров они просто не работают. """ active = True def off(self): """ Отключение блокировки. После вызова данного метода методы .acquire() и .release() перестают работать. Откатить это нельзя, операция одноразовая, поэтому рекомендуется применять при инициализации экземпляра отнаследованного класса. """ self.acquire = self.empty_acquire self.release = self.empty_release self.active = False def acquire(self): """ Взять лок. Должно быть переопределено наследником. """ raise NotImplementedError('The basic action for the blocking class is not spelled out.') def release(self): """ Отпустить лок. Должно быть переопределено наследником. """ raise NotImplementedError('The basic action for the blocking class is not spelled out.') def empty_acquire(self): """ Сделать вид, что взял лок. """ pass def empty_release(self): """ Сделать вид, что отпустил лок. """ pass
ru
0.99639
Все единичные классы блокировок, унаследованные от данного класса: 1. Должны переопределить методы .acquire() и .release(). 2. Являются отключаемыми. То есть, после вызова метода .off() у их экземпляров они просто не работают. Отключение блокировки. После вызова данного метода методы .acquire() и .release() перестают работать. Откатить это нельзя, операция одноразовая, поэтому рекомендуется применять при инициализации экземпляра отнаследованного класса. Взять лок. Должно быть переопределено наследником. Отпустить лок. Должно быть переопределено наследником. Сделать вид, что взял лок. Сделать вид, что отпустил лок.
2.807217
3
PyChat/client/protocol/FileClientProtocol.py
leosartaj/PyChat
7
6613322
#!/usr/bin/env python2 ## # PyChat # https://github.com/leosartaj/PyChat.git # # Copyright (c) 2014 <NAME> # Licensed under the MIT license. ## # system import import os import struct import cPickle as pickle # twisted imports from twisted.python import log from twisted.protocols import basic # user import from FileSender import FileSender from PyChat import command as cmd # prefix for commands SERVER_PREFIX = cmd.SERVER_PREFIX def dict_to_pickle(pickle_dict): """ convert from a dictionary to pickle """ return pickle.dumps(pickle_dict) def pickle_to_dict(pickle_str): """ convert from pickle to dictionary """ return pickle.loads(pickle_str) class FileClientProtocol(basic.Int32StringReceiver): """ Implements file transfer protocol """ def connectionMade(self): self.chatproto = self.factory.chatproto self._register() self.sending = False self.sfile = [None, None] self.rfile = {} def _register(self): """ Register with the ftp server send the refrence to the chatproto """ self.sendString(self.chatproto.setName) if self.factory.deferred: deferred, self.factory.deferred = self.factory.deferred, None deferred.callback(self) def stringReceived(self, line): """ Handles recieved file lines """ peername, line = self._parse(line) if line: self.update(peername, line) def _parse(self, line): """ Parse line for commands returns string to be logged otherwise simply returns line without change """ peername, line = cmd.extractFirst(line) comd, val = cmd.parse(line, SERVER_PREFIX) if comd == 'eof': value = 'File sent: %s' %(self.sfile) peername = 'me' self._reset() else: pickle_dict = pickle_to_dict(val) value = self._parseDict(pickle_dict) return peername, value def _parseDict(self, pickle_dict): """ Parses the pickle_dict Takes measures on the files """ fName = pickle_dict['filename'] if pickle_dict.has_key('eof'): value = self._closeFile(fName) elif pickle_dict.has_key('fail'): value = self._closeFile(fName, False) elif pickle_dict.has_key('line'): saveline = pickle_dict['line'] 
value = self._saveFile(fName, saveline) else: return None return value def update(self, name, msg, logit=True): """ Updates the gui logs the messages if logit set to true """ self.chatproto.update(name, msg) if logit: log.msg(msg) def status(self): """ gives the status of sending and receiving returns a tuple sending(bool) """ return self.sending def sendFile(self, fName): """ Sends file to the server """ self.sending = True filename = os.path.basename(fName) self.sfile = filename fileprotocol = FileSender() sendfile, startsend = fileprotocol.beginFileTransfer(fName, self.transport, self.transform) sendfile.addCallback(self._endTransfer) sendfile.addErrback(self._sendingFailed) startsend.callback(1) def _getDict(self): """ Returns a dictionary """ pickle_dict = {} pickle_dict['filename'] = self.sfile return pickle_dict def transform(self, line): """ Transforms a line to be saved in a file """ pickle_dict = self._getDict() pickle_dict['line'] = line pickle_str = dict_to_pickle(pickle_dict) # prefix, for the protocol as file sender does not use sendString prefix = struct.pack(self.structFormat, len(pickle_str)) pickle_str = prefix + pickle_str return pickle_str def _endTransfer(self, *args): """ End file transfer """ pickle_dict = self._getDict() pickle_dict['eof'] = True pickle_str = dict_to_pickle(pickle_dict) self.sendString(pickle_str) self.sendString(cmd.servercmd('eof', self.sfile)) def _sendingFailed(self, exc): log.msg(exc) msg = 'File Sending failed' self.update('me', msg) pickle_dict = self._getDict() pickle_dict['fail'] = True pickle_str = dict_to_pickle(pickle_dict) self.sendString(pickle_str) self.sendString(cmd.servercmd('fail', self.sfile)) def _reset(self): """ Reset the variables """ self.sending = False self.sfile = [None, None] def _initFile(self, fName='unnamed', dire=os.getcwd(), prefix='pychat_'): """ opens a file returns the handler """ filename = os.path.basename(fName) path = os.path.join(dire, prefix + filename) handler = open(path, 'w') 
return handler def _saveFile(self, fName, fline): """ Parses the line saves the line in the file returns the result string """ if not self.rfile.has_key(fName): handler = self._initFile(fName) self.rfile[fName] = handler value = 'Recieving: ' + fName elif self.rfile.has_key(fName): handler = self.rfile[fName] value = None else: return handler.write(fline) return value def _closeFile(self, fName, status=True): """ safely closes the file cleans up rfiles dict returns the result """ handler = self.rfile[fName] handler.close() del self.rfile[fName] if status: value = 'Recieved: ' + fName else: value = 'File could not be received: ' + fName return value
#!/usr/bin/env python2 ## # PyChat # https://github.com/leosartaj/PyChat.git # # Copyright (c) 2014 <NAME> # Licensed under the MIT license. ## # system import import os import struct import cPickle as pickle # twisted imports from twisted.python import log from twisted.protocols import basic # user import from FileSender import FileSender from PyChat import command as cmd # prefix for commands SERVER_PREFIX = cmd.SERVER_PREFIX def dict_to_pickle(pickle_dict): """ convert from a dictionary to pickle """ return pickle.dumps(pickle_dict) def pickle_to_dict(pickle_str): """ convert from pickle to dictionary """ return pickle.loads(pickle_str) class FileClientProtocol(basic.Int32StringReceiver): """ Implements file transfer protocol """ def connectionMade(self): self.chatproto = self.factory.chatproto self._register() self.sending = False self.sfile = [None, None] self.rfile = {} def _register(self): """ Register with the ftp server send the refrence to the chatproto """ self.sendString(self.chatproto.setName) if self.factory.deferred: deferred, self.factory.deferred = self.factory.deferred, None deferred.callback(self) def stringReceived(self, line): """ Handles recieved file lines """ peername, line = self._parse(line) if line: self.update(peername, line) def _parse(self, line): """ Parse line for commands returns string to be logged otherwise simply returns line without change """ peername, line = cmd.extractFirst(line) comd, val = cmd.parse(line, SERVER_PREFIX) if comd == 'eof': value = 'File sent: %s' %(self.sfile) peername = 'me' self._reset() else: pickle_dict = pickle_to_dict(val) value = self._parseDict(pickle_dict) return peername, value def _parseDict(self, pickle_dict): """ Parses the pickle_dict Takes measures on the files """ fName = pickle_dict['filename'] if pickle_dict.has_key('eof'): value = self._closeFile(fName) elif pickle_dict.has_key('fail'): value = self._closeFile(fName, False) elif pickle_dict.has_key('line'): saveline = pickle_dict['line'] 
value = self._saveFile(fName, saveline) else: return None return value def update(self, name, msg, logit=True): """ Updates the gui logs the messages if logit set to true """ self.chatproto.update(name, msg) if logit: log.msg(msg) def status(self): """ gives the status of sending and receiving returns a tuple sending(bool) """ return self.sending def sendFile(self, fName): """ Sends file to the server """ self.sending = True filename = os.path.basename(fName) self.sfile = filename fileprotocol = FileSender() sendfile, startsend = fileprotocol.beginFileTransfer(fName, self.transport, self.transform) sendfile.addCallback(self._endTransfer) sendfile.addErrback(self._sendingFailed) startsend.callback(1) def _getDict(self): """ Returns a dictionary """ pickle_dict = {} pickle_dict['filename'] = self.sfile return pickle_dict def transform(self, line): """ Transforms a line to be saved in a file """ pickle_dict = self._getDict() pickle_dict['line'] = line pickle_str = dict_to_pickle(pickle_dict) # prefix, for the protocol as file sender does not use sendString prefix = struct.pack(self.structFormat, len(pickle_str)) pickle_str = prefix + pickle_str return pickle_str def _endTransfer(self, *args): """ End file transfer """ pickle_dict = self._getDict() pickle_dict['eof'] = True pickle_str = dict_to_pickle(pickle_dict) self.sendString(pickle_str) self.sendString(cmd.servercmd('eof', self.sfile)) def _sendingFailed(self, exc): log.msg(exc) msg = 'File Sending failed' self.update('me', msg) pickle_dict = self._getDict() pickle_dict['fail'] = True pickle_str = dict_to_pickle(pickle_dict) self.sendString(pickle_str) self.sendString(cmd.servercmd('fail', self.sfile)) def _reset(self): """ Reset the variables """ self.sending = False self.sfile = [None, None] def _initFile(self, fName='unnamed', dire=os.getcwd(), prefix='pychat_'): """ opens a file returns the handler """ filename = os.path.basename(fName) path = os.path.join(dire, prefix + filename) handler = open(path, 'w') 
return handler def _saveFile(self, fName, fline): """ Parses the line saves the line in the file returns the result string """ if not self.rfile.has_key(fName): handler = self._initFile(fName) self.rfile[fName] = handler value = 'Recieving: ' + fName elif self.rfile.has_key(fName): handler = self.rfile[fName] value = None else: return handler.write(fline) return value def _closeFile(self, fName, status=True): """ safely closes the file cleans up rfiles dict returns the result """ handler = self.rfile[fName] handler.close() del self.rfile[fName] if status: value = 'Recieved: ' + fName else: value = 'File could not be received: ' + fName return value
en
0.694874
#!/usr/bin/env python2 ## # PyChat # https://github.com/leosartaj/PyChat.git # # Copyright (c) 2014 <NAME> # Licensed under the MIT license. ## # system import # twisted imports # user import # prefix for commands convert from a dictionary to pickle convert from pickle to dictionary Implements file transfer protocol Register with the ftp server send the refrence to the chatproto Handles recieved file lines Parse line for commands returns string to be logged otherwise simply returns line without change Parses the pickle_dict Takes measures on the files Updates the gui logs the messages if logit set to true gives the status of sending and receiving returns a tuple sending(bool) Sends file to the server Returns a dictionary Transforms a line to be saved in a file # prefix, for the protocol as file sender does not use sendString End file transfer Reset the variables opens a file returns the handler Parses the line saves the line in the file returns the result string safely closes the file cleans up rfiles dict returns the result
2.505916
3
ILI/IDEC.py
arodriguezca/DeepOutbreak
1
6613323
<reponame>arodriguezca/DeepOutbreak """ Toy implementation for Improved Deep Embedded Clustering as described in paper: <NAME>, <NAME>, <NAME>, <NAME>. Improved Deep Embedded Clustering with Local Structure Preservation. IJCAI 2017. The Autoencoder is pretrained directly in an end-to-end manner, NOT greedy layer-wise training. So the results are different with what reported in the paper. Usage: No pretrained autoencoder weights available: python IDEC.py mnist python IDEC.py usps python IDEC.py reutersidf10k --n_clusters 4 Weights of Pretrained autoencoder for mnist are in './ae_weights/mnist_ae_weights.h5': python IDEC.py mnist --ae_weights ./ae_weights/mnist_ae_weights.h5 Author: <NAME>. 2017.4.30 """ from time import time import numpy as np from keras.models import Model from keras.optimizers import SGD from keras.utils.vis_utils import plot_model from sklearn.cluster import KMeans from sklearn import metrics from DEC import cluster_acc, ClusteringLayer, autoencoder class IDEC(object): def __init__(self, dims, n_clusters=4, alpha=1.0): super(IDEC, self).__init__() self.dims = dims self.input_dim = dims[0] self.n_stacks = len(self.dims) - 1 self.n_clusters = n_clusters self.alpha = alpha self.autoencoder = autoencoder(self.dims) hidden = self.autoencoder.get_layer(name='encoder_%d' % (self.n_stacks - 1)).output self.encoder = Model(inputs=self.autoencoder.input, outputs=hidden) # prepare IDEC model clustering_layer = ClusteringLayer(self.n_clusters, alpha=self.alpha, name='clustering')(hidden) self.model = Model(inputs=self.autoencoder.input, outputs=[clustering_layer, self.autoencoder.output]) self.pretrained = False self.centers = [] self.y_pred = [] def pretrain(self, x, batch_size=256, epochs=200, optimizer='adam'): print('...Pretraining...') self.autoencoder.compile(loss='mse', optimizer=optimizer) # SGD(lr=0.01, momentum=0.9), self.autoencoder.fit(x, x, batch_size=batch_size, epochs=epochs) self.autoencoder.save_weights('ae_weights.h5') print('Pretrained 
weights are saved to ./ae_weights.h5') self.pretrained = True def load_weights(self, weights_path): # load weights of IDEC model self.model.load_weights(weights_path) def extract_feature(self, x): # extract features from before clustering layer return self.encoder.predict(x) def predict_clusters(self, x): # predict cluster labels using the output of clustering layer q, _ = self.model.predict(x, verbose=0) return q.argmax(1) @staticmethod def target_distribution(q): # target distribution P which enhances the discrimination of soft label Q weight = q ** 2 / q.sum(0) return (weight.T / weight.sum(1)).T def compile(self, loss=['kld', 'mse'], loss_weights=[1, 1], optimizer='adam'): self.model.compile(loss=loss, loss_weights=loss_weights, optimizer=optimizer) def fit(self, x, y=None, batch_size=256, maxiter=2e4, tol=1e-3, update_interval=140, ae_weights=None, save_dir='./results/idec'): print('Update interval', update_interval) save_interval = int(x.shape[0] / batch_size) * 5 # 5 epochs save_interval = 50 print('Save interval', save_interval) # Step 1: pretrain if not self.pretrained and ae_weights is None: print('...pretraining autoencoders using default hyper-parameters:') print(' optimizer=\'adam\'; epochs=200') self.pretrain(x, batch_size) self.pretrained = True elif ae_weights is not None: self.autoencoder.load_weights(ae_weights) print('ae_weights is loaded successfully.') # Step 2: initialize cluster centers using k-means print('Initializing cluster centers with k-means.') kmeans = KMeans(n_clusters=self.n_clusters, n_init=4) self.y_pred = kmeans.fit_predict(self.encoder.predict(x)) y_pred_last = np.copy(self.y_pred) self.model.get_layer(name='clustering').set_weights([kmeans.cluster_centers_]) # Step 3: deep clustering # logging file import csv, os if not os.path.exists(save_dir): os.makedirs(save_dir) logfile = open(save_dir + '/idec_log.csv', 'w') logwriter = csv.DictWriter(logfile, fieldnames=['iter', 'acc', 'nmi', 'ari', 'L', 'Lc', 'Lr']) 
logwriter.writeheader() loss = [0, 0, 0] index = 0 for ite in range(int(maxiter)): if ite % update_interval == 0: q, _ = self.model.predict(x, verbose=0) p = self.target_distribution(q) # update the auxiliary target distribution p # evaluate the clustering performance self.y_pred = q.argmax(1) if y is not None: acc = np.round(cluster_acc(y, self.y_pred), 5) nmi = np.round(metrics.normalized_mutual_info_score(y, self.y_pred), 5) ari = np.round(metrics.adjusted_rand_score(y, self.y_pred), 5) loss = np.round(loss, 5) logwriter.writerow(dict(iter=ite, acc=acc, nmi=nmi, ari=ari, L=loss[0], Lc=loss[1], Lr=loss[2])) print('Iter-%d: ACC= %.4f, NMI= %.4f, ARI= %.4f; L= %.5f, Lc= %.5f, Lr= %.5f' % (ite, acc, nmi, ari, loss[0], loss[1], loss[2])) # check stop criterion delta_label = np.sum(self.y_pred != y_pred_last).astype(np.float32) / self.y_pred.shape[0] y_pred_last = np.copy(self.y_pred) if ite > 0 and delta_label < tol: print('delta_label ', delta_label, '< tol ', tol) print('Reached tolerance threshold. 
Stopping training.') logfile.close() break # train on batch if (index + 1) * batch_size > x.shape[0]: loss = self.model.train_on_batch(x=x[index * batch_size::], y=[p[index * batch_size::], x[index * batch_size::]]) index = 0 else: loss = self.model.train_on_batch(x=x[index * batch_size:(index + 1) * batch_size], y=[p[index * batch_size:(index + 1) * batch_size], x[index * batch_size:(index + 1) * batch_size]]) index += 1 # save intermediate model if ite % save_interval == 0: # save IDEC model checkpoints print('saving model to: ' + save_dir + '/IDEC_model_' + str(ite) + '.h5') self.model.save_weights(save_dir + '/IDEC_model_' + str(ite) + '.h5') ite += 1 # save the trained model logfile.close() print('saving model to: ' + save_dir + '/IDEC_model_final.h5') self.model.save_weights(save_dir + '/IDEC_model_final.h5') return self.y_pred if __name__ == "__main__": # setting the hyper parameters import argparse parser = argparse.ArgumentParser(description='train', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('dataset', default='mydata', choices=['mnist', 'usps', 'reutersidf10k', 'pendigits', 'mydata']) parser.add_argument('--n_clusters', default=4, type=int) parser.add_argument('--batch_size', default=256, type=int) parser.add_argument('--maxiter', default=2e4, type=int) parser.add_argument('--pretrain_epochs', default=200, type=int) parser.add_argument('--gamma', default=0.1, type=float, help='coefficient of clustering loss') parser.add_argument('--update_interval', default=1, type=int) parser.add_argument('--tol', default=0.001, type=float) parser.add_argument('--ae_weights', default=None) parser.add_argument('--save_dir', default='results/idec') args = parser.parse_args() print(args) # load dataset optimizer = 'adam' # SGD(lr=0.01, momentum=0.99) from datasets import load_mnist, load_reuters, load_usps, load_pendigits, load_mydata if args.dataset == 'mnist': # recommends: n_clusters=10, update_interval=140 x, y = load_mnist() elif 
args.dataset == 'usps': # recommends: n_clusters=10, update_interval=30 x, y = load_usps('data/usps') elif args.dataset == 'pendigits': x, y = load_pendigits('data/pendigits') elif args.dataset == 'reutersidf10k': # recommends: n_clusters=4, update_interval=20 x, y = load_reuters('data/reuters') elif args.dataset == 'mydata': # recommends: n_clusters=4, update_interval=20 x, y = load_mydata(path = './data/mydata') if args.update_interval == 0: # one epoch args.update_interval = int(x.shape[0]/args.batch_size) # Define IDEC model idec = IDEC(dims=[x.shape[-1], 500, 500, 2000, 18], n_clusters=args.n_clusters) #plot_model(idec.model, to_file='idec_model.png', show_shapes=True) idec.model.summary() t0 = time() # Pretrain autoencoders before clustering if args.ae_weights is None: idec.pretrain(x, batch_size=args.batch_size, epochs=args.pretrain_epochs, optimizer=optimizer) # begin clustering, time not include pretraining part. idec.compile(loss=['kld', 'mse'], loss_weights=[args.gamma, 1], optimizer=optimizer) idec.fit(x, y=y, batch_size=args.batch_size, tol=args.tol, maxiter=args.maxiter, update_interval=args.update_interval, ae_weights=args.ae_weights, save_dir=args.save_dir) # Show the final results y_pred = idec.y_pred print(y_pred) print('acc:', cluster_acc(y, y_pred)) print('clustering time: %d seconds.' % int(time() - t0)) embed = idec.encoder.predict(x) year = 1999 emd_file = open("deep_embedding", 'w') for embedding in embed: emd_file.write(str(year)+':') for vals in embedding: emd_file.write('\t'+str(vals)) emd_file.write('\n') year+=1 emd_file.close() year = 1999 cl_file = open("deep_clustering", 'w') for cl in y_pred: cl_file.write(str(year)+'\t'+str(cl)) emd_file.write('\n') year+=1 cl_file.close()
""" Toy implementation for Improved Deep Embedded Clustering as described in paper: <NAME>, <NAME>, <NAME>, <NAME>. Improved Deep Embedded Clustering with Local Structure Preservation. IJCAI 2017. The Autoencoder is pretrained directly in an end-to-end manner, NOT greedy layer-wise training. So the results are different with what reported in the paper. Usage: No pretrained autoencoder weights available: python IDEC.py mnist python IDEC.py usps python IDEC.py reutersidf10k --n_clusters 4 Weights of Pretrained autoencoder for mnist are in './ae_weights/mnist_ae_weights.h5': python IDEC.py mnist --ae_weights ./ae_weights/mnist_ae_weights.h5 Author: <NAME>. 2017.4.30 """ from time import time import numpy as np from keras.models import Model from keras.optimizers import SGD from keras.utils.vis_utils import plot_model from sklearn.cluster import KMeans from sklearn import metrics from DEC import cluster_acc, ClusteringLayer, autoencoder class IDEC(object): def __init__(self, dims, n_clusters=4, alpha=1.0): super(IDEC, self).__init__() self.dims = dims self.input_dim = dims[0] self.n_stacks = len(self.dims) - 1 self.n_clusters = n_clusters self.alpha = alpha self.autoencoder = autoencoder(self.dims) hidden = self.autoencoder.get_layer(name='encoder_%d' % (self.n_stacks - 1)).output self.encoder = Model(inputs=self.autoencoder.input, outputs=hidden) # prepare IDEC model clustering_layer = ClusteringLayer(self.n_clusters, alpha=self.alpha, name='clustering')(hidden) self.model = Model(inputs=self.autoencoder.input, outputs=[clustering_layer, self.autoencoder.output]) self.pretrained = False self.centers = [] self.y_pred = [] def pretrain(self, x, batch_size=256, epochs=200, optimizer='adam'): print('...Pretraining...') self.autoencoder.compile(loss='mse', optimizer=optimizer) # SGD(lr=0.01, momentum=0.9), self.autoencoder.fit(x, x, batch_size=batch_size, epochs=epochs) self.autoencoder.save_weights('ae_weights.h5') print('Pretrained weights are saved to ./ae_weights.h5') 
self.pretrained = True def load_weights(self, weights_path): # load weights of IDEC model self.model.load_weights(weights_path) def extract_feature(self, x): # extract features from before clustering layer return self.encoder.predict(x) def predict_clusters(self, x): # predict cluster labels using the output of clustering layer q, _ = self.model.predict(x, verbose=0) return q.argmax(1) @staticmethod def target_distribution(q): # target distribution P which enhances the discrimination of soft label Q weight = q ** 2 / q.sum(0) return (weight.T / weight.sum(1)).T def compile(self, loss=['kld', 'mse'], loss_weights=[1, 1], optimizer='adam'): self.model.compile(loss=loss, loss_weights=loss_weights, optimizer=optimizer) def fit(self, x, y=None, batch_size=256, maxiter=2e4, tol=1e-3, update_interval=140, ae_weights=None, save_dir='./results/idec'): print('Update interval', update_interval) save_interval = int(x.shape[0] / batch_size) * 5 # 5 epochs save_interval = 50 print('Save interval', save_interval) # Step 1: pretrain if not self.pretrained and ae_weights is None: print('...pretraining autoencoders using default hyper-parameters:') print(' optimizer=\'adam\'; epochs=200') self.pretrain(x, batch_size) self.pretrained = True elif ae_weights is not None: self.autoencoder.load_weights(ae_weights) print('ae_weights is loaded successfully.') # Step 2: initialize cluster centers using k-means print('Initializing cluster centers with k-means.') kmeans = KMeans(n_clusters=self.n_clusters, n_init=4) self.y_pred = kmeans.fit_predict(self.encoder.predict(x)) y_pred_last = np.copy(self.y_pred) self.model.get_layer(name='clustering').set_weights([kmeans.cluster_centers_]) # Step 3: deep clustering # logging file import csv, os if not os.path.exists(save_dir): os.makedirs(save_dir) logfile = open(save_dir + '/idec_log.csv', 'w') logwriter = csv.DictWriter(logfile, fieldnames=['iter', 'acc', 'nmi', 'ari', 'L', 'Lc', 'Lr']) logwriter.writeheader() loss = [0, 0, 0] index = 0 for ite 
in range(int(maxiter)): if ite % update_interval == 0: q, _ = self.model.predict(x, verbose=0) p = self.target_distribution(q) # update the auxiliary target distribution p # evaluate the clustering performance self.y_pred = q.argmax(1) if y is not None: acc = np.round(cluster_acc(y, self.y_pred), 5) nmi = np.round(metrics.normalized_mutual_info_score(y, self.y_pred), 5) ari = np.round(metrics.adjusted_rand_score(y, self.y_pred), 5) loss = np.round(loss, 5) logwriter.writerow(dict(iter=ite, acc=acc, nmi=nmi, ari=ari, L=loss[0], Lc=loss[1], Lr=loss[2])) print('Iter-%d: ACC= %.4f, NMI= %.4f, ARI= %.4f; L= %.5f, Lc= %.5f, Lr= %.5f' % (ite, acc, nmi, ari, loss[0], loss[1], loss[2])) # check stop criterion delta_label = np.sum(self.y_pred != y_pred_last).astype(np.float32) / self.y_pred.shape[0] y_pred_last = np.copy(self.y_pred) if ite > 0 and delta_label < tol: print('delta_label ', delta_label, '< tol ', tol) print('Reached tolerance threshold. Stopping training.') logfile.close() break # train on batch if (index + 1) * batch_size > x.shape[0]: loss = self.model.train_on_batch(x=x[index * batch_size::], y=[p[index * batch_size::], x[index * batch_size::]]) index = 0 else: loss = self.model.train_on_batch(x=x[index * batch_size:(index + 1) * batch_size], y=[p[index * batch_size:(index + 1) * batch_size], x[index * batch_size:(index + 1) * batch_size]]) index += 1 # save intermediate model if ite % save_interval == 0: # save IDEC model checkpoints print('saving model to: ' + save_dir + '/IDEC_model_' + str(ite) + '.h5') self.model.save_weights(save_dir + '/IDEC_model_' + str(ite) + '.h5') ite += 1 # save the trained model logfile.close() print('saving model to: ' + save_dir + '/IDEC_model_final.h5') self.model.save_weights(save_dir + '/IDEC_model_final.h5') return self.y_pred if __name__ == "__main__": # setting the hyper parameters import argparse parser = argparse.ArgumentParser(description='train', formatter_class=argparse.ArgumentDefaultsHelpFormatter) 
parser.add_argument('dataset', default='mydata', choices=['mnist', 'usps', 'reutersidf10k', 'pendigits', 'mydata']) parser.add_argument('--n_clusters', default=4, type=int) parser.add_argument('--batch_size', default=256, type=int) parser.add_argument('--maxiter', default=2e4, type=int) parser.add_argument('--pretrain_epochs', default=200, type=int) parser.add_argument('--gamma', default=0.1, type=float, help='coefficient of clustering loss') parser.add_argument('--update_interval', default=1, type=int) parser.add_argument('--tol', default=0.001, type=float) parser.add_argument('--ae_weights', default=None) parser.add_argument('--save_dir', default='results/idec') args = parser.parse_args() print(args) # load dataset optimizer = 'adam' # SGD(lr=0.01, momentum=0.99) from datasets import load_mnist, load_reuters, load_usps, load_pendigits, load_mydata if args.dataset == 'mnist': # recommends: n_clusters=10, update_interval=140 x, y = load_mnist() elif args.dataset == 'usps': # recommends: n_clusters=10, update_interval=30 x, y = load_usps('data/usps') elif args.dataset == 'pendigits': x, y = load_pendigits('data/pendigits') elif args.dataset == 'reutersidf10k': # recommends: n_clusters=4, update_interval=20 x, y = load_reuters('data/reuters') elif args.dataset == 'mydata': # recommends: n_clusters=4, update_interval=20 x, y = load_mydata(path = './data/mydata') if args.update_interval == 0: # one epoch args.update_interval = int(x.shape[0]/args.batch_size) # Define IDEC model idec = IDEC(dims=[x.shape[-1], 500, 500, 2000, 18], n_clusters=args.n_clusters) #plot_model(idec.model, to_file='idec_model.png', show_shapes=True) idec.model.summary() t0 = time() # Pretrain autoencoders before clustering if args.ae_weights is None: idec.pretrain(x, batch_size=args.batch_size, epochs=args.pretrain_epochs, optimizer=optimizer) # begin clustering, time not include pretraining part. 
idec.compile(loss=['kld', 'mse'], loss_weights=[args.gamma, 1], optimizer=optimizer) idec.fit(x, y=y, batch_size=args.batch_size, tol=args.tol, maxiter=args.maxiter, update_interval=args.update_interval, ae_weights=args.ae_weights, save_dir=args.save_dir) # Show the final results y_pred = idec.y_pred print(y_pred) print('acc:', cluster_acc(y, y_pred)) print('clustering time: %d seconds.' % int(time() - t0)) embed = idec.encoder.predict(x) year = 1999 emd_file = open("deep_embedding", 'w') for embedding in embed: emd_file.write(str(year)+':') for vals in embedding: emd_file.write('\t'+str(vals)) emd_file.write('\n') year+=1 emd_file.close() year = 1999 cl_file = open("deep_clustering", 'w') for cl in y_pred: cl_file.write(str(year)+'\t'+str(cl)) emd_file.write('\n') year+=1 cl_file.close()
en
0.633961
Toy implementation for Improved Deep Embedded Clustering as described in paper: <NAME>, <NAME>, <NAME>, <NAME>. Improved Deep Embedded Clustering with Local Structure Preservation. IJCAI 2017. The Autoencoder is pretrained directly in an end-to-end manner, NOT greedy layer-wise training. So the results are different with what reported in the paper. Usage: No pretrained autoencoder weights available: python IDEC.py mnist python IDEC.py usps python IDEC.py reutersidf10k --n_clusters 4 Weights of Pretrained autoencoder for mnist are in './ae_weights/mnist_ae_weights.h5': python IDEC.py mnist --ae_weights ./ae_weights/mnist_ae_weights.h5 Author: <NAME>. 2017.4.30 # prepare IDEC model # SGD(lr=0.01, momentum=0.9), # load weights of IDEC model # extract features from before clustering layer # predict cluster labels using the output of clustering layer # target distribution P which enhances the discrimination of soft label Q # 5 epochs # Step 1: pretrain # Step 2: initialize cluster centers using k-means # Step 3: deep clustering # logging file # update the auxiliary target distribution p # evaluate the clustering performance # check stop criterion # train on batch # save intermediate model # save IDEC model checkpoints # save the trained model # setting the hyper parameters # load dataset # SGD(lr=0.01, momentum=0.99) # recommends: n_clusters=10, update_interval=140 # recommends: n_clusters=10, update_interval=30 # recommends: n_clusters=4, update_interval=20 # recommends: n_clusters=4, update_interval=20 # one epoch # Define IDEC model #plot_model(idec.model, to_file='idec_model.png', show_shapes=True) # Pretrain autoencoders before clustering # begin clustering, time not include pretraining part. # Show the final results
2.692522
3
Archives/Model Testing/Testing_2.py
sukumarh/Helio-Learning
2
6613324
import datetime as dt from Helper_Functions import File_Ops as File_IO from Helper_Functions import Plotting_Ops as Plotter from Helper_Functions import Dataset_Ops as Data def main(): print('Model Testing') start_time_train = dt.datetime(2003, 1, 1, 0, 0, 0) stop_time_train = dt.datetime(2003, 1, 10, 0, 0, 0) directory = 'Processed_Data/2004/01_01.p' test_dataset, is_success = File_IO.read_data(directory) file_name = 'Trained_Models/svr_multi_feature_1.p' data, is_read_successful = File_IO.read_model(file_name) if is_read_successful: print('Model retrieved') if data[0] == 'single_svr': y_test = test_dataset[:, 0].T[: 20000] X_test = test_dataset[:, 2:][: 20000] svr_model = data[1] # y_hat = svr.fit(X_train, y_train).predict(X_train) # Plotter.plot_svr(X_train, y_hat, y_train, svr, svr.kernel, 'm', 'r', # [min(X_train) - 0.1, max(X_train) + 0.1, min(y_train) - 1, max(y_train) + 1]) y_hat = svr_model.predict(X_test) test_score = svr_model.score(X_test, y_test) print(f'Test Score = {test_score}') # Plotter.plot_svr(X_test, y_hat, y_test, svr, svr.kernel, 'm', 'b', mode='test') # elif data[0] == 'Non_Linear_Reg_SVR_Tuned_Splitted_Dataset': # [clf_neg, clf_pos, svr_pos, svr_neg] = data[1] # X_test_pos, X_test_neg, y_test_pos, y_test_neg = Data.split_pos_and_neg_set(X_test, y_test) # # y_hat_neg = svr_neg.predict(X_test_neg) # y_hat_pos = svr_pos.predict(X_test_pos) # # test_score_neg = svr_neg.score(X_test_neg, y_test_neg) # test_score_pos = svr_pos.score(X_test_pos, y_test_pos) # # print(f'Positive Test Score = {test_score_pos}') # print(f'Negative Test Score = {test_score_neg}') # # Plotter.plot_svr_combined([X_test_neg, X_test_pos], [y_hat_neg, y_hat_pos], [y_test_neg, y_test_pos], # [svr_neg.kernel, svr_pos.kernel], ['m', 'g'], ['b', 'b'], # [svr_neg, svr_pos], # mode='test') if __name__ == '__main__': main()
import datetime as dt from Helper_Functions import File_Ops as File_IO from Helper_Functions import Plotting_Ops as Plotter from Helper_Functions import Dataset_Ops as Data def main(): print('Model Testing') start_time_train = dt.datetime(2003, 1, 1, 0, 0, 0) stop_time_train = dt.datetime(2003, 1, 10, 0, 0, 0) directory = 'Processed_Data/2004/01_01.p' test_dataset, is_success = File_IO.read_data(directory) file_name = 'Trained_Models/svr_multi_feature_1.p' data, is_read_successful = File_IO.read_model(file_name) if is_read_successful: print('Model retrieved') if data[0] == 'single_svr': y_test = test_dataset[:, 0].T[: 20000] X_test = test_dataset[:, 2:][: 20000] svr_model = data[1] # y_hat = svr.fit(X_train, y_train).predict(X_train) # Plotter.plot_svr(X_train, y_hat, y_train, svr, svr.kernel, 'm', 'r', # [min(X_train) - 0.1, max(X_train) + 0.1, min(y_train) - 1, max(y_train) + 1]) y_hat = svr_model.predict(X_test) test_score = svr_model.score(X_test, y_test) print(f'Test Score = {test_score}') # Plotter.plot_svr(X_test, y_hat, y_test, svr, svr.kernel, 'm', 'b', mode='test') # elif data[0] == 'Non_Linear_Reg_SVR_Tuned_Splitted_Dataset': # [clf_neg, clf_pos, svr_pos, svr_neg] = data[1] # X_test_pos, X_test_neg, y_test_pos, y_test_neg = Data.split_pos_and_neg_set(X_test, y_test) # # y_hat_neg = svr_neg.predict(X_test_neg) # y_hat_pos = svr_pos.predict(X_test_pos) # # test_score_neg = svr_neg.score(X_test_neg, y_test_neg) # test_score_pos = svr_pos.score(X_test_pos, y_test_pos) # # print(f'Positive Test Score = {test_score_pos}') # print(f'Negative Test Score = {test_score_neg}') # # Plotter.plot_svr_combined([X_test_neg, X_test_pos], [y_hat_neg, y_hat_pos], [y_test_neg, y_test_pos], # [svr_neg.kernel, svr_pos.kernel], ['m', 'g'], ['b', 'b'], # [svr_neg, svr_pos], # mode='test') if __name__ == '__main__': main()
en
0.200436
# y_hat = svr.fit(X_train, y_train).predict(X_train) # Plotter.plot_svr(X_train, y_hat, y_train, svr, svr.kernel, 'm', 'r', # [min(X_train) - 0.1, max(X_train) + 0.1, min(y_train) - 1, max(y_train) + 1]) # Plotter.plot_svr(X_test, y_hat, y_test, svr, svr.kernel, 'm', 'b', mode='test') # elif data[0] == 'Non_Linear_Reg_SVR_Tuned_Splitted_Dataset': # [clf_neg, clf_pos, svr_pos, svr_neg] = data[1] # X_test_pos, X_test_neg, y_test_pos, y_test_neg = Data.split_pos_and_neg_set(X_test, y_test) # # y_hat_neg = svr_neg.predict(X_test_neg) # y_hat_pos = svr_pos.predict(X_test_pos) # # test_score_neg = svr_neg.score(X_test_neg, y_test_neg) # test_score_pos = svr_pos.score(X_test_pos, y_test_pos) # # print(f'Positive Test Score = {test_score_pos}') # print(f'Negative Test Score = {test_score_neg}') # # Plotter.plot_svr_combined([X_test_neg, X_test_pos], [y_hat_neg, y_hat_pos], [y_test_neg, y_test_pos], # [svr_neg.kernel, svr_pos.kernel], ['m', 'g'], ['b', 'b'], # [svr_neg, svr_pos], # mode='test')
2.601698
3
python3/even_tree.py
ahavrylyuk/hackerrank
0
6613325
<filename>python3/even_tree.py #! /usr/bin/env python from collections import defaultdict def dfs_visit(parent, g, start): for v in g[start]: counts[start].add(v) if v not in parent: parent.append(v) subtree = dfs_visit(parent, g, v) for u in subtree: if u != start: counts[start].add(u) return counts[start] def read_line(): return (int(x) for x in input().split()) if __name__ == '__main__': g = defaultdict(set) n, m = read_line() for _ in range(m): vi, ui = read_line() g[vi].add(ui) g[ui].add(vi) start = next(iter(g)) parent = [start] counts = defaultdict(set) dfs_visit(parent, g, start) from pprint import pprint pprint(counts) print(sum(1 for x in counts.values() if len(x) % 2 is 0))
<filename>python3/even_tree.py #! /usr/bin/env python from collections import defaultdict def dfs_visit(parent, g, start): for v in g[start]: counts[start].add(v) if v not in parent: parent.append(v) subtree = dfs_visit(parent, g, v) for u in subtree: if u != start: counts[start].add(u) return counts[start] def read_line(): return (int(x) for x in input().split()) if __name__ == '__main__': g = defaultdict(set) n, m = read_line() for _ in range(m): vi, ui = read_line() g[vi].add(ui) g[ui].add(vi) start = next(iter(g)) parent = [start] counts = defaultdict(set) dfs_visit(parent, g, start) from pprint import pprint pprint(counts) print(sum(1 for x in counts.values() if len(x) % 2 is 0))
ru
0.148623
#! /usr/bin/env python
3.584033
4
Duke/scripts/main.py
remram44/duke
0
6613326
<reponame>remram44/duke import numpy as np from Duke.agg_functions import * from Duke.dataset_descriptor import DatasetDescriptor from Duke.utils import mean_of_rows def main( dataset_path='/vectorizationdata/KnowledgeGraph2Vec/duke-dev/data/185_baseball.csv', tree_path='../ontologies/class-tree_dbpedia_2016-10.json', embedding_path='/vectorizationdata/KnowledgeGraph2Vec/duke-dev/embeddings/wiki2vec/en.model', row_agg_func=mean_of_rows, tree_agg_func=parent_children_funcs(np.mean, max), source_agg_func=mean_of_rows, max_num_samples = 1e6, verbose=True, ): duke = DatasetDescriptor( dataset=dataset_path, tree=tree_path, embedding=embedding_path, row_agg_func=row_agg_func, tree_agg_func=tree_agg_func, source_agg_func=source_agg_func, max_num_samples=max_num_samples, verbose=verbose, ) print('initialized duke dataset descriptor \n') out = duke.get_top_n_words(10) print("The top N=%d words are"%10) print(out) return duke.get_dataset_description() if __name__ == '__main__': main()
import numpy as np from Duke.agg_functions import * from Duke.dataset_descriptor import DatasetDescriptor from Duke.utils import mean_of_rows def main( dataset_path='/vectorizationdata/KnowledgeGraph2Vec/duke-dev/data/185_baseball.csv', tree_path='../ontologies/class-tree_dbpedia_2016-10.json', embedding_path='/vectorizationdata/KnowledgeGraph2Vec/duke-dev/embeddings/wiki2vec/en.model', row_agg_func=mean_of_rows, tree_agg_func=parent_children_funcs(np.mean, max), source_agg_func=mean_of_rows, max_num_samples = 1e6, verbose=True, ): duke = DatasetDescriptor( dataset=dataset_path, tree=tree_path, embedding=embedding_path, row_agg_func=row_agg_func, tree_agg_func=tree_agg_func, source_agg_func=source_agg_func, max_num_samples=max_num_samples, verbose=verbose, ) print('initialized duke dataset descriptor \n') out = duke.get_top_n_words(10) print("The top N=%d words are"%10) print(out) return duke.get_dataset_description() if __name__ == '__main__': main()
none
1
2.60565
3
scripts/libplatform.py
sjl3110/TF-M
0
6613327
Path = '/home/sjl/work/tf-m/libplatform_b' total = 0 for line in open(Path,"r"): content = line.split() total = total + int(content[3]) print("total = %d" % total) Path2 = '/home/sjl/work/tf-m/libplatform' total = 0 for line in open(Path2,"r"): if line.find("otal") > 0: content = line.split() total = total + int(content[1]) print("total = %d" % total)
Path = '/home/sjl/work/tf-m/libplatform_b' total = 0 for line in open(Path,"r"): content = line.split() total = total + int(content[3]) print("total = %d" % total) Path2 = '/home/sjl/work/tf-m/libplatform' total = 0 for line in open(Path2,"r"): if line.find("otal") > 0: content = line.split() total = total + int(content[1]) print("total = %d" % total)
none
1
2.771983
3
MultiAtlasSegmenter/LabelPropagation/LabelPropagationAndSelectionEvaluation.py
mabelzunce/MuscleSegmentation
0
6613328
#! python3 # This script compares multiple label propagation and selection cases for all the cases in a library. from __future__ import print_function import SimpleITK as sitk import numpy as np import sys import os import NormalizedCrossCorrelationMetrics as NCC import MajorityVoting as MV import PostprocessingLabels as PP from DynamicLabelFusionWithSimilarityWeights import DynamicLabelFusionWithLocalSimilarityWeights as DynamicLocalLabelling from DynamicLabelFusionWithSimilarityWeights import DynamicLabelFusionWithSimilarityWeights as DynamicLabelling ############################### TARGET FOLDER ################################### # The target folder needs to have all the files that are saved by the plugin when intermediates files are saved. libraryVersion = 'V1.2' segType = 'BSplineStandardGradDesc_NMI_2000iters_2000samples' numberOfSelectedAtlases = 19 # I need all the atlases, but if the segmentation was ran in dbug mode I'll have all anyways. excludeFemurs = True # The femurs have been segmented in a few atlases, but because they are only in a few of them, it # introduces erros in the undecided label. 
numLabels = 11 if excludeFemurs: numLabels = 9 numLabelWithoutUndecided = numLabels - 1 maskedRegistration = True libraryCases = '' libraryPath = 'D:\\Martin\\Segmentation\\AtlasLibrary\\' + libraryVersion + '\\NativeResolutionAndSize2\\' targetPath = 'D:\\MuscleSegmentationEvaluation\\SegmentationWithPython\\' + libraryVersion + '\\TestWithLibrary\\Nonrigid{0}_N{1}_MaxProb_Mask\\'.format(segType, numberOfSelectedAtlases) targetPath = 'D:\\MuscleSegmentationEvaluation\\SegmentationWithPython\\' + libraryVersion + '\\TestWithLibrary\\{0}_N{1}_{2}\\'.format(segType, numberOfSelectedAtlases, maskedRegistration) outputPath = 'D:\\MuscleSegmentationEvaluation\\SegmentationWithPython\\' + libraryVersion + '\\TestWithLibrary\\{0}_N{1}_{2}_LibSize\\'.format(segType, numberOfSelectedAtlases, maskedRegistration) # Exponential weights: if not os.path.exists(outputPath): os.mkdir(outputPath) expWeight=2 outputPath = outputPath + '\\expweightFusion_{0}\\'.format(expWeight) if not os.path.exists(outputPath): os.mkdir(outputPath) extensionImages = 'mhd' regImagesFilenameEnd = '_to_target.mhd' regLabelsFilenameEnd = '_to_target_labels.mhd' # Look for the raw files in the library: dirWithCases = os.listdir(targetPath) #dirWithCases = dirWithCases[10:] atlasImagesNames = [] atlasLabelsNames = [] # Label fusion strategies to include in the analysis processWithMajorityVoting = True processWithSTAPLES = True processWithGlobalWeightedVoting = True processWithRoiWeightedVoting = True # Name of atlases in order to be included: atlasNamesInOrderOfInclusion = ['ID00001', 'ID00002', 'ID00003', 'ID00005', 'ID00006', 'ID00008', 'ID00010', 'ID00011', 'ID00013', 'ID00014', 'ID00021', 'ID00029', 'ID00061','L0511645', '7390413', '7386347', 'L0045955', 'L0324841','L0364068','L0029976'] # Instead of using all cases from dirWithCases, I select a few of them for the library: numberOfCases = 8 # Process multiple number of cases selection: #[4,6,8,10,12,14,17]: for numberOfCases in [15, 17]: 
atlasesLibrary = atlasNamesInOrderOfInclusion[0:numberOfCases] # For the segmentation, we evaluate all of them: for filenameCases in dirWithCases: outputPathThisLibrarySize = outputPath + 'LibrarySize{0}\\'.format(numberOfCases) if not os.path.exists(outputPathThisLibrarySize): os.mkdir(outputPathThisLibrarySize) if os.path.isdir(targetPath + filenameCases): caseName = filenameCases # Data path were all the registered images: dataPath = targetPath + filenameCases + "\\" # Output path inside this folder: outputPathThisCase = outputPathThisLibrarySize + caseName + "\\" if not os.path.exists(outputPathThisCase): os.mkdir(outputPathThisCase) # Create a log file: log = open(outputPathThisCase + 'log.txt', 'w') # First read the target image: targetImage = sitk.ReadImage(dataPath + "input_registration.mhd") # get labels from library path: targetLabels = sitk.ReadImage(libraryPath + caseName + "_labels.mhd") # Look for the header files of the registered images: files = os.listdir(dataPath) # Get the images and labels filenames: registeredFilenames = [] registeredLabelsFilenames = [] for filename in files: if filename.endswith(regImagesFilenameEnd): i = filename.find(regImagesFilenameEnd) nameThisAtlas = filename[0:i] filenameLabels = nameThisAtlas + regLabelsFilenameEnd if os.path.isfile(dataPath + filenameLabels) & (nameThisAtlas in atlasesLibrary): registeredFilenames.append(filename) registeredLabelsFilenames.append(filenameLabels) # Need to create a dictionary with all the registered images (too much memory?): registeredImage = [] labelsImage = [] for i in range(0, len(registeredFilenames)): # Read image: registeredImage.append(sitk.ReadImage(dataPath + registeredFilenames[i])) # Call the local similarity metric and save the image: labelImage = sitk.ReadImage(dataPath + registeredLabelsFilenames[i]) # Remove femurs if indicated: if excludeFemurs: maskFilter = sitk.MaskImageFilter() maskFilter.SetOutsideValue(0) maskFilter.SetMaskingValue(9) labelImage = 
maskFilter.Execute(labelImage, labelImage) maskFilter.SetMaskingValue(10) labelImage = maskFilter.Execute(labelImage, labelImage) labelsImage.append(labelImage) # Select the N most similar cases: # Get similarity weights for each label mask for each atlas lnccValues = np.zeros(len(registeredImage)) # Get a similarity metric for each label: for i in range(0, len(registeredImage)): # Using the similarity of the full image: maskThisLabel = sitk.GreaterEqual(targetImage, 0) # Use all the voxels. lncc = NCC.RoiNormalizedCrossCorrelationAsInITK(targetImage, registeredImage[i], maskThisLabel) lnccValues[i] = lncc # Sort indices for atlas selection and voting: indicesSorted = np.argsort(lnccValues) # Write log: log.write('Similarity metric values (lncc): {0}\n'.format(lncc)) selectedAtlasesValues = range(2, len(registeredImage)) jaccard = np.zeros((len(selectedAtlasesValues), numLabels)) dice = np.zeros((len(selectedAtlasesValues), numLabels)) volumeSimilarity = np.zeros((len(selectedAtlasesValues), numLabels)) fn = np.zeros((len(selectedAtlasesValues), numLabels)) fp = np.zeros((len(selectedAtlasesValues), numLabels)) jaccardAll = np.zeros((len(selectedAtlasesValues), 1)) diceAll = np.zeros((len(selectedAtlasesValues), 1)) volumeSimilarityAll = np.zeros((len(selectedAtlasesValues), 1)) fnAll = np.zeros((len(selectedAtlasesValues), 1)) fpAll = np.zeros((len(selectedAtlasesValues), 1)) if processWithMajorityVoting: j = 0 for numSelectedAtlases in selectedAtlasesValues: # Selected atlases: indicesSelected = indicesSorted[0:numSelectedAtlases] # Now do the atlas selection (I can't access given indices so need a for): propagatedLabels = list() for index in indicesSelected: propagatedLabels.append(labelsImage[index]) ##################### LABEL FUSION WITH MAJORITY VOTING ################################# outputLabels = sitk.LabelVoting(propagatedLabels, numLabels) # After label voting I will have undecided voxels, add an undecided solving step: outputLabels = 
MV.SetUndecidedVoxelsUsingDistances(outputLabels, numLabels) #Get largest connceted regions for each label: #outputLabels = PP.FilterUnconnectedRegion(outputLabels, numLabels-1) # Not necessary for the undecided label log.write('Indices of selected atlases: {0}\n'.format(indicesSelected)) # Write the results: sitk.WriteImage(outputLabels, outputPathThisCase + "segmentedImage_MajVot_{0}.mhd".format(numSelectedAtlases)) # Get metrics: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.SetGlobalDefaultCoordinateTolerance(1e-2) # First get a general metric: overlap_measures_filter.Execute(targetLabels, outputLabels) jaccardAll[j] = overlap_measures_filter.GetJaccardCoefficient() diceAll[j] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarityAll[j] = overlap_measures_filter.GetVolumeSimilarity() fnAll[j] = overlap_measures_filter.GetFalseNegativeError() fpAll[j] = overlap_measures_filter.GetFalsePositiveError() log.write('Dice All Labels: {0}\n'.format(diceAll[j])) # Overlap measures for each label: for labelIndex in range(0, numLabels): # The index needs to be icnreased as the base number is 1. 
overlap_measures_filter.Execute(targetLabels == (labelIndex+1), outputLabels == (labelIndex+1)) jaccard[j, labelIndex] = overlap_measures_filter.GetJaccardCoefficient() dice[j, labelIndex] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarity[j, labelIndex] = overlap_measures_filter.GetVolumeSimilarity() fn[j, labelIndex] = overlap_measures_filter.GetFalseNegativeError() fp[j, labelIndex] = overlap_measures_filter.GetFalsePositiveError() log.write('Dice Label {0}: {1}\n'.format(labelIndex, dice[j, labelIndex])) j = j+1 np.savetxt(outputPathThisCase + "jaccard.csv", jaccard, delimiter=",") np.savetxt(outputPathThisCase + "dice.csv", dice, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilarity.csv", volumeSimilarity, delimiter=",") np.savetxt(outputPathThisCase + "fn.csv", fn, delimiter=",") np.savetxt(outputPathThisCase + "fp.csv", fp, delimiter=",") np.savetxt(outputPathThisCase + "jaccardAll.csv", jaccardAll, delimiter=",") np.savetxt(outputPathThisCase + "diceAll.csv", diceAll, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilarityAll.csv", volumeSimilarityAll, delimiter=",") np.savetxt(outputPathThisCase + "fnAll.csv", fnAll, delimiter=",") np.savetxt(outputPathThisCase + "fpAll.csv", fpAll, delimiter=",") # Now repeat for STAPLEs: if processWithSTAPLES: j = 0 for numSelectedAtlases in range(2, len(registeredImage)): # Selected atlases: indicesSelected = indicesSorted[0:numSelectedAtlases] # Now do the atlas selection (I can't access given indices so need a for): propagatedLabels = list() for index in indicesSelected: propagatedLabels.append(labelsImage[index]) ##################### LABEL FUSION WITH MAJORITY VOTING ################################# multilabelStaple = sitk.MultiLabelSTAPLEImageFilter() multilabelStaple.SetTerminationUpdateThreshold(1e-4) multilabelStaple.SetMaximumNumberOfIterations(30) multilabelStaple.SetLabelForUndecidedPixels(numLabels) outputLabelsSTAPLES = multilabelStaple.Execute(propagatedLabels) 
log.write('Indices of selected atlases STAPLES: {0}\n'.format(indicesSelected)) sitk.WriteImage(outputLabelsSTAPLES, outputPathThisCase + "segmentedImageSTAPLES_{0}.mhd".format(numSelectedAtlases)) # Get metrics: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.SetGlobalDefaultCoordinateTolerance(1e-2) # Overlap measures for each label: for labelIndex in range(0, numLabels): overlap_measures_filter.Execute(targetLabels == (labelIndex+1), outputLabelsSTAPLES == (labelIndex+1)) jaccard[j, labelIndex] = overlap_measures_filter.GetJaccardCoefficient() dice[j, labelIndex] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarity[j, labelIndex] = overlap_measures_filter.GetVolumeSimilarity() fn[j, labelIndex] = overlap_measures_filter.GetFalseNegativeError() fp[j, labelIndex] = overlap_measures_filter.GetFalsePositiveError() log.write('STAPLES Dice Label {0}: {1}\n'.format(labelIndex, dice[j, labelIndex])) j = j + 1 np.savetxt(outputPathThisCase + "jaccardSTAPLES.csv", jaccard, delimiter=",") np.savetxt(outputPathThisCase + "diceSTAPLES.csv", dice, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilaritySTAPLES.csv", volumeSimilarity, delimiter=",") np.savetxt(outputPathThisCase + "fnSTAPLES.csv", fn, delimiter=",") np.savetxt(outputPathThisCase + "fpSTAPLES.csv", fp, delimiter=",") ##################### LABEL FUSION WITH DYNAMIC WEIGHTING VOTING ################################# if processWithGlobalWeightedVoting: j = 0 for numSelectedAtlases in selectedAtlasesValues: # Call function for global weighted voting: registeredAtlases = {'image': registeredImage, 'labels': labelsImage} outputLabelsGWV = DynamicLabelling(targetImage, registeredAtlases, numLabelWithoutUndecided, numSelectedAtlases=numSelectedAtlases, expWeight=expWeight, useOnlyLabelVoxels=True, outputPath=outputPathThisCase, debug=0) # Write the results: sitk.WriteImage(outputLabelsGWV, outputPathThisCase + 
"segmentedImageGWV_{0}.mhd".format(numSelectedAtlases)) # Get metrics: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.SetGlobalDefaultCoordinateTolerance(1e-2) # Overlap measures for each label: for labelIndex in range(0, numLabelWithoutUndecided): # The index needs to be icnreased as the base number is 1. overlap_measures_filter.Execute(targetLabels == (labelIndex + 1), outputLabelsGWV == (labelIndex + 1)) jaccard[j, labelIndex] = overlap_measures_filter.GetJaccardCoefficient() dice[j, labelIndex] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarity[j, labelIndex] = overlap_measures_filter.GetVolumeSimilarity() fn[j, labelIndex] = overlap_measures_filter.GetFalseNegativeError() fp[j, labelIndex] = overlap_measures_filter.GetFalsePositiveError() log.write('GWV Dice Label {0}: {1}\n'.format(labelIndex, dice[j, labelIndex])) j = j + 1 np.savetxt(outputPathThisCase + "jaccardGWV.csv", jaccard, delimiter=",") np.savetxt(outputPathThisCase + "diceGWV.csv", dice, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilarityGWV.csv", volumeSimilarity, delimiter=",") np.savetxt(outputPathThisCase + "fnGWV.csv", fn, delimiter=",") np.savetxt(outputPathThisCase + "fpGWV.csv", fp, delimiter=",") if processWithRoiWeightedVoting: j = 0 for numSelectedAtlases in selectedAtlasesValues: # Call function for global weighted voting: registeredAtlases = {'image': registeredImage, 'labels': labelsImage} outputLabelsLWV = DynamicLocalLabelling(targetImage, registeredAtlases, numLabelWithoutUndecided, numSelectedAtlases = numSelectedAtlases, expWeight=expWeight, outputPath=outputPathThisCase, debug = 0) # Write the results: sitk.WriteImage(outputLabelsLWV, outputPathThisCase + "segmentedImageRWV_{0}.mhd".format(numSelectedAtlases)) # Get metrics: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.SetGlobalDefaultCoordinateTolerance(1e-2) # Overlap measures for each label: for labelIndex in 
range(0, numLabelWithoutUndecided): # The index needs to be icnreased as the base number is 1. overlap_measures_filter.Execute(targetLabels == (labelIndex + 1), outputLabelsLWV == (labelIndex + 1)) jaccard[j, labelIndex] = overlap_measures_filter.GetJaccardCoefficient() dice[j, labelIndex] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarity[j, labelIndex] = overlap_measures_filter.GetVolumeSimilarity() fn[j, labelIndex] = overlap_measures_filter.GetFalseNegativeError() fp[j, labelIndex] = overlap_measures_filter.GetFalsePositiveError() log.write('LWV Dice Label {0}: {1}\n'.format(labelIndex, dice[j, labelIndex])) j = j + 1 np.savetxt(outputPathThisCase + "jaccardRWV.csv", jaccard, delimiter=",") np.savetxt(outputPathThisCase + "diceRWV.csv", dice, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilarityRWV.csv", volumeSimilarity, delimiter=",") np.savetxt(outputPathThisCase + "fnRWV.csv", fn, delimiter=",") np.savetxt(outputPathThisCase + "fpRWV.csv", fp, delimiter=",") log.close()
#! python3 # This script compares multiple label propagation and selection cases for all the cases in a library. from __future__ import print_function import SimpleITK as sitk import numpy as np import sys import os import NormalizedCrossCorrelationMetrics as NCC import MajorityVoting as MV import PostprocessingLabels as PP from DynamicLabelFusionWithSimilarityWeights import DynamicLabelFusionWithLocalSimilarityWeights as DynamicLocalLabelling from DynamicLabelFusionWithSimilarityWeights import DynamicLabelFusionWithSimilarityWeights as DynamicLabelling ############################### TARGET FOLDER ################################### # The target folder needs to have all the files that are saved by the plugin when intermediates files are saved. libraryVersion = 'V1.2' segType = 'BSplineStandardGradDesc_NMI_2000iters_2000samples' numberOfSelectedAtlases = 19 # I need all the atlases, but if the segmentation was ran in dbug mode I'll have all anyways. excludeFemurs = True # The femurs have been segmented in a few atlases, but because they are only in a few of them, it # introduces erros in the undecided label. 
numLabels = 11 if excludeFemurs: numLabels = 9 numLabelWithoutUndecided = numLabels - 1 maskedRegistration = True libraryCases = '' libraryPath = 'D:\\Martin\\Segmentation\\AtlasLibrary\\' + libraryVersion + '\\NativeResolutionAndSize2\\' targetPath = 'D:\\MuscleSegmentationEvaluation\\SegmentationWithPython\\' + libraryVersion + '\\TestWithLibrary\\Nonrigid{0}_N{1}_MaxProb_Mask\\'.format(segType, numberOfSelectedAtlases) targetPath = 'D:\\MuscleSegmentationEvaluation\\SegmentationWithPython\\' + libraryVersion + '\\TestWithLibrary\\{0}_N{1}_{2}\\'.format(segType, numberOfSelectedAtlases, maskedRegistration) outputPath = 'D:\\MuscleSegmentationEvaluation\\SegmentationWithPython\\' + libraryVersion + '\\TestWithLibrary\\{0}_N{1}_{2}_LibSize\\'.format(segType, numberOfSelectedAtlases, maskedRegistration) # Exponential weights: if not os.path.exists(outputPath): os.mkdir(outputPath) expWeight=2 outputPath = outputPath + '\\expweightFusion_{0}\\'.format(expWeight) if not os.path.exists(outputPath): os.mkdir(outputPath) extensionImages = 'mhd' regImagesFilenameEnd = '_to_target.mhd' regLabelsFilenameEnd = '_to_target_labels.mhd' # Look for the raw files in the library: dirWithCases = os.listdir(targetPath) #dirWithCases = dirWithCases[10:] atlasImagesNames = [] atlasLabelsNames = [] # Label fusion strategies to include in the analysis processWithMajorityVoting = True processWithSTAPLES = True processWithGlobalWeightedVoting = True processWithRoiWeightedVoting = True # Name of atlases in order to be included: atlasNamesInOrderOfInclusion = ['ID00001', 'ID00002', 'ID00003', 'ID00005', 'ID00006', 'ID00008', 'ID00010', 'ID00011', 'ID00013', 'ID00014', 'ID00021', 'ID00029', 'ID00061','L0511645', '7390413', '7386347', 'L0045955', 'L0324841','L0364068','L0029976'] # Instead of using all cases from dirWithCases, I select a few of them for the library: numberOfCases = 8 # Process multiple number of cases selection: #[4,6,8,10,12,14,17]: for numberOfCases in [15, 17]: 
atlasesLibrary = atlasNamesInOrderOfInclusion[0:numberOfCases] # For the segmentation, we evaluate all of them: for filenameCases in dirWithCases: outputPathThisLibrarySize = outputPath + 'LibrarySize{0}\\'.format(numberOfCases) if not os.path.exists(outputPathThisLibrarySize): os.mkdir(outputPathThisLibrarySize) if os.path.isdir(targetPath + filenameCases): caseName = filenameCases # Data path were all the registered images: dataPath = targetPath + filenameCases + "\\" # Output path inside this folder: outputPathThisCase = outputPathThisLibrarySize + caseName + "\\" if not os.path.exists(outputPathThisCase): os.mkdir(outputPathThisCase) # Create a log file: log = open(outputPathThisCase + 'log.txt', 'w') # First read the target image: targetImage = sitk.ReadImage(dataPath + "input_registration.mhd") # get labels from library path: targetLabels = sitk.ReadImage(libraryPath + caseName + "_labels.mhd") # Look for the header files of the registered images: files = os.listdir(dataPath) # Get the images and labels filenames: registeredFilenames = [] registeredLabelsFilenames = [] for filename in files: if filename.endswith(regImagesFilenameEnd): i = filename.find(regImagesFilenameEnd) nameThisAtlas = filename[0:i] filenameLabels = nameThisAtlas + regLabelsFilenameEnd if os.path.isfile(dataPath + filenameLabels) & (nameThisAtlas in atlasesLibrary): registeredFilenames.append(filename) registeredLabelsFilenames.append(filenameLabels) # Need to create a dictionary with all the registered images (too much memory?): registeredImage = [] labelsImage = [] for i in range(0, len(registeredFilenames)): # Read image: registeredImage.append(sitk.ReadImage(dataPath + registeredFilenames[i])) # Call the local similarity metric and save the image: labelImage = sitk.ReadImage(dataPath + registeredLabelsFilenames[i]) # Remove femurs if indicated: if excludeFemurs: maskFilter = sitk.MaskImageFilter() maskFilter.SetOutsideValue(0) maskFilter.SetMaskingValue(9) labelImage = 
maskFilter.Execute(labelImage, labelImage) maskFilter.SetMaskingValue(10) labelImage = maskFilter.Execute(labelImage, labelImage) labelsImage.append(labelImage) # Select the N most similar cases: # Get similarity weights for each label mask for each atlas lnccValues = np.zeros(len(registeredImage)) # Get a similarity metric for each label: for i in range(0, len(registeredImage)): # Using the similarity of the full image: maskThisLabel = sitk.GreaterEqual(targetImage, 0) # Use all the voxels. lncc = NCC.RoiNormalizedCrossCorrelationAsInITK(targetImage, registeredImage[i], maskThisLabel) lnccValues[i] = lncc # Sort indices for atlas selection and voting: indicesSorted = np.argsort(lnccValues) # Write log: log.write('Similarity metric values (lncc): {0}\n'.format(lncc)) selectedAtlasesValues = range(2, len(registeredImage)) jaccard = np.zeros((len(selectedAtlasesValues), numLabels)) dice = np.zeros((len(selectedAtlasesValues), numLabels)) volumeSimilarity = np.zeros((len(selectedAtlasesValues), numLabels)) fn = np.zeros((len(selectedAtlasesValues), numLabels)) fp = np.zeros((len(selectedAtlasesValues), numLabels)) jaccardAll = np.zeros((len(selectedAtlasesValues), 1)) diceAll = np.zeros((len(selectedAtlasesValues), 1)) volumeSimilarityAll = np.zeros((len(selectedAtlasesValues), 1)) fnAll = np.zeros((len(selectedAtlasesValues), 1)) fpAll = np.zeros((len(selectedAtlasesValues), 1)) if processWithMajorityVoting: j = 0 for numSelectedAtlases in selectedAtlasesValues: # Selected atlases: indicesSelected = indicesSorted[0:numSelectedAtlases] # Now do the atlas selection (I can't access given indices so need a for): propagatedLabels = list() for index in indicesSelected: propagatedLabels.append(labelsImage[index]) ##################### LABEL FUSION WITH MAJORITY VOTING ################################# outputLabels = sitk.LabelVoting(propagatedLabels, numLabels) # After label voting I will have undecided voxels, add an undecided solving step: outputLabels = 
MV.SetUndecidedVoxelsUsingDistances(outputLabels, numLabels) #Get largest connceted regions for each label: #outputLabels = PP.FilterUnconnectedRegion(outputLabels, numLabels-1) # Not necessary for the undecided label log.write('Indices of selected atlases: {0}\n'.format(indicesSelected)) # Write the results: sitk.WriteImage(outputLabels, outputPathThisCase + "segmentedImage_MajVot_{0}.mhd".format(numSelectedAtlases)) # Get metrics: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.SetGlobalDefaultCoordinateTolerance(1e-2) # First get a general metric: overlap_measures_filter.Execute(targetLabels, outputLabels) jaccardAll[j] = overlap_measures_filter.GetJaccardCoefficient() diceAll[j] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarityAll[j] = overlap_measures_filter.GetVolumeSimilarity() fnAll[j] = overlap_measures_filter.GetFalseNegativeError() fpAll[j] = overlap_measures_filter.GetFalsePositiveError() log.write('Dice All Labels: {0}\n'.format(diceAll[j])) # Overlap measures for each label: for labelIndex in range(0, numLabels): # The index needs to be icnreased as the base number is 1. 
overlap_measures_filter.Execute(targetLabels == (labelIndex+1), outputLabels == (labelIndex+1)) jaccard[j, labelIndex] = overlap_measures_filter.GetJaccardCoefficient() dice[j, labelIndex] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarity[j, labelIndex] = overlap_measures_filter.GetVolumeSimilarity() fn[j, labelIndex] = overlap_measures_filter.GetFalseNegativeError() fp[j, labelIndex] = overlap_measures_filter.GetFalsePositiveError() log.write('Dice Label {0}: {1}\n'.format(labelIndex, dice[j, labelIndex])) j = j+1 np.savetxt(outputPathThisCase + "jaccard.csv", jaccard, delimiter=",") np.savetxt(outputPathThisCase + "dice.csv", dice, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilarity.csv", volumeSimilarity, delimiter=",") np.savetxt(outputPathThisCase + "fn.csv", fn, delimiter=",") np.savetxt(outputPathThisCase + "fp.csv", fp, delimiter=",") np.savetxt(outputPathThisCase + "jaccardAll.csv", jaccardAll, delimiter=",") np.savetxt(outputPathThisCase + "diceAll.csv", diceAll, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilarityAll.csv", volumeSimilarityAll, delimiter=",") np.savetxt(outputPathThisCase + "fnAll.csv", fnAll, delimiter=",") np.savetxt(outputPathThisCase + "fpAll.csv", fpAll, delimiter=",") # Now repeat for STAPLEs: if processWithSTAPLES: j = 0 for numSelectedAtlases in range(2, len(registeredImage)): # Selected atlases: indicesSelected = indicesSorted[0:numSelectedAtlases] # Now do the atlas selection (I can't access given indices so need a for): propagatedLabels = list() for index in indicesSelected: propagatedLabels.append(labelsImage[index]) ##################### LABEL FUSION WITH MAJORITY VOTING ################################# multilabelStaple = sitk.MultiLabelSTAPLEImageFilter() multilabelStaple.SetTerminationUpdateThreshold(1e-4) multilabelStaple.SetMaximumNumberOfIterations(30) multilabelStaple.SetLabelForUndecidedPixels(numLabels) outputLabelsSTAPLES = multilabelStaple.Execute(propagatedLabels) 
log.write('Indices of selected atlases STAPLES: {0}\n'.format(indicesSelected)) sitk.WriteImage(outputLabelsSTAPLES, outputPathThisCase + "segmentedImageSTAPLES_{0}.mhd".format(numSelectedAtlases)) # Get metrics: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.SetGlobalDefaultCoordinateTolerance(1e-2) # Overlap measures for each label: for labelIndex in range(0, numLabels): overlap_measures_filter.Execute(targetLabels == (labelIndex+1), outputLabelsSTAPLES == (labelIndex+1)) jaccard[j, labelIndex] = overlap_measures_filter.GetJaccardCoefficient() dice[j, labelIndex] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarity[j, labelIndex] = overlap_measures_filter.GetVolumeSimilarity() fn[j, labelIndex] = overlap_measures_filter.GetFalseNegativeError() fp[j, labelIndex] = overlap_measures_filter.GetFalsePositiveError() log.write('STAPLES Dice Label {0}: {1}\n'.format(labelIndex, dice[j, labelIndex])) j = j + 1 np.savetxt(outputPathThisCase + "jaccardSTAPLES.csv", jaccard, delimiter=",") np.savetxt(outputPathThisCase + "diceSTAPLES.csv", dice, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilaritySTAPLES.csv", volumeSimilarity, delimiter=",") np.savetxt(outputPathThisCase + "fnSTAPLES.csv", fn, delimiter=",") np.savetxt(outputPathThisCase + "fpSTAPLES.csv", fp, delimiter=",") ##################### LABEL FUSION WITH DYNAMIC WEIGHTING VOTING ################################# if processWithGlobalWeightedVoting: j = 0 for numSelectedAtlases in selectedAtlasesValues: # Call function for global weighted voting: registeredAtlases = {'image': registeredImage, 'labels': labelsImage} outputLabelsGWV = DynamicLabelling(targetImage, registeredAtlases, numLabelWithoutUndecided, numSelectedAtlases=numSelectedAtlases, expWeight=expWeight, useOnlyLabelVoxels=True, outputPath=outputPathThisCase, debug=0) # Write the results: sitk.WriteImage(outputLabelsGWV, outputPathThisCase + 
"segmentedImageGWV_{0}.mhd".format(numSelectedAtlases)) # Get metrics: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.SetGlobalDefaultCoordinateTolerance(1e-2) # Overlap measures for each label: for labelIndex in range(0, numLabelWithoutUndecided): # The index needs to be icnreased as the base number is 1. overlap_measures_filter.Execute(targetLabels == (labelIndex + 1), outputLabelsGWV == (labelIndex + 1)) jaccard[j, labelIndex] = overlap_measures_filter.GetJaccardCoefficient() dice[j, labelIndex] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarity[j, labelIndex] = overlap_measures_filter.GetVolumeSimilarity() fn[j, labelIndex] = overlap_measures_filter.GetFalseNegativeError() fp[j, labelIndex] = overlap_measures_filter.GetFalsePositiveError() log.write('GWV Dice Label {0}: {1}\n'.format(labelIndex, dice[j, labelIndex])) j = j + 1 np.savetxt(outputPathThisCase + "jaccardGWV.csv", jaccard, delimiter=",") np.savetxt(outputPathThisCase + "diceGWV.csv", dice, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilarityGWV.csv", volumeSimilarity, delimiter=",") np.savetxt(outputPathThisCase + "fnGWV.csv", fn, delimiter=",") np.savetxt(outputPathThisCase + "fpGWV.csv", fp, delimiter=",") if processWithRoiWeightedVoting: j = 0 for numSelectedAtlases in selectedAtlasesValues: # Call function for global weighted voting: registeredAtlases = {'image': registeredImage, 'labels': labelsImage} outputLabelsLWV = DynamicLocalLabelling(targetImage, registeredAtlases, numLabelWithoutUndecided, numSelectedAtlases = numSelectedAtlases, expWeight=expWeight, outputPath=outputPathThisCase, debug = 0) # Write the results: sitk.WriteImage(outputLabelsLWV, outputPathThisCase + "segmentedImageRWV_{0}.mhd".format(numSelectedAtlases)) # Get metrics: overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter() overlap_measures_filter.SetGlobalDefaultCoordinateTolerance(1e-2) # Overlap measures for each label: for labelIndex in 
range(0, numLabelWithoutUndecided): # The index needs to be icnreased as the base number is 1. overlap_measures_filter.Execute(targetLabels == (labelIndex + 1), outputLabelsLWV == (labelIndex + 1)) jaccard[j, labelIndex] = overlap_measures_filter.GetJaccardCoefficient() dice[j, labelIndex] = overlap_measures_filter.GetDiceCoefficient() volumeSimilarity[j, labelIndex] = overlap_measures_filter.GetVolumeSimilarity() fn[j, labelIndex] = overlap_measures_filter.GetFalseNegativeError() fp[j, labelIndex] = overlap_measures_filter.GetFalsePositiveError() log.write('LWV Dice Label {0}: {1}\n'.format(labelIndex, dice[j, labelIndex])) j = j + 1 np.savetxt(outputPathThisCase + "jaccardRWV.csv", jaccard, delimiter=",") np.savetxt(outputPathThisCase + "diceRWV.csv", dice, delimiter=",") np.savetxt(outputPathThisCase + "volumeSimilarityRWV.csv", volumeSimilarity, delimiter=",") np.savetxt(outputPathThisCase + "fnRWV.csv", fn, delimiter=",") np.savetxt(outputPathThisCase + "fpRWV.csv", fp, delimiter=",") log.close()
en
0.814213
#! python3 # This script compares multiple label propagation and selection cases for all the cases in a library. ############################### TARGET FOLDER ################################### # The target folder needs to have all the files that are saved by the plugin when intermediates files are saved. # I need all the atlases, but if the segmentation was ran in dbug mode I'll have all anyways. # The femurs have been segmented in a few atlases, but because they are only in a few of them, it # introduces erros in the undecided label. # Exponential weights: # Look for the raw files in the library: #dirWithCases = dirWithCases[10:] # Label fusion strategies to include in the analysis # Name of atlases in order to be included: # Instead of using all cases from dirWithCases, I select a few of them for the library: # Process multiple number of cases selection: #[4,6,8,10,12,14,17]: # For the segmentation, we evaluate all of them: # Data path were all the registered images: # Output path inside this folder: # Create a log file: # First read the target image: # get labels from library path: # Look for the header files of the registered images: # Get the images and labels filenames: # Need to create a dictionary with all the registered images (too much memory?): # Read image: # Call the local similarity metric and save the image: # Remove femurs if indicated: # Select the N most similar cases: # Get similarity weights for each label mask for each atlas # Get a similarity metric for each label: # Using the similarity of the full image: # Use all the voxels. 
# Sort indices for atlas selection and voting: # Write log: # Selected atlases: # Now do the atlas selection (I can't access given indices so need a for): ##################### LABEL FUSION WITH MAJORITY VOTING ################################# # After label voting I will have undecided voxels, add an undecided solving step: #Get largest connceted regions for each label: #outputLabels = PP.FilterUnconnectedRegion(outputLabels, numLabels-1) # Not necessary for the undecided label # Write the results: # Get metrics: # First get a general metric: # Overlap measures for each label: # The index needs to be icnreased as the base number is 1. # Now repeat for STAPLEs: # Selected atlases: # Now do the atlas selection (I can't access given indices so need a for): ##################### LABEL FUSION WITH MAJORITY VOTING ################################# # Get metrics: # Overlap measures for each label: ##################### LABEL FUSION WITH DYNAMIC WEIGHTING VOTING ################################# # Call function for global weighted voting: # Write the results: # Get metrics: # Overlap measures for each label: # The index needs to be icnreased as the base number is 1. # Call function for global weighted voting: # Write the results: # Get metrics: # Overlap measures for each label: # The index needs to be icnreased as the base number is 1.
1.756758
2
kvack/cli.py
radowit/kvack
0
6613329
"""Console script for kvack.""" import sys from typing import Any, cast import click import toml ConfigType = dict[str, dict[str, dict[str, dict[str, dict[str, Any]]]]] # type: ignore @click.group() # type: ignore def main() -> None: pass @main.command() # type: ignore def gen() -> None: config = cast(ConfigType, toml.load("pyproject.toml")) tools = config["tool"]["kvack"]["tool"] for name, options in tools.items(): config["tool"].setdefault(name, {}).update(options) with open("pyproject.toml", "w+") as pyproject_file: toml.dump(config, pyproject_file) click.echo(f"Tool configs generated: {', '.join(tools)}") if __name__ == "__main__": sys.exit(main()) # type: ignore
"""Console script for kvack.""" import sys from typing import Any, cast import click import toml ConfigType = dict[str, dict[str, dict[str, dict[str, dict[str, Any]]]]] # type: ignore @click.group() # type: ignore def main() -> None: pass @main.command() # type: ignore def gen() -> None: config = cast(ConfigType, toml.load("pyproject.toml")) tools = config["tool"]["kvack"]["tool"] for name, options in tools.items(): config["tool"].setdefault(name, {}).update(options) with open("pyproject.toml", "w+") as pyproject_file: toml.dump(config, pyproject_file) click.echo(f"Tool configs generated: {', '.join(tools)}") if __name__ == "__main__": sys.exit(main()) # type: ignore
en
0.258968
Console script for kvack. # type: ignore # type: ignore # type: ignore # type: ignore
2.263063
2
scraper/scraper/items.py
Georgitanev/py_django_scrape
0
6613330
""" Parliament Pipeline""" # # See documentation in: # https://docs.scrapy.org/en/latest/topics/items.html import scrapy class ParliamentPipeline(scrapy.Item): """ Parliament Pipeline""" name = scrapy.Field() date_born = scrapy.Field() place_born = scrapy.Field() profession = scrapy.Field() lang = scrapy.Field() party = scrapy.Field() email = scrapy.Field() fb = scrapy.Field() url = scrapy.Field() pp = scrapy.Field() dob = scrapy.Field()
""" Parliament Pipeline""" # # See documentation in: # https://docs.scrapy.org/en/latest/topics/items.html import scrapy class ParliamentPipeline(scrapy.Item): """ Parliament Pipeline""" name = scrapy.Field() date_born = scrapy.Field() place_born = scrapy.Field() profession = scrapy.Field() lang = scrapy.Field() party = scrapy.Field() email = scrapy.Field() fb = scrapy.Field() url = scrapy.Field() pp = scrapy.Field() dob = scrapy.Field()
en
0.593407
Parliament Pipeline # # See documentation in: # https://docs.scrapy.org/en/latest/topics/items.html Parliament Pipeline
2.217769
2
src/MapMyNotesApplication/MapMyNotesApplication/models/user.py
Ryan-Gouldsmith/MajorProject-MapMyNotes
0
6613331
from MapMyNotesApplication import database
from sqlalchemy import Column, Integer, String

""" Interacts with the SQLAlchemy model """


class User(database.Model):
    """A registered user, identified by a unique email address."""

    # SQLAlchemy model specific attributes
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    # Column is capped at 100 characters; save() enforces the same limit.
    email_address = Column(String(100), nullable=False)

    def __init__(self, email_address):
        """Create a new (unsaved) user.

        Parameters
        ----------
        email_address: String identifying the user.
        """
        self.email_address = email_address

    def save(self):
        """Persist the current instance to the database.

        Returns
        -------
        False when the email address exceeds the 100-character column
        limit (nothing is written); True after a successful commit.
        Fix: the original fell through and returned None on success,
        which is falsy and therefore indistinguishable from failure in
        a truthiness check, despite the documented False-on-error
        contract.
        """
        if len(self.email_address) > 100:
            return False
        database.session.add(self)
        database.session.commit()
        return True

    @staticmethod
    def find_user_by_email_address(email_address):
        """Look up a user by email address.

        Parameters
        ----------
        email_address: String to match exactly against the users table.

        Returns
        -------
        The matching User instance, or None when no row matches.
        Raises sqlalchemy's MultipleResultsFound if the address is
        duplicated (one_or_none semantics).
        """
        return User.query.filter_by(email_address=email_address).one_or_none()
from MapMyNotesApplication import database from sqlalchemy import Column, Integer, String """ Interacts with the SQLAlchemy model """ class User(database.Model): #SQL Alchemy model specific attributes __tablename__ = 'users' id = Column(Integer, primary_key=True) email_address = Column(String(100), nullable=False) def __init__(self, email_address): """ Creates a new instance of the user Parameters ---------- email_address: String representation of the user """ self.email_address = email_address def save(self): """ Saves the current instance to the database Returns ------- False if there is an error. """ if len(self.email_address) > 100: return False database.session.add(self) database.session.commit() @staticmethod def find_user_by_email_address(email_address): """ Uses the SQLAlchemy API's to extract the email address from the database Parameters ---------- email_address: String representation of the email address that's been attempted to be discovered Returns ------- An instance of User if email address has been found None if there has been no results found. """ return User.query.filter_by(email_address=email_address).one_or_none()
en
0.869058
Interacts with the SQLAlchemy model #SQL Alchemy model specific attributes Creates a new instance of the user Parameters ---------- email_address: String representation of the user Saves the current instance to the database Returns ------- False if there is an error. Uses the SQLAlchemy API's to extract the email address from the database Parameters ---------- email_address: String representation of the email address that's been attempted to be discovered Returns ------- An instance of User if email address has been found None if there has been no results found.
3.615662
4
Pronote.py
Yaya-Cout/Python
5
6613332
"""Log in to the public Pronote demo account and print its message count."""
import pronotepy
# ENT-specific login helper; only needed when the account goes through an ENT.
from pronotepy.ent import ac_reunion

client = pronotepy.Client(
    'https://demo.index-education.net/pronote/eleve.html',
    username='demonstration',
    password='<PASSWORD>',
    # ent=ac_reunion,  # pass this for an ENT-backed account
)

if client.logged_in:
    print(len(client.messages()))
<filename>Pronote.py import pronotepy from pronotepy.ent import ac_reunion # importing ent specific function, you do not need to import anything if you dont use an ent client = pronotepy.Client('https://demo.index-education.net/pronote/eleve.html', username='demonstration', password='<PASSWORD>') # ent=ac_reunion) # ent specific if client.logged_in: print(len(client.messages()))
en
0.630045
# importing ent specific function, you do not need to import anything if you dont use an ent # ent=ac_reunion) # ent specific
2.437379
2
Alphabet/CaptialAlphabet/Static_cap_letter_while_loop/WHILE_LOOP_G.py
Polamreddykrishnareddy/PatternPackage
0
6613333
# Render the capital letter "G" as a 6x6 grid of asterisks.
for row in range(6):
    for col in range(6):
        # A cell is lit when it lies on one of the letter's strokes.
        lit = (
            (col == 0 and row != 0)            # left vertical bar
            or (row == 0 and col in (1, 2))    # top bar
            or (row == 3 and col != 1)         # middle bar
            or (col == 5 and row not in (0, 1, 2))  # lower right bar
            or (row == 4 and col == 3)
            or (row == 5 and col != 4)         # bottom bar
        )
        print("*" if lit else " ", end=" ")
    print()
#G row=0 while row<6: col =0 while col<6: if (col==0 and row!=0)or(row==0 and col==1)or(row==0 and col==2) or (row==3 and col!=1) or (col==5 and row!=0 and row!=1 and row!=2) or(row==4 and col==3) or (row==5 and col!=4): print("*",end=" ") else: print(" ",end=" ") col +=1 row +=1 print()
none
1
3.976588
4
net/wyun/tests/db/test_mysql_host6_uxb.py
michaelyin/im2markup-prep
3
6613334
<reponame>michaelyin/im2markup-prep<gh_stars>1-10 from unittest import TestCase from net.wyun.mer.ink.sample import Sample import numpy as np import MySQLdb import _mysql ''' test mysql db ''' class TestEquation(TestCase): def setUp(self): # Open database connection self.db = MySQLdb.connect("host6", 3308, "uxb", "uxb123", "uxb", charset = 'utf8',use_unicode=True) # ending the test def tearDown(self): """Cleaning up after the test""" print "\nTest:tearDown_:begin" # disconnect from server self.db.close() def test_mysql(self): ''' test fancy index array of indices on the matrix used a lot in 2-D matrix value update :return: ''' # prepare a cursor object using cursor() method cursor = self.db.cursor() sql = "select id, scg_ink from hw_record where request_at like '2018-09-17%' order by request_at asc;" cnt = 0 try: # Execute the SQL command # id | bigint(20) | NO | PRI | NULL | auto_increment | # | create_t | datetime | NO | | NULL | | # | image_name | varchar(255) | NO | | NULL | | # | latex | varchar(255) | NO | | NULL | | # | verified | bit(1) | YES | | NULL | | # | file_name cursor.execute(sql) # Fetch all the rows in a list of lists. results = cursor.fetchall() print 'total records: ', len(results) for row in results: cnt = cnt +1 id, scg = row[0], row[1] #print 'type of verified: ', type(verified) # Now print fetched result print 'saving scg... ', id self.save_scg_2_file(cnt, scg) #if cnt>10: break except: print "Error: unable to fecth data" raise print 'total records: ', len(results) def save_scg_2_file(self, i, scg): textfile = open('temp/2017-09-17/' + str(i) + '_scg.txt', 'w') textfile.write(scg) textfile.close()
from unittest import TestCase from net.wyun.mer.ink.sample import Sample import numpy as np import MySQLdb import _mysql ''' test mysql db ''' class TestEquation(TestCase): def setUp(self): # Open database connection self.db = MySQLdb.connect("host6", 3308, "uxb", "uxb123", "uxb", charset = 'utf8',use_unicode=True) # ending the test def tearDown(self): """Cleaning up after the test""" print "\nTest:tearDown_:begin" # disconnect from server self.db.close() def test_mysql(self): ''' test fancy index array of indices on the matrix used a lot in 2-D matrix value update :return: ''' # prepare a cursor object using cursor() method cursor = self.db.cursor() sql = "select id, scg_ink from hw_record where request_at like '2018-09-17%' order by request_at asc;" cnt = 0 try: # Execute the SQL command # id | bigint(20) | NO | PRI | NULL | auto_increment | # | create_t | datetime | NO | | NULL | | # | image_name | varchar(255) | NO | | NULL | | # | latex | varchar(255) | NO | | NULL | | # | verified | bit(1) | YES | | NULL | | # | file_name cursor.execute(sql) # Fetch all the rows in a list of lists. results = cursor.fetchall() print 'total records: ', len(results) for row in results: cnt = cnt +1 id, scg = row[0], row[1] #print 'type of verified: ', type(verified) # Now print fetched result print 'saving scg... ', id self.save_scg_2_file(cnt, scg) #if cnt>10: break except: print "Error: unable to fecth data" raise print 'total records: ', len(results) def save_scg_2_file(self, i, scg): textfile = open('temp/2017-09-17/' + str(i) + '_scg.txt', 'w') textfile.write(scg) textfile.close()
en
0.588999
test mysql db # Open database connection # ending the test Cleaning up after the test # disconnect from server test fancy index array of indices on the matrix used a lot in 2-D matrix value update :return: # prepare a cursor object using cursor() method # Execute the SQL command # id | bigint(20) | NO | PRI | NULL | auto_increment | # | create_t | datetime | NO | | NULL | | # | image_name | varchar(255) | NO | | NULL | | # | latex | varchar(255) | NO | | NULL | | # | verified | bit(1) | YES | | NULL | | # | file_name # Fetch all the rows in a list of lists. #print 'type of verified: ', type(verified) # Now print fetched result #if cnt>10: break
2.990426
3
MDEncoder/utils/plot.py
alanklam/MDEncoder
0
6613335
import matplotlib.pyplot as plt
import numpy as np


def scale(X, x_min, x_max):
    """Linearly rescale array X column-wise into [x_min, x_max].

    Fix: the original cast the per-column range to int(), which both
    truncated the range and crashed on the next line, because an int
    does not support the `denom[denom == 0] = 1` indexing.
    """
    nom = (X - X.min(axis=0)) * (x_max - x_min)
    denom = X.max(axis=0) - X.min(axis=0)
    denom[denom == 0] = 1  # guard constant columns against division by zero
    return x_min + nom / denom


def plot_energy(xdata, ydata, frame=(), fname='', xylab=('X', 'Y'),
                xscale=(-1, 1, 0.2), yscale=(-1, 1, 0.2), cscale=(0, 5),
                bins=60, Temperature=300):
    """Plot a 2-D free-energy surface F = -RT*ln(P) estimated from samples.

    Parameters
    ----------
    xdata, ydata : 1-D sample arrays binned into a 2-D histogram.
    frame : indices into xdata/ydata to annotate with their 1-based order
        (default changed from a mutable [] to an equivalent immutable ()).
    fname : when non-empty, the figure is also saved to this path (dpi=200).
    xylab : axis labels; xscale/yscale : (min, max, tick_step) per axis;
    cscale : colorbar limits in kcal/mol; Temperature : in Kelvin.

    Returns
    -------
    The matplotlib Figure.
    """
    x, y = xdata, ydata
    h, xe, ye = np.histogram2d(x, y, bins=bins,
                               range=[[xscale[0], xscale[1]],
                                      [yscale[0], yscale[1]]])
    X, Y = np.meshgrid(xe, ye)
    plt.figure(figsize=(6, 5))
    RT = 0.00198588 * Temperature  # gas constant (kcal/mol/K) times T
    # Empty bins give log(0) = -inf -> F = +inf, rendered as saturated cells.
    F = -RT * np.log((h.T) / np.sum(h))
    F = F - np.min(F)  # shift so the global minimum sits at zero
    plt.pcolormesh(X, Y, F, cmap='jet')
    plt.clim(cscale[0], cscale[1])
    c = plt.colorbar()
    c.ax.set_ylabel('Kcal/mol', fontsize=20)
    for n, f in enumerate(frame):
        plt.text(x[f], y[f], str(n + 1), fontsize=26)

    def rounding(v):
        # truncate towards zero to one decimal place for tick endpoints
        return int(v * 10) / 10

    plt.xlabel(xylab[0], fontsize=22)
    plt.ylabel(xylab[1], fontsize=22)
    plt.xticks(np.arange(rounding(xscale[0]), rounding(xscale[1]) + xscale[2],
                         step=xscale[2]), fontsize=15)
    plt.yticks(np.arange(rounding(yscale[0]), rounding(yscale[1]) + yscale[2],
                         step=yscale[2]), fontsize=15)
    plt.xlim([xscale[0], xscale[1]])
    plt.ylim([yscale[0], yscale[1]])
    plt.tight_layout()
    fig = plt.gcf()
    if len(fname) > 0:
        fig.savefig(fname, dpi=200)
    return fig
import matplotlib.pyplot as plt import numpy as np def plot_energy(xdata,ydata,frame=[],fname='',xylab=('X','Y'),xscale=(-1,1,0.2),yscale=(-1,1,0.2),cscale=(0,5),bins=60,Temperature=300): x , y = xdata , ydata h , xe, ye = np.histogram2d(x,y,bins=bins,range=[[xscale[0],xscale[1]],[yscale[0],yscale[1]]]) X, Y = np.meshgrid(xe,ye) plt.figure(figsize=(6,5)) RT = 0.00198588*Temperature F = -RT*np.log((h.T)/np.sum(h)) F = F - np.min(F) plt.pcolormesh(X,Y,F, cmap='jet') plt.clim(cscale[0],cscale[1]) c = plt.colorbar() c.ax.set_ylabel('Kcal/mol',fontsize=20) for n, f in enumerate(frame): plt.text(x[f],y[f], str(n+1) ,fontsize=26) def rounding(x): return int(x*10)/10 plt.xlabel(xylab[0],fontsize=22) plt.ylabel(xylab[1],fontsize=22) plt.xticks(np.arange(rounding(xscale[0]),rounding(xscale[1])+xscale[2],step=xscale[2]),fontsize=15) plt.yticks(np.arange(rounding(yscale[0]),rounding(yscale[1])+yscale[2],step=yscale[2]),fontsize=15) plt.xlim([xscale[0],xscale[1]]) plt.ylim([yscale[0],yscale[1]]) plt.tight_layout() fig = plt.gcf() if len(fname)>0: fig.savefig(fname,dpi=200) return fig
none
1
2.542895
3
Evolutive Computing/real.py
Jelaque/Computacion-Bioinspirada
0
6613336
import random as rand
import numpy as np
import math


def scale(X, x_min, x_max):
    """Rescale X column-wise into [x_min, x_max] (utility, unused below).

    Fix: the original wrapped the per-column range in int(), which made
    the zero-range guard crash (an int is not subscriptable).
    """
    nom = (X - X.min(axis=0)) * (x_max - x_min)
    denom = X.max(axis=0) - X.min(axis=0)
    denom[denom == 0] = 1
    return x_min + nom / denom


def r_genetic(limit, dec, n_genes, N, p_cross, p_mut, func, it, mode):
    """Run a real-coded genetic algorithm for `it` generations.

    limit   : per-gene [min, max] bounds (NOTE(review): generation below
              hard-codes (-10, 10) and ignores this — TODO confirm).
    dec     : decimals kept when rounding genes.
    n_genes : genes per individual; N : population size.
    p_cross : BLX-alpha crossover probability; p_mut : mutation probability.
    func    : fitness function; mode : min or max (tournament winner rule).
    """
    population = rget_population(dec, n_genes, N, limit)
    print('Poblacion inicial')
    print_pop_initial(population)
    fitness = rdecode(population, func)
    print_fitness(population, fitness, N)
    i = 1
    it = it + 1
    while i < it:
        print('****Iteracion ', i, '****')
        parents = rmating_pool(population, fitness, mode)
        population = rselect_parents(population, parents, fitness, p_cross, p_mut)
        fitness = rdecode(population, func)
        print('Nueva Poblacion')
        print_pop_initial(population)
        print_fitness(population, fitness, N)
        i += 1


def print_pop_initial(population):
    """Print each individual preceded by its 1-based index."""
    i = 1
    for row in population:
        print(i, ')\t\t', row)
        i += 1
    print()


def print_fitness(population, fitness, n):
    """Print each individual next to its fitness value."""
    i = 1
    print('Calcular la Aptitud para cada Individudo')
    j = 0
    for row in population:
        print(i, ')\t\t', row, '\t\t', fitness[j])
        i += 1
        j += 1
    print()


def rget_population(dec, n_genes, N, limit):
    """Create N random individuals with genes mapped into (-10, 10)."""
    population = np.random.rand(N, n_genes)
    for i in range(0, 2):
        # NOTE(review): hard-coded (-10, 10) ignores `limit`; kept for
        # compatibility with the rest of the script.
        population[:, i] = np.interp(
            population[:, i],
            (min(population[:, i]), max(population[:, i])),
            (-10, 10))
    population = np.around(population, decimals=dec)
    return population


def rdecode(population, func):
    """Evaluate `func` on every individual, returning the fitness vector."""
    fitness = np.zeros(population.shape[0])
    for i in range(0, population.shape[0]):
        fitness[i] = func(population[i])
    return fitness


def rmating_pool(population, fitness, mode):
    """Binary-tournament selection: each slot keeps the better of two picks."""
    n = population.shape[0]
    parents = np.zeros(n)
    for i in range(0, n):
        a = rand.randint(0, n - 1)
        b = rand.randint(0, n - 1)
        val = mode(fitness[a], fitness[b])
        if val == fitness[a]:
            parents[i] = a
        else:
            parents[i] = b
        print(a + 1, '\t\t', b + 1, '\t\t=>\t\t', int(parents[i]) + 1,
              '\t\t=>\t\t', population[int(parents[i])])
    print()
    return parents


def rselect_parents(population, parents, fitness, p_cross, p_mut):
    """Build the next generation via BLX-alpha crossover plus uniform mutation."""
    population = np.around(population, decimals=5)
    # Fix: copy instead of aliasing — writing through an alias of
    # `population` let new children leak into the parent pool mid-generation.
    p_new = population.copy()
    n = population.shape[0]
    for i in range(0, n):
        a = rand.randint(0, n - 1)
        b = rand.randint(0, n - 1)
        r = population[int(parents[a])]
        s = population[int(parents[b])]
        print('Seleccion de Padres')
        print(a + 1, '\t', b + 1, ' => ', int(parents[a]) + 1, ' - ',
              int(parents[b]) + 1, ' => ', r, ' - ', s)
        # Fix: original tested `>= p_cross`, which crossed over with
        # probability 1 - p_cross, contradicting the printed parameters.
        if rand.uniform(0, 1) <= p_cross:
            beta1 = rand.uniform(-0.5, 1.5)
            beta2 = rand.uniform(-0.5, 1.5)
            x = [r[0], s[0]]
            y = [r[1], s[1]]
            H = abs(x[0] - x[1])
            lim = H * beta1
            v1 = round(rand.uniform(min(x) - lim, max(x) + lim), 5)
            H = abs(y[0] - y[1])
            lim = H * beta2
            v2 = round(rand.uniform(min(y) - lim, max(y) + lim), 5)
            # Fix: original wrote `v1 > 10 and v1 < -10` (always False),
            # so out-of-bounds children were never rejected.
            if v1 > 10 or v1 < -10 or v2 > 10 or v2 < -10:
                # child out of bounds: keep the fitter parent instead
                if fitness[int(parents[a])] < fitness[int(parents[b])]:
                    p_new[i] = r
                else:
                    p_new[i] = s
            else:
                p_new[i] = [v1, v2]
            print('Cruzamiento')
        else:
            if fitness[int(parents[a])] < fitness[int(parents[b])]:
                p_new[i] = r
            else:
                p_new[i] = s
            print('Sin Cruzamiento')
        print(p_new[i])
        if rand.uniform(0, 1) <= p_mut:
            d = mutation(p_new[i])
            # Fix: require BOTH genes in bounds (original accepted the
            # mutant when only one of them was).
            if -10 <= d[0] <= 10 and -10 <= d[1] <= 10:
                p_new[i] = d
                print('Mutacion')
        else:
            print('Sin mutacion')
        print(p_new[i])
        print()
    print()
    return p_new


def mutation(vec):
    """Return a mutated copy of vec, each gene perturbed uniformly by +/-0.3.

    Fix: the original mutated `vec` in place and returned the same object,
    so a mutant rejected by the bounds check was already applied anyway.
    """
    return [round(rand.uniform(g - 0.3, g + 0.3), 5) for g in vec]


p_cross = 0.75
p_mut = 0.5
n = 5000


def f(x):
    """Easom function: global minimum -1 at (pi, pi)."""
    return -math.cos(x[0]) * math.cos(x[1]) * \
        math.exp(-math.pow(x[0] - math.pi, 2) - math.pow(x[1] - math.pi, 2))


print(f([8.29, -1.21]))
print('Parametros:')
print('- Cantidad de Individuos: ', 16)
print('- Cantidad de Genes por Individuo: ', 2)
print('- Selección por torneo (2)')
print('- Probabilidad de Cruzamiento: ', p_cross)
print('- Cruzamiento BLX-Alpha, Alpha = ', 0.5)
print('- Probabilidad de Mutación: ', p_mut)
print('- Mutación Uniforme')
print('- Cantidad de Iteraciones: ', n)
r_genetic([[-100, 100], [-100, 100]], 5, 2, 16, p_cross, p_mut, f, n, min)
import random as rand import numpy as np import math def scale(X, x_min, x_max): nom = (X-X.min(axis=0))*(x_max-x_min) denom = int(X.max(axis=0) - X.min(axis=0)) denom[denom==0] = 1 return x_min + nom/denom def r_genetic(limit,dec,n_genes,N,p_cross,p_mut,func,it,mode): population = rget_population(dec,n_genes,N,limit) print('Poblacion inicial') print_pop_initial(population) fitness = rdecode(population,func) print_fitness(population,fitness,N) i = 1 it = it + 1 while i < it: print('****Iteracion ',i,'****') parents = rmating_pool(population, fitness, mode) population = rselect_parents(population, parents, fitness, p_cross, p_mut) fitness = rdecode(population,func) print('Nueva Poblacion') print_pop_initial(population) print_fitness(population,fitness,N) i += 1 def print_pop_initial(population): i = 1 for row in population: print(i,')\t\t',row) i += 1 print() def print_fitness(population,fitness,n): i = 1 print('Calcular la Aptitud para cada Individudo') j = 0 for row in population: print(i,')\t\t',row,'\t\t',fitness[j]) i += 1 j += 1 print() def rget_population(dec,n_genes,N,limit): population = np.random.rand(N,n_genes) for i in range(0,2): population[:,i] = np.interp(population[:,i], (min(population[:,i]), max(population[:,i])), (-10,10)) population = np.around(population, decimals = dec) return population def rdecode(population, func): fitness = np.zeros(population.shape[0]) for i in range(0,population.shape[0]): fitness[i] = func(population[i]) return fitness def rmating_pool(population, fitness, mode): n = population.shape[0] parents = np.zeros(n) for i in range(0,n): a = rand.randint(0,n-1) b = rand.randint(0,n-1) val = mode(fitness[a], fitness[b]) if val == fitness[a]: parents[i] = a else: parents[i] = b print(a+1,'\t\t',b+1,'\t\t=>\t\t',int(parents[i])+1,'\t\t=>\t\t',population[int(parents[i])]) print() return parents def rselect_parents(population, parents, fitness, p_cross, p_mut): population = np.around(population,decimals=5) p_new = population n = 
population.shape[0] for i in range(0,n): a = rand.randint(0,n-1) b = rand.randint(0,n-1) r = population[int(parents[a])] s = population[int(parents[b])] pcss = 0 print('Seleccion de Padres') print(a+1,'\t',b+1,' => ',int(parents[a])+1,' - ',int(parents[b])+1,' => ',r,' - ',s) if rand.uniform(0,1) >= p_cross: beta1 = rand.uniform(-0.5,1.5) beta2 = rand.uniform(-0.5,1.5) z = r x = r y = s x = [z[0],y[0]] y = [z[1],y[1]] H = abs(x[0]-x[1]) lim = H*beta1 v1 = round(rand.uniform(min(x)-lim,max(x)+lim),5) H = abs(y[0]-y[1]) lim = H*beta2 v2 = round(rand.uniform(min(y)-lim,max(y)+lim),5) if(v1 > 10 and v1 < -10) or (v2 > 10 and v2 < -10): if fitness[int(parents[a])] < fitness[int(parents[b])]: p_new[i] = r else: p_new[i] = s else: p_new[i] = [v1,v2] pcss = 1 print('Cruzamiento') else: if fitness[int(parents[a])] < fitness[int(parents[b])]: p_new[i] = r else: p_new[i] = s print('Sin Cruzamiento') print(p_new[i]) if rand.uniform(0,1) >= p_mut: d = mutation(p_new[i]) if(d[0] <= 10 and d[0] >= -10) or (d[1] <= 10 and d[1] >= -10): p_new[i] = d print('Mutacion') else: print('Sin mutacion') print(p_new[i]) print() print() return p_new def mutation(vec): for i in range(0,len(vec)): vec[i] = round(rand.uniform(vec[i]-0.3,vec[i]+0.3),5) return vec p_cross = 0.75 p_mut = 0.5 n = 5000 def f(x): return -math.cos(x[0])*math.cos(x[1])*math.exp(-math.pow(x[0]-math.pi,2)-math.pow(x[1]-math.pi,2)) print(f([8.29,-1.21])) print('Parametros:') print('- Cantidad de Individuos: ',16) print('- Cantidad de Genes por Individuo: ',2) print('- Selección por torneo (2)') print('- Probabilidad de Cruzamiento: ',p_cross) print('- Cruzamiento BLX-Alpha, Alpha = ',0.5) print('- Probabilidad de Mutación: ',p_mut) print('- Mutación Uniforme') print('- Cantidad de Iteraciones: ',n) r_genetic([[-100,100],[-100,100]],5,2,16,p_cross,p_mut,f,n,min)
none
1
3.175378
3
code/global_coherence/step-1-1-slc2bursts.py
RichardScottOZ/openSAR
0
6613337
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import sys import shutil import time import numpy as np import py_gamma as pg import atexit import glob import zipfile import json def myargsparse(): import argparse thisprog=os.path.basename(sys.argv[0]) ############################################################################ # define some strings for parsing and help ############################################################################ epilog=\ """********************************************************************************************************** \n* GAMMA S1 InSAR processor, v1.0, 2020-12-14, oc * \n* Import a list of S1 SLCs from the same datatake and store as individual bursts * \n* * \n* Input and options: * \n* 1) SLC zipfile * \n* 2) Output directory * \n* 3) Directory containing S1 precision orbits AUX_POEORB (optional) * \n* * \n* Output: * \n* Burst SLCs and associated par and tops_par files * \n* * \n*********************************************************************************************************** \nEXAMPLES: \n{thisprog} -l $PATH/slclist* -o /home/user/ -p /nas/qc.sentinel1.eo.esa.int/aux_poeorb/ """.format(thisprog=thisprog) help_slclist=\ '''List of S1 SLC zipfiles''' help_outdir=\ '''Output directory ''' help_porb=\ '''Directory containing S1 precision orbits AUX_POEORB''' help_q=\ '''Verbose Mode''' help_r=\ '''Create temporary directory in ramdisk Mode''' p = argparse.ArgumentParser(usage=None,description=epilog,prog=thisprog,formatter_class=argparse.RawDescriptionHelpFormatter) p.add_argument("-o",required=False,help=help_outdir,action='store',dest='outdir',default=os.getcwd()) p.add_argument("-z",required=True,help=help_slclist,action='store',dest='ziplist',default=None,nargs='*') p.add_argument("-p",required=False,help=help_porb,action='store',dest='porb',default=False) p.add_argument("-v","--verbose",required=False,help=help_q,action='store_true',default=False) 
p.add_argument("-r","--ramdisk",required=False,help=help_r,action='store_true',default=False) args=p.parse_args() args.outdir = args.outdir.rstrip('/') + '/' return args ######################################################################### # Function to be called by atexit when script stops def delfun(dirname): shutil.rmtree(dirname) ######################################################################### def S1_import(args): # Start time start = time.time() ########################################################## # Define Input/Output filenames/processing parameters # ########################################################## ziplist = args.ziplist # List of S1 zipfiles outdir = args.outdir # Output directory porb = args.porb # Precision orbits q = args.verbose # Verbose mode ramdisk = args.ramdisk # Use ramdisk outdir=outdir.rstrip('/') ziplist=list(ziplist) imno=len(ziplist) ints=10 intv=10 if imno<11: ints=int(100/(imno)) intv=int(100/(imno)) print('Import SLCs [%]: 0..', end='', flush=True) for i,f in enumerate(ziplist): f=f.rstrip() # Obtain information from filename filename=f.rpartition('/')[-1] sat=filename.split('_')[0] mode=filename.split('_')[4][-2:] acqtime=filename.split('_')[5] acqdate=filename.split('_')[5].split('T')[0] orbit=filename.split('_')[7] datatake=filename.split('_')[8] # Define second polarization if mode == 'DV': pols=['vv','vh'] elif mode == 'DH': pols=['hh','hv'] elif mode == 'SH': pols='hh' elif mode == 'SV': pols='vv' elif mode == 'HH': pols='hh' elif mode == 'VV': pols='vv' elif mode == 'HV': pols='hv' elif mode == 'VH': pols='vh' # Output directory outdir = outdir.rstrip('/') + '/' if os.path.isdir(outdir) == False: os.mkdir(outdir) # Define temporary working directory and filenames tfile = orbit + '_' + datatake + '_' + acqtime if i==0: if ramdisk: tpath = '/dev/shm/gamma_' + tfile + '_' + str(abs(np.random.randn())).replace('.','') + '/' else: tpath = outdir + '/gamma_' + tfile + '_' + 
str(abs(np.random.randn())).replace('.','') + '/' os.mkdir(tpath) os.chdir(tpath) # Delete temporary working directory when script stops atexit.register(delfun, tpath) # Define name of log files logout= outdir + tfile + '.log' errout= outdir + tfile + '.err' # Determine relative orbit if sat=='S1B': relpath=(int(int(orbit)-26-175*(np.floor((int(orbit)-27)/175)))) elif sat=='S1A': relpath=(int(int(orbit)-72-175*(np.floor((int(orbit)-73)/175)))) relpath=f'{relpath:03d}' # Unzip z=zipfile.ZipFile(f) product_path=tpath + filename.rstrip('zip') + 'SAFE' refburst_resource='/cluster/raid/home/oliver/Scripts/JPL/sentinel1_reference_burst_offsets.json' with open(refburst_resource,'r') as f: refbursts=json.load(f) # Import GRD for p in pols: for iw in ['iw1','iw2','iw3']: flist=[x.filename for x in z.filelist if not x.is_dir() and x.filename.find(p) > -1 and x.filename.find(iw) > -1] for u in flist: z.extract(u) # Input tiff = glob.glob( product_path + '/measurement/*' + iw + '*' + p + '*tiff' )[0] lead1 = glob.glob( product_path + '/annotation/*' + iw + '*' + p + '*xml' )[0] lead2 = glob.glob( product_path + '/annotation/calibration/calibration-*' + iw + '*' + p + '*xml' )[0] lead3 = glob.glob( product_path + '/annotation/calibration/noise-*' + iw + '*' + p + '*xml' )[0] # Output slc = tpath + tfile + '_' + p + '_' + iw + '.slc' slcpar = tpath + tfile + '_' + p + '_' + iw + '.slc.par' slctpar = tpath + tfile + '_' + p + '_' + iw + '.slc.tops_par' # Import SLC pg.par_S1_SLC(tiff,lead1,lead2,lead3,slcpar,slc,slctpar,1,'-', logf = logout, errf = errout, stdout_flag = q, stderr_flag = q) # Update orbit state vectors if porb: if os.path.isdir(porb): pg.OPOD_vec(slcpar,porb, logf = logout, errf = errout, stdout_flag = q, stderr_flag = q) # Create tabfile full_tabf=tpath + tfile + '_' + iw + '_' + p + '_fulltab' full_tab=[slc, slcpar, slctpar] pg.write_tab(full_tab, full_tabf) # Copy individual bursts pardict=pg.ParFile(slctpar) nobursts=int(pardict.get_value('number_of_bursts')) 
for b in range(1,nobursts+1): burstid = pardict.get_value('burst_asc_node_' + str(b))[0] burstid = str(np.floor(100*np.float32(burstid))/100) s1path=relpath.lstrip('0') refburst=float(refbursts[str(s1path)][iw[-1]]) burstid_integer=np.floor(float(burstid)) burstid_decimal=float(burstid)-burstid_integer # Three cases # 1. burstid just below integer if burstid_decimal > 0.9 and refburst< 0.1: burstid_integer+=1 burstid=str(int((burstid_integer+refburst)*100)) # 2. burstid just above integer elif burstid_decimal < 0.1 and refburst > 0.9: burstid_integer-=1 burstid=str(int((burstid_integer+refburst)*100)) # 3. burstid and refburst in same integer range else: burstid=str(int((burstid_integer+refburst)*100)) slcb = outdir + '/' + relpath + '_' + iw + '_' + p + '_' + burstid + '_' + acqdate + '.slc' slcbpar = outdir + '/' + relpath + '_' + iw + '_' + p + '_' + burstid + '_' + acqdate + '.slc.par' slcbtpar = outdir + '/' + relpath + '_' + iw + '_' + p + '_' + burstid + '_' + acqdate + '.slc.tops_par' out_tab=[slcb, slcbpar, slcbtpar] out_tabf=tpath + tfile + '_' + iw + '_' + p + '_' + burstid + '_tab' pg.write_tab(out_tab, out_tabf) # Create burst tab bursttabf=tpath + tfile + '_' + iw + '_' + p + '_bursttab' bursttab=[b,b] pg.write_tab(bursttab, bursttabf) pg.SLC_copy_ScanSAR(full_tabf,out_tabf, bursttabf,1, logf = logout, errf = errout, stdout_flag = q, stderr_flag = q) os.remove(out_tabf) os.remove(bursttabf) os.remove(full_tabf) os.remove(slc) os.remove(slcpar) os.remove(slctpar) shutil.rmtree(product_path) prct_coreg=np.floor(100*i/imno) if prct_coreg>=ints: print(str(ints), end='..', flush=True) ints=ints+intv print(('100 Done')) ######################################################################################### # Compute and display execution time # ######################################################################################### end = time.time() diff=end-start print(('Processed in ' + str(diff) + ' s')) 
######################################################################### def main(): args=myargsparse() S1_import(args) if __name__ == '__main__': main()
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import sys import shutil import time import numpy as np import py_gamma as pg import atexit import glob import zipfile import json def myargsparse(): import argparse thisprog=os.path.basename(sys.argv[0]) ############################################################################ # define some strings for parsing and help ############################################################################ epilog=\ """********************************************************************************************************** \n* GAMMA S1 InSAR processor, v1.0, 2020-12-14, oc * \n* Import a list of S1 SLCs from the same datatake and store as individual bursts * \n* * \n* Input and options: * \n* 1) SLC zipfile * \n* 2) Output directory * \n* 3) Directory containing S1 precision orbits AUX_POEORB (optional) * \n* * \n* Output: * \n* Burst SLCs and associated par and tops_par files * \n* * \n*********************************************************************************************************** \nEXAMPLES: \n{thisprog} -l $PATH/slclist* -o /home/user/ -p /nas/qc.sentinel1.eo.esa.int/aux_poeorb/ """.format(thisprog=thisprog) help_slclist=\ '''List of S1 SLC zipfiles''' help_outdir=\ '''Output directory ''' help_porb=\ '''Directory containing S1 precision orbits AUX_POEORB''' help_q=\ '''Verbose Mode''' help_r=\ '''Create temporary directory in ramdisk Mode''' p = argparse.ArgumentParser(usage=None,description=epilog,prog=thisprog,formatter_class=argparse.RawDescriptionHelpFormatter) p.add_argument("-o",required=False,help=help_outdir,action='store',dest='outdir',default=os.getcwd()) p.add_argument("-z",required=True,help=help_slclist,action='store',dest='ziplist',default=None,nargs='*') p.add_argument("-p",required=False,help=help_porb,action='store',dest='porb',default=False) p.add_argument("-v","--verbose",required=False,help=help_q,action='store_true',default=False) 
p.add_argument("-r","--ramdisk",required=False,help=help_r,action='store_true',default=False) args=p.parse_args() args.outdir = args.outdir.rstrip('/') + '/' return args ######################################################################### # Function to be called by atexit when script stops def delfun(dirname): shutil.rmtree(dirname) ######################################################################### def S1_import(args): # Start time start = time.time() ########################################################## # Define Input/Output filenames/processing parameters # ########################################################## ziplist = args.ziplist # List of S1 zipfiles outdir = args.outdir # Output directory porb = args.porb # Precision orbits q = args.verbose # Verbose mode ramdisk = args.ramdisk # Use ramdisk outdir=outdir.rstrip('/') ziplist=list(ziplist) imno=len(ziplist) ints=10 intv=10 if imno<11: ints=int(100/(imno)) intv=int(100/(imno)) print('Import SLCs [%]: 0..', end='', flush=True) for i,f in enumerate(ziplist): f=f.rstrip() # Obtain information from filename filename=f.rpartition('/')[-1] sat=filename.split('_')[0] mode=filename.split('_')[4][-2:] acqtime=filename.split('_')[5] acqdate=filename.split('_')[5].split('T')[0] orbit=filename.split('_')[7] datatake=filename.split('_')[8] # Define second polarization if mode == 'DV': pols=['vv','vh'] elif mode == 'DH': pols=['hh','hv'] elif mode == 'SH': pols='hh' elif mode == 'SV': pols='vv' elif mode == 'HH': pols='hh' elif mode == 'VV': pols='vv' elif mode == 'HV': pols='hv' elif mode == 'VH': pols='vh' # Output directory outdir = outdir.rstrip('/') + '/' if os.path.isdir(outdir) == False: os.mkdir(outdir) # Define temporary working directory and filenames tfile = orbit + '_' + datatake + '_' + acqtime if i==0: if ramdisk: tpath = '/dev/shm/gamma_' + tfile + '_' + str(abs(np.random.randn())).replace('.','') + '/' else: tpath = outdir + '/gamma_' + tfile + '_' + 
str(abs(np.random.randn())).replace('.','') + '/' os.mkdir(tpath) os.chdir(tpath) # Delete temporary working directory when script stops atexit.register(delfun, tpath) # Define name of log files logout= outdir + tfile + '.log' errout= outdir + tfile + '.err' # Determine relative orbit if sat=='S1B': relpath=(int(int(orbit)-26-175*(np.floor((int(orbit)-27)/175)))) elif sat=='S1A': relpath=(int(int(orbit)-72-175*(np.floor((int(orbit)-73)/175)))) relpath=f'{relpath:03d}' # Unzip z=zipfile.ZipFile(f) product_path=tpath + filename.rstrip('zip') + 'SAFE' refburst_resource='/cluster/raid/home/oliver/Scripts/JPL/sentinel1_reference_burst_offsets.json' with open(refburst_resource,'r') as f: refbursts=json.load(f) # Import GRD for p in pols: for iw in ['iw1','iw2','iw3']: flist=[x.filename for x in z.filelist if not x.is_dir() and x.filename.find(p) > -1 and x.filename.find(iw) > -1] for u in flist: z.extract(u) # Input tiff = glob.glob( product_path + '/measurement/*' + iw + '*' + p + '*tiff' )[0] lead1 = glob.glob( product_path + '/annotation/*' + iw + '*' + p + '*xml' )[0] lead2 = glob.glob( product_path + '/annotation/calibration/calibration-*' + iw + '*' + p + '*xml' )[0] lead3 = glob.glob( product_path + '/annotation/calibration/noise-*' + iw + '*' + p + '*xml' )[0] # Output slc = tpath + tfile + '_' + p + '_' + iw + '.slc' slcpar = tpath + tfile + '_' + p + '_' + iw + '.slc.par' slctpar = tpath + tfile + '_' + p + '_' + iw + '.slc.tops_par' # Import SLC pg.par_S1_SLC(tiff,lead1,lead2,lead3,slcpar,slc,slctpar,1,'-', logf = logout, errf = errout, stdout_flag = q, stderr_flag = q) # Update orbit state vectors if porb: if os.path.isdir(porb): pg.OPOD_vec(slcpar,porb, logf = logout, errf = errout, stdout_flag = q, stderr_flag = q) # Create tabfile full_tabf=tpath + tfile + '_' + iw + '_' + p + '_fulltab' full_tab=[slc, slcpar, slctpar] pg.write_tab(full_tab, full_tabf) # Copy individual bursts pardict=pg.ParFile(slctpar) nobursts=int(pardict.get_value('number_of_bursts')) 
for b in range(1,nobursts+1): burstid = pardict.get_value('burst_asc_node_' + str(b))[0] burstid = str(np.floor(100*np.float32(burstid))/100) s1path=relpath.lstrip('0') refburst=float(refbursts[str(s1path)][iw[-1]]) burstid_integer=np.floor(float(burstid)) burstid_decimal=float(burstid)-burstid_integer # Three cases # 1. burstid just below integer if burstid_decimal > 0.9 and refburst< 0.1: burstid_integer+=1 burstid=str(int((burstid_integer+refburst)*100)) # 2. burstid just above integer elif burstid_decimal < 0.1 and refburst > 0.9: burstid_integer-=1 burstid=str(int((burstid_integer+refburst)*100)) # 3. burstid and refburst in same integer range else: burstid=str(int((burstid_integer+refburst)*100)) slcb = outdir + '/' + relpath + '_' + iw + '_' + p + '_' + burstid + '_' + acqdate + '.slc' slcbpar = outdir + '/' + relpath + '_' + iw + '_' + p + '_' + burstid + '_' + acqdate + '.slc.par' slcbtpar = outdir + '/' + relpath + '_' + iw + '_' + p + '_' + burstid + '_' + acqdate + '.slc.tops_par' out_tab=[slcb, slcbpar, slcbtpar] out_tabf=tpath + tfile + '_' + iw + '_' + p + '_' + burstid + '_tab' pg.write_tab(out_tab, out_tabf) # Create burst tab bursttabf=tpath + tfile + '_' + iw + '_' + p + '_bursttab' bursttab=[b,b] pg.write_tab(bursttab, bursttabf) pg.SLC_copy_ScanSAR(full_tabf,out_tabf, bursttabf,1, logf = logout, errf = errout, stdout_flag = q, stderr_flag = q) os.remove(out_tabf) os.remove(bursttabf) os.remove(full_tabf) os.remove(slc) os.remove(slcpar) os.remove(slctpar) shutil.rmtree(product_path) prct_coreg=np.floor(100*i/imno) if prct_coreg>=ints: print(str(ints), end='..', flush=True) ints=ints+intv print(('100 Done')) ######################################################################################### # Compute and display execution time # ######################################################################################### end = time.time() diff=end-start print(('Processed in ' + str(diff) + ' s')) 
######################################################################### def main(): args=myargsparse() S1_import(args) if __name__ == '__main__': main()
de
0.291475
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ############################################################################ # define some strings for parsing and help ############################################################################ ********************************************************************************************************** \n* GAMMA S1 InSAR processor, v1.0, 2020-12-14, oc * \n* Import a list of S1 SLCs from the same datatake and store as individual bursts * \n* * \n* Input and options: * \n* 1) SLC zipfile * \n* 2) Output directory * \n* 3) Directory containing S1 precision orbits AUX_POEORB (optional) * \n* * \n* Output: * \n* Burst SLCs and associated par and tops_par files * \n* * \n*********************************************************************************************************** \nEXAMPLES: \n{thisprog} -l $PATH/slclist* -o /home/user/ -p /nas/qc.sentinel1.eo.esa.int/aux_poeorb/ List of S1 SLC zipfiles Output directory Directory containing S1 precision orbits AUX_POEORB Verbose Mode Create temporary directory in ramdisk Mode ######################################################################### # Function to be called by atexit when script stops ######################################################################### # Start time ########################################################## # Define Input/Output filenames/processing parameters # ########################################################## # List of S1 zipfiles # Output directory # Precision orbits # Verbose mode # Use ramdisk # Obtain information from filename # Define second polarization # Output directory # Define temporary working directory and filenames # Delete temporary working directory when script stops # Define name of log files # Determine relative orbit # Unzip # Import GRD # Input # Output # Import SLC # Update orbit state vectors # Create tabfile # Copy individual bursts # Three cases # 1. burstid just below integer # 2. 
burstid just above integer # 3. burstid and refburst in same integer range # Create burst tab ######################################################################################### # Compute and display execution time # ######################################################################################### #########################################################################
2.411273
2
example/urls.py
ifanrx/django-ajax-upload-widget
47
6613338
from django.conf.urls.defaults import patterns, url urlpatterns = patterns('example.views', url(r'^add/$', 'add_edit_product', name='example-add-product'), url(r'^edit/(?P<product_id>\d+)/$', 'add_edit_product', name='example-edit-product'), )
from django.conf.urls.defaults import patterns, url urlpatterns = patterns('example.views', url(r'^add/$', 'add_edit_product', name='example-add-product'), url(r'^edit/(?P<product_id>\d+)/$', 'add_edit_product', name='example-edit-product'), )
none
1
1.782382
2
projectq/cengines/_basicmapper_test.py
ssc1729/ProjectQ
795
6613339
<gh_stars>100-1000 # -*- coding: utf-8 -*- # Copyright 2018 ProjectQ-Framework (www.projectq.ch) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for projectq.cengines._basicmapper.py.""" from projectq.cengines import DummyEngine, _basicmapper from projectq.meta import LogicalQubitIDTag from projectq.ops import Allocate, BasicGate, Command, Deallocate, FlushGate, Measure from projectq.types import WeakQubitRef def test_basic_mapper_engine_send_cmd_with_mapped_ids(): mapper = _basicmapper.BasicMapperEngine() mapper.current_mapping = {0: 3, 1: 2, 2: 1, 3: 0} backend = DummyEngine(save_commands=True) backend.is_last_engine = True mapper.next_engine = backend # generate a few commands qb0 = WeakQubitRef(engine=None, idx=0) qb1 = WeakQubitRef(engine=None, idx=1) qb2 = WeakQubitRef(engine=None, idx=2) qb3 = WeakQubitRef(engine=None, idx=3) cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0],), controls=[], tags=[]) cmd1 = Command(engine=None, gate=Deallocate, qubits=([qb1],), controls=[], tags=[]) cmd2 = Command(engine=None, gate=Measure, qubits=([qb2],), controls=[], tags=["SomeTag"]) cmd3 = Command( engine=None, gate=BasicGate(), qubits=([qb0, qb1], [qb2]), controls=[qb3], tags=[], ) cmd4 = Command(None, FlushGate(), ([WeakQubitRef(None, -1)],)) mapper._send_cmd_with_mapped_ids(cmd0) mapper._send_cmd_with_mapped_ids(cmd1) mapper._send_cmd_with_mapped_ids(cmd2) mapper._send_cmd_with_mapped_ids(cmd3) mapper._send_cmd_with_mapped_ids(cmd4) rcmd0 = 
backend.received_commands[0] rcmd1 = backend.received_commands[1] rcmd2 = backend.received_commands[2] rcmd3 = backend.received_commands[3] rcmd4 = backend.received_commands[4] assert rcmd0.gate == Allocate assert rcmd0.qubits == ([qb3],) assert rcmd1.gate == Deallocate assert rcmd1.qubits == ([qb2],) assert rcmd2.gate == Measure assert rcmd2.qubits == ([qb1],) assert rcmd2.tags == ["SomeTag", LogicalQubitIDTag(2)] assert rcmd3.gate == BasicGate() assert rcmd3.qubits == ([qb3, qb2], [qb1]) assert rcmd3.control_qubits == [qb0] assert len(rcmd4.qubits) == 1 assert len(rcmd4.qubits[0]) == 1 assert rcmd4.qubits[0][0].id == -1
# -*- coding: utf-8 -*- # Copyright 2018 ProjectQ-Framework (www.projectq.ch) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for projectq.cengines._basicmapper.py.""" from projectq.cengines import DummyEngine, _basicmapper from projectq.meta import LogicalQubitIDTag from projectq.ops import Allocate, BasicGate, Command, Deallocate, FlushGate, Measure from projectq.types import WeakQubitRef def test_basic_mapper_engine_send_cmd_with_mapped_ids(): mapper = _basicmapper.BasicMapperEngine() mapper.current_mapping = {0: 3, 1: 2, 2: 1, 3: 0} backend = DummyEngine(save_commands=True) backend.is_last_engine = True mapper.next_engine = backend # generate a few commands qb0 = WeakQubitRef(engine=None, idx=0) qb1 = WeakQubitRef(engine=None, idx=1) qb2 = WeakQubitRef(engine=None, idx=2) qb3 = WeakQubitRef(engine=None, idx=3) cmd0 = Command(engine=None, gate=Allocate, qubits=([qb0],), controls=[], tags=[]) cmd1 = Command(engine=None, gate=Deallocate, qubits=([qb1],), controls=[], tags=[]) cmd2 = Command(engine=None, gate=Measure, qubits=([qb2],), controls=[], tags=["SomeTag"]) cmd3 = Command( engine=None, gate=BasicGate(), qubits=([qb0, qb1], [qb2]), controls=[qb3], tags=[], ) cmd4 = Command(None, FlushGate(), ([WeakQubitRef(None, -1)],)) mapper._send_cmd_with_mapped_ids(cmd0) mapper._send_cmd_with_mapped_ids(cmd1) mapper._send_cmd_with_mapped_ids(cmd2) mapper._send_cmd_with_mapped_ids(cmd3) mapper._send_cmd_with_mapped_ids(cmd4) rcmd0 = backend.received_commands[0] rcmd1 = 
backend.received_commands[1] rcmd2 = backend.received_commands[2] rcmd3 = backend.received_commands[3] rcmd4 = backend.received_commands[4] assert rcmd0.gate == Allocate assert rcmd0.qubits == ([qb3],) assert rcmd1.gate == Deallocate assert rcmd1.qubits == ([qb2],) assert rcmd2.gate == Measure assert rcmd2.qubits == ([qb1],) assert rcmd2.tags == ["SomeTag", LogicalQubitIDTag(2)] assert rcmd3.gate == BasicGate() assert rcmd3.qubits == ([qb3, qb2], [qb1]) assert rcmd3.control_qubits == [qb0] assert len(rcmd4.qubits) == 1 assert len(rcmd4.qubits[0]) == 1 assert rcmd4.qubits[0][0].id == -1
en
0.842128
# -*- coding: utf-8 -*- # Copyright 2018 ProjectQ-Framework (www.projectq.ch) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for projectq.cengines._basicmapper.py. # generate a few commands
1.87275
2
yunionclient/api/cloudkubeclusters.py
yunionyun/python_yunionsdk
3
6613340
<filename>yunionclient/api/cloudkubeclusters.py<gh_stars>1-10 from yunionclient.common import base class CloudKubeCluster(base.ResourceBase): pass class CloudKubeClusterManager(base.StandaloneManager): resource_class = CloudKubeCluster keyword = 'cloud_kube_cluster' keyword_plural = 'cloud_kube_clusters' _columns = ["Id", "Name", "Description"]
<filename>yunionclient/api/cloudkubeclusters.py<gh_stars>1-10 from yunionclient.common import base class CloudKubeCluster(base.ResourceBase): pass class CloudKubeClusterManager(base.StandaloneManager): resource_class = CloudKubeCluster keyword = 'cloud_kube_cluster' keyword_plural = 'cloud_kube_clusters' _columns = ["Id", "Name", "Description"]
none
1
1.805372
2
main/migrations/0042_auto_20200821_1342.py
alecstein/mataroa-personal
30
6613341
<gh_stars>10-100 # Generated by Django 3.1 on 2020-08-21 13:42 import uuid from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("main", "0041_auto_20200820_2107"), ] operations = [ migrations.AlterField( model_name="postnotification", name="unsubscribe_key", field=models.UUIDField(default=uuid.uuid4, unique=True), ), ]
# Generated by Django 3.1 on 2020-08-21 13:42 import uuid from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("main", "0041_auto_20200820_2107"), ] operations = [ migrations.AlterField( model_name="postnotification", name="unsubscribe_key", field=models.UUIDField(default=uuid.uuid4, unique=True), ), ]
en
0.814931
# Generated by Django 3.1 on 2020-08-21 13:42
1.553744
2
test/__init__.py
JukeboxPipeline/jukebox-core
2
6613342
""" The tests module holds all unittests that we run. To run them use tox. Just go to the project root and run the tox command. Because some tools and modules need access to the database we have to establish a test database. This is automatically done, when djadapter is imported and the tesing environment is initialized (env var JUKEBOX_TESTING). The test db name will be saved in another env var ``TEST_DB``. The test db name will be the name of the default database preceded by ``test_``. As for now, we do not destroy the test db at the end of the test. It will be destroyed when the test runs again. This is easier, when running multiple python instances simultaniously. """ import os os.environ['JUKEBOX_TESTING'] = 'True'
""" The tests module holds all unittests that we run. To run them use tox. Just go to the project root and run the tox command. Because some tools and modules need access to the database we have to establish a test database. This is automatically done, when djadapter is imported and the tesing environment is initialized (env var JUKEBOX_TESTING). The test db name will be saved in another env var ``TEST_DB``. The test db name will be the name of the default database preceded by ``test_``. As for now, we do not destroy the test db at the end of the test. It will be destroyed when the test runs again. This is easier, when running multiple python instances simultaniously. """ import os os.environ['JUKEBOX_TESTING'] = 'True'
en
0.794782
The tests module holds all unittests that we run. To run them use tox. Just go to the project root and run the tox command. Because some tools and modules need access to the database we have to establish a test database. This is automatically done, when djadapter is imported and the tesing environment is initialized (env var JUKEBOX_TESTING). The test db name will be saved in another env var ``TEST_DB``. The test db name will be the name of the default database preceded by ``test_``. As for now, we do not destroy the test db at the end of the test. It will be destroyed when the test runs again. This is easier, when running multiple python instances simultaniously.
2.428664
2
data/preprocess_persona-chat.py
adamlin120/lm
0
6613343
<gh_stars>0 import json from pathlib import Path SPLITS = ["train", "valid", "test"] SILENCE = "__ SILENCE __" def main(): dir = Path("./persona-chat/personachat_self_original.json") output_path = Path(f"persona.processed.json") output_debug_path = Path(f"persona.processed.debug.json") raw_data = json.loads(dir.read_text()) datasets = {k: {} for k in SPLITS} for split, dials in raw_data.items(): num_instance = len(dials) for i, dial in enumerate(dials): if split == "valid" and i > num_instance // 2: split = "test" dial_idx = f"{split}_{i}" turns = dial["utterances"][-1]["history"] turns.append(dial["utterances"][-1]["candidates"][-1]) if any(turn == SILENCE for turn in turns): continue datasets[split][dial_idx] = turns assert all(split in datasets for split in SPLITS) output_path.write_text(json.dumps(datasets, indent=2)) debug_dataset = { split: {dial_idx: turns for dial_idx, turns in list(dataset.items())[:10]} for split, dataset in datasets.items() } output_debug_path.write_text(json.dumps(debug_dataset, indent=2)) if __name__ == "__main__": main()
import json from pathlib import Path SPLITS = ["train", "valid", "test"] SILENCE = "__ SILENCE __" def main(): dir = Path("./persona-chat/personachat_self_original.json") output_path = Path(f"persona.processed.json") output_debug_path = Path(f"persona.processed.debug.json") raw_data = json.loads(dir.read_text()) datasets = {k: {} for k in SPLITS} for split, dials in raw_data.items(): num_instance = len(dials) for i, dial in enumerate(dials): if split == "valid" and i > num_instance // 2: split = "test" dial_idx = f"{split}_{i}" turns = dial["utterances"][-1]["history"] turns.append(dial["utterances"][-1]["candidates"][-1]) if any(turn == SILENCE for turn in turns): continue datasets[split][dial_idx] = turns assert all(split in datasets for split in SPLITS) output_path.write_text(json.dumps(datasets, indent=2)) debug_dataset = { split: {dial_idx: turns for dial_idx, turns in list(dataset.items())[:10]} for split, dataset in datasets.items() } output_debug_path.write_text(json.dumps(debug_dataset, indent=2)) if __name__ == "__main__": main()
none
1
2.749389
3
authors/apps/articles/tests/test_reviews.py
andela/ah-the-answer-backend
0
6613344
from .commons import * class TestReviewModel(BaseSetup): def setUp(self): user = User.objects.create(username="johndoe") article = Article.objects.create( title="Test title", body="This is a very awesome article on testing tests", description="Written by testing tester", tags=[], author=user ) self.review_body = "<NAME>" self.rating_value = int(5) self.review = ReviewsModel( article=article, review_body=self.review_body, rating_value=self.rating_value, reviewed_by=user, ) def test_model_can_create_review(self): """Test whether model can create a record""" initial = ReviewsModel.objects.count() self.review.save() updated = ReviewsModel.objects.count() self.assertNotEqual(initial, updated) class ReviewTestCase(BaseSetup): def test_user_can_create_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_user_can_get_reviews(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.get( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), format="json" ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertContains(response, "I really liked the article") def test_user_can_edit_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.put(reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, 'username': "Pete" }), data={ "review": { "review_body": "I did not liked the article", "rating_value": 3 } }, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_user_can_delete_review(self): response = 
self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.delete(reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, 'username': "Pete" })) self.assertEqual(response.status_code, status.HTTP_200_OK) response_delete = self.client2.get( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), format="json" ) self.assertEqual(response_delete.status_code, status.HTTP_404_NOT_FOUND) self.assertRaises(Exception) def test_unauthenticated_user_cant_create_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.client2.logout() response = self.post_review() self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_review_creation_with_wrong_values(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.post( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), data={ "review": { "review_body": "I really liked the article", "rating_value": True } }, format="json" ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_user_cannot_review_own_article(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client.post( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), data={ "review": { "review_body": "I really liked the article", "rating_value": 5 } }, format="json" ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_user_cant_edit_non_existent_review(self): self.test_user_can_delete_review() response = self.client2.delete(reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, 'username': User.objects.filter(username="Pete").first() })) 
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) self.assertRaises(Exception) def test_user_cant_review_non_existent_article(self): response = self.client2.post( reverse('articles:review', kwargs={ "slug": "fake_article" }), data={ "review": { "review_body": "I really liked the article", "rating_value": 5 } }, format="json" ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) self.assertRaises(Exception) def test_malformed_url(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client.put( reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, "username": "None" }), data={ "review": { "review_body": "I really liked the article", "rating_value": 5 } }, format="json" ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_reviewer_cannot_double_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response_duplicate = self.post_review() self.assertEqual(response_duplicate.status_code, status.HTTP_403_FORBIDDEN) def test_user_cant_get_inexistent_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.get( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), format="json" ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_user_cant_edit_anothers_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response2 = self.post_review2() self.assertEqual(response2.status_code, status.HTTP_201_CREATED) edit_response = self.client2.put(reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, 'username': 
"Jane" }), data={ "review": { "review_body": "I did not liked the article", "rating_value": 3 } }, format="json") self.assertEqual(edit_response.status_code, status.HTTP_403_FORBIDDEN)
from .commons import * class TestReviewModel(BaseSetup): def setUp(self): user = User.objects.create(username="johndoe") article = Article.objects.create( title="Test title", body="This is a very awesome article on testing tests", description="Written by testing tester", tags=[], author=user ) self.review_body = "<NAME>" self.rating_value = int(5) self.review = ReviewsModel( article=article, review_body=self.review_body, rating_value=self.rating_value, reviewed_by=user, ) def test_model_can_create_review(self): """Test whether model can create a record""" initial = ReviewsModel.objects.count() self.review.save() updated = ReviewsModel.objects.count() self.assertNotEqual(initial, updated) class ReviewTestCase(BaseSetup): def test_user_can_create_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_user_can_get_reviews(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.get( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), format="json" ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertContains(response, "I really liked the article") def test_user_can_edit_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.put(reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, 'username': "Pete" }), data={ "review": { "review_body": "I did not liked the article", "rating_value": 3 } }, format="json") self.assertEqual(response.status_code, status.HTTP_200_OK) def test_user_can_delete_review(self): response = 
self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.delete(reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, 'username': "Pete" })) self.assertEqual(response.status_code, status.HTTP_200_OK) response_delete = self.client2.get( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), format="json" ) self.assertEqual(response_delete.status_code, status.HTTP_404_NOT_FOUND) self.assertRaises(Exception) def test_unauthenticated_user_cant_create_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.client2.logout() response = self.post_review() self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_review_creation_with_wrong_values(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.post( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), data={ "review": { "review_body": "I really liked the article", "rating_value": True } }, format="json" ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_user_cannot_review_own_article(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client.post( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), data={ "review": { "review_body": "I really liked the article", "rating_value": 5 } }, format="json" ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_user_cant_edit_non_existent_review(self): self.test_user_can_delete_review() response = self.client2.delete(reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, 'username': User.objects.filter(username="Pete").first() })) 
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) self.assertRaises(Exception) def test_user_cant_review_non_existent_article(self): response = self.client2.post( reverse('articles:review', kwargs={ "slug": "fake_article" }), data={ "review": { "review_body": "I really liked the article", "rating_value": 5 } }, format="json" ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) self.assertRaises(Exception) def test_malformed_url(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client.put( reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, "username": "None" }), data={ "review": { "review_body": "I really liked the article", "rating_value": 5 } }, format="json" ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_reviewer_cannot_double_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response_duplicate = self.post_review() self.assertEqual(response_duplicate.status_code, status.HTTP_403_FORBIDDEN) def test_user_cant_get_inexistent_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client2.get( reverse('articles:review', kwargs={ "slug": Article.objects.get().slug }), format="json" ) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) def test_user_cant_edit_anothers_review(self): response = self.post_article() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.post_review() self.assertEqual(response.status_code, status.HTTP_201_CREATED) response2 = self.post_review2() self.assertEqual(response2.status_code, status.HTTP_201_CREATED) edit_response = self.client2.put(reverse('articles:alter-review', kwargs={ "slug": Article.objects.get().slug, 'username': 
"Jane" }), data={ "review": { "review_body": "I did not liked the article", "rating_value": 3 } }, format="json") self.assertEqual(edit_response.status_code, status.HTTP_403_FORBIDDEN)
en
0.457368
Test whether model can create a record
2.415828
2
actiereg/_basic/admin.py
albertvisser/actiereg
0
6613345
<gh_stars>0 """register models to the admin site """ from django.contrib import admin import actiereg._basic.models as my admin.site.register(my.Status) admin.site.register(my.Soort) admin.site.register(my.Page) admin.site.register(my.Actie) admin.site.register(my.Event) admin.site.register(my.SortOrder) admin.site.register(my.Selection) admin.site.register(my.Worker)
"""register models to the admin site """ from django.contrib import admin import actiereg._basic.models as my admin.site.register(my.Status) admin.site.register(my.Soort) admin.site.register(my.Page) admin.site.register(my.Actie) admin.site.register(my.Event) admin.site.register(my.SortOrder) admin.site.register(my.Selection) admin.site.register(my.Worker)
en
0.862266
register models to the admin site
1.804322
2
test/test_cli.py
bagheera-lang/bagheera-python
0
6613346
from bagheera import cli assert cli.main() is None
from bagheera import cli assert cli.main() is None
none
1
1.038945
1
lib/rucio/db/sqla/migrate_repo/versions/3ad36e2268b0_create_collection_replicas_updates_table.py
balrampariyarath/rucio
1
6613347
<reponame>balrampariyarath/rucio # Copyright European Organization for Nuclear Research (CERN) # # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Authors: # - <NAME>, <<EMAIL>>, 2015 # - <NAME>, <<EMAIL>>, 2017 """Create collection_replicas_updates table Revision ID: 3ad36e2268b0 Revises: ae2a56fcc89 Create Date: 2015-03-16 15:32:59.620185 """ from alembic import context from alembic.op import (create_table, create_primary_key, add_column, create_check_constraint, create_index, drop_constraint, drop_column, drop_table, drop_index) import sqlalchemy as sa from rucio.db.sqla.constants import DIDType from rucio.db.sqla.types import GUID # revision identifiers, used by Alembic. revision = '3ad36e2268b0' down_revision = '42db2617c364' def upgrade(): ''' upgrade method ''' if context.get_context().dialect.name != 'sqlite': add_column('collection_replicas', sa.Column('available_replicas_cnt', sa.BigInteger())) add_column('collection_replicas', sa.Column('available_bytes', sa.BigInteger())) create_table('updated_col_rep', sa.Column('id', GUID()), sa.Column('scope', sa.String(25)), sa.Column('name', sa.String(255)), sa.Column('did_type', DIDType.db_type(name='UPDATED_COL_REP_TYPE_CHK')), sa.Column('rse_id', GUID()), sa.Column('updated_at', sa.DateTime), sa.Column('created_at', sa.DateTime)) if context.get_context().dialect.name != 'sqlite': create_primary_key('UPDATED_COL_REP_PK', 'updated_col_rep', ['id']) create_check_constraint('UPDATED_COL_REP_SCOPE_NN', 'updated_col_rep', 'scope IS NOT NULL') create_check_constraint('UPDATED_COL_REP_NAME_NN', 'updated_col_rep', 'name IS NOT NULL') create_index('UPDATED_COL_REP_SNR_IDX', 'updated_col_rep', ['scope', 'name', 'rse_id']) def downgrade(): ''' downgrade method ''' if context.get_context().dialect.name != 'sqlite': drop_column('collection_replicas', 
'available_replicas_cnt') drop_column('collection_replicas', 'available_bytes') if context.get_context().dialect.name == 'postgresql': drop_constraint('UPDATED_COL_REP_PK', 'updated_col_rep', type_='primary') drop_constraint('UPDATED_COL_REP_SCOPE_NN', 'updated_col_rep') drop_constraint('UPDATED_COL_REP_NAME_NN', 'updated_col_rep') drop_constraint('UPDATED_COL_REP_TYPE_CHK', 'updated_col_rep') drop_index('UPDATED_COL_REP_SNR_IDX', 'updated_col_rep') drop_table('updated_col_rep')
# Copyright European Organization for Nuclear Research (CERN) # # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Authors: # - <NAME>, <<EMAIL>>, 2015 # - <NAME>, <<EMAIL>>, 2017 """Create collection_replicas_updates table Revision ID: 3ad36e2268b0 Revises: ae2a56fcc89 Create Date: 2015-03-16 15:32:59.620185 """ from alembic import context from alembic.op import (create_table, create_primary_key, add_column, create_check_constraint, create_index, drop_constraint, drop_column, drop_table, drop_index) import sqlalchemy as sa from rucio.db.sqla.constants import DIDType from rucio.db.sqla.types import GUID # revision identifiers, used by Alembic. revision = '3ad36e2268b0' down_revision = '42db2617c364' def upgrade(): ''' upgrade method ''' if context.get_context().dialect.name != 'sqlite': add_column('collection_replicas', sa.Column('available_replicas_cnt', sa.BigInteger())) add_column('collection_replicas', sa.Column('available_bytes', sa.BigInteger())) create_table('updated_col_rep', sa.Column('id', GUID()), sa.Column('scope', sa.String(25)), sa.Column('name', sa.String(255)), sa.Column('did_type', DIDType.db_type(name='UPDATED_COL_REP_TYPE_CHK')), sa.Column('rse_id', GUID()), sa.Column('updated_at', sa.DateTime), sa.Column('created_at', sa.DateTime)) if context.get_context().dialect.name != 'sqlite': create_primary_key('UPDATED_COL_REP_PK', 'updated_col_rep', ['id']) create_check_constraint('UPDATED_COL_REP_SCOPE_NN', 'updated_col_rep', 'scope IS NOT NULL') create_check_constraint('UPDATED_COL_REP_NAME_NN', 'updated_col_rep', 'name IS NOT NULL') create_index('UPDATED_COL_REP_SNR_IDX', 'updated_col_rep', ['scope', 'name', 'rse_id']) def downgrade(): ''' downgrade method ''' if context.get_context().dialect.name != 'sqlite': drop_column('collection_replicas', 'available_replicas_cnt') 
drop_column('collection_replicas', 'available_bytes') if context.get_context().dialect.name == 'postgresql': drop_constraint('UPDATED_COL_REP_PK', 'updated_col_rep', type_='primary') drop_constraint('UPDATED_COL_REP_SCOPE_NN', 'updated_col_rep') drop_constraint('UPDATED_COL_REP_NAME_NN', 'updated_col_rep') drop_constraint('UPDATED_COL_REP_TYPE_CHK', 'updated_col_rep') drop_index('UPDATED_COL_REP_SNR_IDX', 'updated_col_rep') drop_table('updated_col_rep')
en
0.653683
# Copyright European Organization for Nuclear Research (CERN) # # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Authors: # - <NAME>, <<EMAIL>>, 2015 # - <NAME>, <<EMAIL>>, 2017 Create collection_replicas_updates table Revision ID: 3ad36e2268b0 Revises: ae2a56fcc89 Create Date: 2015-03-16 15:32:59.620185 # revision identifiers, used by Alembic. upgrade method downgrade method
1.729181
2
slixmpp/plugins/xep_0333/stanza.py
marconfus/slixmpp
0
6613348
""" slixmpp: The Slick XMPP Library Copyright (C) 2016 <NAME> This file is part of slixmpp. See the file LICENSE for copying permission. """ from slixmpp.xmlstream import ElementBase class Markable(ElementBase): name = 'markable' plugin_attrib = 'markable' namespace = 'urn:xmpp:chat-markers:0' class Received(ElementBase): name = 'received' plugin_attrib = 'received' namespace = 'urn:xmpp:chat-markers:0' interfaces = {'id'} class Displayed(ElementBase): name = 'displayed' plugin_attrib = 'displayed' namespace = 'urn:xmpp:chat-markers:0' interfaces = {'id'} class Acknowledged(ElementBase): name = 'acknowledged' plugin_attrib = 'acknowledged' namespace = 'urn:xmpp:chat-markers:0' interfaces = {'id'}
""" slixmpp: The Slick XMPP Library Copyright (C) 2016 <NAME> This file is part of slixmpp. See the file LICENSE for copying permission. """ from slixmpp.xmlstream import ElementBase class Markable(ElementBase): name = 'markable' plugin_attrib = 'markable' namespace = 'urn:xmpp:chat-markers:0' class Received(ElementBase): name = 'received' plugin_attrib = 'received' namespace = 'urn:xmpp:chat-markers:0' interfaces = {'id'} class Displayed(ElementBase): name = 'displayed' plugin_attrib = 'displayed' namespace = 'urn:xmpp:chat-markers:0' interfaces = {'id'} class Acknowledged(ElementBase): name = 'acknowledged' plugin_attrib = 'acknowledged' namespace = 'urn:xmpp:chat-markers:0' interfaces = {'id'}
en
0.726178
slixmpp: The Slick XMPP Library Copyright (C) 2016 <NAME> This file is part of slixmpp. See the file LICENSE for copying permission.
1.537792
2
ProjectFolder/Code/main.py
sebastianwindeck/DeepLearningProject
1
6613349
import matplotlib import datetime import inspect import os import numpy as np from evaluate import final_score from extractfeatures import prepareData from model import Noiser, AMTNetwork from visualize import visualize_input matplotlib.use('Agg') if __name__ == '__main__': proj_root = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), '..') # Define a parameter structure args args = { # model parameters: 'model_name': 'baseline', 'init_lr': 1e-1, 'lr_decay': 'linear', # parameters for audio 'bin_multiple': 3, 'residual': 'False', 'hop_length': 512, 'sr': 16000, 'spec_type': 'cqt', 'min_midi': 37, # 21 corresponds to A0 (lowest tone on a "normal" piano), 27.5Hz 'max_midi': 92, # 108 corresponds to C8 (highest tone on a "normal" piano), 4.2kHz 'window_size': 7, # choose higher value than 5 # training parameters: 'train_basemodel': True, 'epochs_on_clean': 1000, 'epochs_on_noisy': 50, 'noise_epochs': 10, 'min_difficulty_on_noisy': 0.19, 'max_difficulty_on_noisy': 0.60, # noise parameters: 'noise_type': 'simplistic', 'noise_frames_per_epoch': 100, 'noise_initial_level': 0.03, 'noise_increase_factor': 2.5, 'noise_decrease_factor': 2, 'balance_classes': True, # directories: 'proj_root': proj_root, # - root directory of maps (with substructure as given in maps): 'wav_dir': os.path.join(proj_root, 'Audiodaten'), # - directory to store checkpoint files. 
All files are stored in this directory: 'checkpoint_root': os.path.join(proj_root, 'Checkpoints', 'train' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')), 'basemodel_root': os.path.join(proj_root, 'Basemodel'), 'maxFramesPerFile': -1, # set to -1 to ignore 'maxFrames': -1 # set to -1 to ignore } # derived parameters: args['note_range'] = args['max_midi'] - args['min_midi'] + 1 args['feature_bins'] = args['note_range'] * args['bin_multiple'] args['input_shape'] = (args['window_size'], args['feature_bins']) args['input_shape_channels'] = (args['window_size'], args['feature_bins'], 1) args['n_bins'] = args['note_range'] * args['bin_multiple'] if os.path.exists(args['checkpoint_root']): print("WARNING: Checkpoint root directory already exists!!") else: os.mkdir(args['checkpoint_root']) print('Created checkpoint root.') if os.path.exists(args['basemodel_root']): print("WARNING: Basemodel root directory already exists!!") else: os.mkdir(args['basemodel_root']) print('Created basemodel root.') inputs, outputs, datapath = prepareData(args) print("Inputs have shape: ", inputs.shape) print("Outputs have shape: ", outputs.shape) print("Total number of notes detected in input set ", np.sum(outputs)) print("Number of 1s in output: ", sum(sum(outputs == 1))) print("Number of 0s in output: ", sum(sum(outputs == 0))) print("Size of outputs: ", outputs.size) print("=> 1s should be weighted ", sum(sum(outputs == 0)) / sum(sum(outputs == 1))) input_level = np.mean(inputs) print("Average sound level: ", input_level) np.append(input_level, np.min(inputs)) np.append(input_level, np.max(inputs)) np.append(input_level, np.percentile(inputs, 25)) np.append(input_level, np.percentile(inputs, 50)) np.append(input_level, np.percentile(inputs, 75)) np.save(os.path.join(args['checkpoint_root'], "input_level"), input_level) exit() visualize_input(inputs, save_path=os.path.join(args['checkpoint_root'], 'input_heatmap.png')) # initialize the amt model, and do an initial training at = 
AMTNetwork(args) baseModelPath = os.path.join(args['basemodel_root'], 'basemodel') evaluatePath = os.path.join(args['checkpoint_root'], 'diagram') if args['train_basemodel']: # initial training, with clean data: print("training initial basemodel") at.compilation(outputs, save_path=os.path.join(args['checkpoint_root'], 'balance_weight.png')) at.train(inputs, outputs, args=args, epochs=args['epochs_on_clean'], train_descr='initial') at.save(model_path=baseModelPath) else: print("load existing basemodel") # Load Basemodel: bm = AMTNetwork(args) bm.load(baseModelPath) bm.compilation(outputs, save_path=os.path.join(args['checkpoint_root'], 'balance_weight.png')) # Noise Model to Train: at.load(baseModelPath) at.compilation(outputs, save_path=os.path.join(args['checkpoint_root'], 'balance_weight.png')) # initialize noiser: noise_generator = Noiser(noise_type="gaussian", noise_size=args['input_shape']) # Track f1 scores of the basemodel that is not further trained to noise print("computing initial basemodel scores") basemodel_score = bm.getscores(inputs, outputs) print('scores of basemodel', basemodel_score) # Save Noise levels noise_levels = np.zeros(shape=1) noise_level = args['noise_initial_level'] # Initialize base loss idx = np.random.randint(0, inputs.shape[0], args['noise_frames_per_epoch']) input_clean = inputs[idx] y = outputs[idx] res_old = at.evaluate_old(input_clean, y) # loop over various noise epochs: for noiseEpoch in range(args['noise_epochs']): # a. Generate noise candidate (only) with current noise level [advTrain] this_noise = noise_generator.generate(args['noise_frames_per_epoch']) # indices of data samples to be noised. 
idx = np.random.randint(0, inputs.shape[0], args['noise_frames_per_epoch']) classi_change = 0.00 while True: # Combine noise with clean data (noise and audio) noisy_X = inputs[idx] + noise_level * this_noise print("current noise level before test", noise_level) noisy_Xold = inputs[idx] + noise_levels[noiseEpoch] * this_noise print("current old noise level before test", noise_levels[noiseEpoch]) y = outputs[idx] classi_change = at.evaluation(noisy_X, res_old, y) print("classifier changed by", classi_change) if noise_level > 10e8 or noise_level < 10e-8: print("Noise Level is: ", noise_level, " in epoch ", noiseEpoch) print("BREAK because of size threshold") break elif classi_change > args['max_difficulty_on_noisy']: # “too hard for AMT” -> decrease noise level print("too hard") noise_level /= args['noise_decrease_factor'] print('Current noise level' + str(float(noise_level)) + ' in epoch ' + str(noiseEpoch)) continue # Jump to the next cycle elif classi_change < args['min_difficulty_on_noisy']: # “too easy for AMT” -> increase noise level noise_level *= args['noise_increase_factor'] print("too easy") print('Current noise level' + str(float(noise_level)) + ' in epoch ' + str(noiseEpoch)) continue # Jump to the next cycle else: print("Noise Level is: ", noise_level, " in epoch ", noiseEpoch) print("Noise Level accepted for training") # if we reach this point, the classi_perf is in the defined interval # => Exit the while loop and train the amt with the new noisy data break # appending current noise level before training to numpy array "noise_levels" noise_levels = np.append(noise_levels, noise_level) # Train with noisy samples (for a given number of epochs, with intermed. 
Result saved) this_noise = noise_generator.generate(inputs.shape[0]) noisy_inputs = inputs + np.random.uniform(0, noise_level, 1) * this_noise print("start training") at.train(noisy_inputs, outputs, args=args, epochs=args['epochs_on_noisy'], train_descr='noisy_iter_' + str(noiseEpoch)) # compute loss for next loop evaluation res_old = at.evaluate_old(noisy_X, y) bm_pred = bm.getscores(noisy_inputs, outputs) print("score of basemodel on noisy data", bm_pred) basemodel_score = np.append(basemodel_score, bm_pred) np.save(os.path.join(args['checkpoint_root'], "noise_levels"), noise_levels) np.save(os.path.join(args['checkpoint_root'], "bm_score"), basemodel_score) if noiseEpoch != 0 and ((noiseEpoch & (noiseEpoch - 1)) == 0): y_pred = at.transcribe(noisy_inputs) print(np.max(y_pred, axis=1)) y_pred = np.around(y_pred, decimals=0) print(y_pred.shape) y_true = outputs print(y_true.shape) final_score(y_pred=y_pred, y_true=y_true, description=str(noiseEpoch)) # Save np array of noise levels np.save(os.path.join(args['checkpoint_root'], "noise_levels"), noise_levels) print("all noise levels saved") np.save(os.path.join(args['checkpoint_root'], "bm_score"), basemodel_score) print("all basemodel scores on noise levels saved") # Final evaluation: final_score(y_pred=y_pred, y_true=y_true, description='final') # pitch_confusion(y_pred=y_pred, y_true=y_true, save_path=evaluatePath, description='final') print("DONE.")
import matplotlib
import datetime
import inspect
import os
import numpy as np
from evaluate import final_score
from extractfeatures import prepareData
from model import Noiser, AMTNetwork
from visualize import visualize_input

# Select a headless backend before any pyplot import.
matplotlib.use('Agg')

if __name__ == '__main__':
    # Project root = parent directory of the directory containing this file.
    proj_root = os.path.join(
        os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), '..')

    # Define a parameter structure args
    args = {
        # model parameters:
        'model_name': 'baseline',
        'init_lr': 1e-1,
        'lr_decay': 'linear',
        # parameters for audio
        'bin_multiple': 3,
        'residual': 'False',
        'hop_length': 512,
        'sr': 16000,
        'spec_type': 'cqt',
        'min_midi': 37,  # 21 corresponds to A0 (lowest tone on a "normal" piano), 27.5Hz
        'max_midi': 92,  # 108 corresponds to C8 (highest tone on a "normal" piano), 4.2kHz
        'window_size': 7,  # choose higher value than 5
        # training parameters:
        'train_basemodel': True,
        'epochs_on_clean': 1000,
        'epochs_on_noisy': 50,
        'noise_epochs': 10,
        'min_difficulty_on_noisy': 0.19,
        'max_difficulty_on_noisy': 0.60,
        # noise parameters:
        'noise_type': 'simplistic',
        'noise_frames_per_epoch': 100,
        'noise_initial_level': 0.03,
        'noise_increase_factor': 2.5,
        'noise_decrease_factor': 2,
        'balance_classes': True,
        # directories:
        'proj_root': proj_root,
        # - root directory of maps (with substructure as given in maps):
        'wav_dir': os.path.join(proj_root, 'Audiodaten'),
        # - directory to store checkpoint files. All files are stored in this directory:
        'checkpoint_root': os.path.join(
            proj_root, 'Checkpoints',
            'train' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')),
        'basemodel_root': os.path.join(proj_root, 'Basemodel'),
        'maxFramesPerFile': -1,  # set to -1 to ignore
        'maxFrames': -1  # set to -1 to ignore
    }

    # derived parameters:
    args['note_range'] = args['max_midi'] - args['min_midi'] + 1
    args['feature_bins'] = args['note_range'] * args['bin_multiple']
    args['input_shape'] = (args['window_size'], args['feature_bins'])
    args['input_shape_channels'] = (args['window_size'], args['feature_bins'], 1)
    args['n_bins'] = args['note_range'] * args['bin_multiple']

    if os.path.exists(args['checkpoint_root']):
        print("WARNING: Checkpoint root directory already exists!!")
    else:
        os.mkdir(args['checkpoint_root'])
        print('Created checkpoint root.')
    if os.path.exists(args['basemodel_root']):
        print("WARNING: Basemodel root directory already exists!!")
    else:
        os.mkdir(args['basemodel_root'])
        print('Created basemodel root.')

    inputs, outputs, datapath = prepareData(args)
    print("Inputs have shape: ", inputs.shape)
    print("Outputs have shape: ", outputs.shape)
    print("Total number of notes detected in input set ", np.sum(outputs))
    print("Number of 1s in output: ", sum(sum(outputs == 1)))
    print("Number of 0s in output: ", sum(sum(outputs == 0)))
    print("Size of outputs: ", outputs.size)
    print("=> 1s should be weighted ", sum(sum(outputs == 0)) / sum(sum(outputs == 1)))

    # Summary statistics of the input level.
    # BUGFIX: np.append returns a NEW array and does not modify its argument;
    # the original code discarded all five results and saved only the scalar
    # mean. Build the statistics array explicitly instead.
    input_level = np.array([
        np.mean(inputs),
        np.min(inputs),
        np.max(inputs),
        np.percentile(inputs, 25),
        np.percentile(inputs, 50),
        np.percentile(inputs, 75),
    ])
    print("Average sound level: ", input_level[0])
    np.save(os.path.join(args['checkpoint_root'], "input_level"), input_level)
    # NOTE(review): debug leftover? This exit() makes everything below
    # unreachable (no training or evaluation runs). Kept to preserve current
    # behavior -- remove it to actually train.
    exit()

    visualize_input(inputs, save_path=os.path.join(args['checkpoint_root'], 'input_heatmap.png'))

    # initialize the amt model, and do an initial training
    at = AMTNetwork(args)
    baseModelPath = os.path.join(args['basemodel_root'], 'basemodel')
    evaluatePath = os.path.join(args['checkpoint_root'], 'diagram')

    if args['train_basemodel']:
        # initial training, with clean data:
        print("training initial basemodel")
        at.compilation(outputs, save_path=os.path.join(args['checkpoint_root'], 'balance_weight.png'))
        at.train(inputs, outputs, args=args, epochs=args['epochs_on_clean'], train_descr='initial')
        at.save(model_path=baseModelPath)
    else:
        print("load existing basemodel")

    # Load Basemodel: frozen reference model, never trained on noise.
    bm = AMTNetwork(args)
    bm.load(baseModelPath)
    bm.compilation(outputs, save_path=os.path.join(args['checkpoint_root'], 'balance_weight.png'))

    # Noise Model to Train: starts from the same basemodel weights.
    at.load(baseModelPath)
    at.compilation(outputs, save_path=os.path.join(args['checkpoint_root'], 'balance_weight.png'))

    # initialize noiser:
    noise_generator = Noiser(noise_type="gaussian", noise_size=args['input_shape'])

    # Track f1 scores of the basemodel that is not further trained to noise
    print("computing initial basemodel scores")
    basemodel_score = bm.getscores(inputs, outputs)
    print('scores of basemodel', basemodel_score)

    # Save Noise levels
    noise_levels = np.zeros(shape=1)
    noise_level = args['noise_initial_level']

    # Initialize base loss on a random clean subset.
    idx = np.random.randint(0, inputs.shape[0], args['noise_frames_per_epoch'])
    input_clean = inputs[idx]
    y = outputs[idx]
    res_old = at.evaluate_old(input_clean, y)

    # loop over various noise epochs:
    for noiseEpoch in range(args['noise_epochs']):
        # a. Generate noise candidate (only) with current noise level [advTrain]
        this_noise = noise_generator.generate(args['noise_frames_per_epoch'])
        # indices of data samples to be noised.
        idx = np.random.randint(0, inputs.shape[0], args['noise_frames_per_epoch'])
        classi_change = 0.00
        # Calibrate noise_level until the classifier-change metric falls inside
        # [min_difficulty_on_noisy, max_difficulty_on_noisy].
        while True:
            # Combine noise with clean data (noise and audio)
            noisy_X = inputs[idx] + noise_level * this_noise
            print("current noise level before test", noise_level)
            noisy_Xold = inputs[idx] + noise_levels[noiseEpoch] * this_noise
            print("current old noise level before test", noise_levels[noiseEpoch])
            y = outputs[idx]
            classi_change = at.evaluation(noisy_X, res_old, y)
            print("classifier changed by", classi_change)
            if noise_level > 10e8 or noise_level < 10e-8:
                # Runaway calibration guard (note: bounds are 1e9 / 1e-7).
                print("Noise Level is: ", noise_level, " in epoch ", noiseEpoch)
                print("BREAK because of size threshold")
                break
            elif classi_change > args['max_difficulty_on_noisy']:
                # "too hard for AMT" -> decrease noise level
                print("too hard")
                noise_level /= args['noise_decrease_factor']
                print('Current noise level' + str(float(noise_level)) + ' in epoch ' + str(noiseEpoch))
                continue  # Jump to the next cycle
            elif classi_change < args['min_difficulty_on_noisy']:
                # "too easy for AMT" -> increase noise level
                noise_level *= args['noise_increase_factor']
                print("too easy")
                print('Current noise level' + str(float(noise_level)) + ' in epoch ' + str(noiseEpoch))
                continue  # Jump to the next cycle
            else:
                print("Noise Level is: ", noise_level, " in epoch ", noiseEpoch)
                print("Noise Level accepted for training")
                # classi_change is inside the target interval
                # => exit the while loop and train the amt on the noisy data
                break

        # appending current noise level before training to numpy array "noise_levels"
        noise_levels = np.append(noise_levels, noise_level)

        # Train with noisy samples (for a given number of epochs, with
        # intermediate results saved).
        this_noise = noise_generator.generate(inputs.shape[0])
        noisy_inputs = inputs + np.random.uniform(0, noise_level, 1) * this_noise
        print("start training")
        at.train(noisy_inputs, outputs, args=args, epochs=args['epochs_on_noisy'],
                 train_descr='noisy_iter_' + str(noiseEpoch))

        # compute loss for next loop evaluation
        res_old = at.evaluate_old(noisy_X, y)
        bm_pred = bm.getscores(noisy_inputs, outputs)
        print("score of basemodel on noisy data", bm_pred)
        basemodel_score = np.append(basemodel_score, bm_pred)
        np.save(os.path.join(args['checkpoint_root'], "noise_levels"), noise_levels)
        np.save(os.path.join(args['checkpoint_root'], "bm_score"), basemodel_score)

        # Evaluate at power-of-two epochs only (1, 2, 4, 8, ...).
        if noiseEpoch != 0 and ((noiseEpoch & (noiseEpoch - 1)) == 0):
            y_pred = at.transcribe(noisy_inputs)
            print(np.max(y_pred, axis=1))
            y_pred = np.around(y_pred, decimals=0)
            print(y_pred.shape)
            y_true = outputs
            print(y_true.shape)
            final_score(y_pred=y_pred, y_true=y_true, description=str(noiseEpoch))

    # Save np array of noise levels
    np.save(os.path.join(args['checkpoint_root'], "noise_levels"), noise_levels)
    print("all noise levels saved")
    np.save(os.path.join(args['checkpoint_root'], "bm_score"), basemodel_score)
    print("all basemodel scores on noise levels saved")

    # Final evaluation:
    # NOTE(review): y_pred / y_true are only bound inside the power-of-two
    # branch above; with noise_epochs < 2 this line raises NameError.
    final_score(y_pred=y_pred, y_true=y_true, description='final')
    # pitch_confusion(y_pred=y_pred, y_true=y_true, save_path=evaluatePath, description='final')
    print("DONE.")
en
0.774701
# Define a parameter structure args # model parameters: # parameters for audio # 21 corresponds to A0 (lowest tone on a "normal" piano), 27.5Hz # 108 corresponds to C8 (highest tone on a "normal" piano), 4.2kHz # choose higher value than 5 # training parameters: # noise parameters: # directories: # - root directory of maps (with substructure as given in maps): # - directory to store checkpoint files. All files are stored in this directory: # set to -1 to ignore # set to -1 to ignore # derived parameters: # initialize the amt model, and do an initial training # initial training, with clean data: # Load Basemodel: # Noise Model to Train: # initialize noiser: # Track f1 scores of the basemodel that is not further trained to noise # Save Noise levels # Initialize base loss # loop over various noise epochs: # a. Generate noise candidate (only) with current noise level [advTrain] # indices of data samples to be noised. # Combine noise with clean data (noise and audio) # “too hard for AMT” -> decrease noise level # Jump to the next cycle # “too easy for AMT” -> increase noise level # Jump to the next cycle # if we reach this point, the classi_perf is in the defined interval # => Exit the while loop and train the amt with the new noisy data # appending current noise level before training to numpy array "noise_levels" # Train with noisy samples (for a given number of epochs, with intermed. Result saved) # compute loss for next loop evaluation # Save np array of noise levels # Final evaluation: # pitch_confusion(y_pred=y_pred, y_true=y_true, save_path=evaluatePath, description='final')
2.102736
2
_autoclimate/occupancy.py
rr326/ad_app_autoclimate
0
6613350
# <gh_stars>0  (dataset artifact, kept as a comment)
import datetime as dt
from typing import List

from _autoclimate.utils import climate_name
from adplus import Hass
from dateutil import tz

"""
Create new sensors

Reason: That way you do auto off if unoccupied since AND last_manual_change > X hours

* unoccupied_since:
    * Last unoccupied
    * None if no data
    * datetime.max if currently occupied
* last_manual_change
    * Timestamps as above

# TODO
* Offline - handle
"""


class Occupancy:
    """Creates and maintains per-climate "unoccupied since" sensors.

    For every configured climate a companion sensor
    ``sensor.<appname>_<climate>_unoccupied_since`` is created whose state is:

    * ``UNOCCUPIED_SINCE_OCCUPIED_VALUE`` (far-future datetime) while occupied,
    * the timestamp of the last "on" occupancy event while off/unavailable,
    * ``None`` when the occupancy state is unexpected.
    """

    # Sentinel meaning "currently occupied"; sorts after any real timestamp.
    UNOCCUPIED_SINCE_OCCUPIED_VALUE = dt.datetime(dt.MAXYEAR, 12, 29, tzinfo=tz.tzutc())

    def __init__(
        self,
        hass: Hass,
        config: dict,
        appname: str,
        climates: list,
        test_mode: bool,
    ):
        self.hass = hass
        self.aconfig = config
        self.appname = appname
        self.test_mode = test_mode
        self.climates = climates

        # Create the sensors first, then attach listeners slightly later.
        self.hass.run_in(self.create_occupancy_sensors, 0)
        self.hass.run_in(self.init_occupancy_listeners, 0.1)

    def unoccupied_sensor_name(self, climate) -> str:
        """Entity id of *climate*'s "unoccupied since" sensor."""
        return self.unoccupied_sensor_name_static(self.appname, climate)

    @staticmethod
    def unoccupied_sensor_name_static(appname, climate) -> str:
        """Entity-id builder usable without an Occupancy instance."""
        return f"sensor.{appname}_{climate_name(climate)}_unoccupied_since"

    def create_occupancy_sensors(self, kwargs) -> None:
        """Create one "unoccupied since" sensor per climate, seeded from history."""
        # Unoccupied Since Sensors
        for climate in self.climates:
            unoccupied_sensor_name = self.unoccupied_sensor_name(climate)
            last_on_date = self.history_last_on_date(climate=climate)
            self.hass.update_state(
                unoccupied_sensor_name,
                state=last_on_date,
                attributes={
                    # BUGFIX: attribute key was misspelled "freindly_name".
                    "friendly_name": f"{climate_name(climate)} - unoccupied since",
                    "device_class": "timestamp",
                },
            )
            self.hass.log(
                f"Created sensor: {unoccupied_sensor_name}. Initial state: {last_on_date}"
            )

    def init_occupancy_listeners(self, kwargs) -> None:
        """
        This will create a different occupancy sensor for each climate,
        so if multiple climates have the same oc_sensor, you'll get
        multiple listeners.
        """
        for climate in self.climates:
            oc_sensor = self.get_sensor(climate=climate)
            self.hass.log(f"listen_state: {oc_sensor}")
            self.hass.listen_state(
                self.update_occupancy_sensor,
                entity=oc_sensor,
                attribute="all",
                climate=climate,
            )

    def update_occupancy_sensor(self, entity, attribute, old, new, kwargs) -> None:
        """listen_state callback: push occupancy changes into the sensor."""
        climate = kwargs["climate"]
        # self.hass.log(f'update_occupancy_sensor: {entity} -- {climate} -- {new} -- {attribute}')
        last_on_date = self.oc_sensor_val_to_last_on_date(
            new["state"], new["last_updated"]
        )
        unoccupied_sensor_name = self.unoccupied_sensor_name(climate)
        self.hass.update_state(
            unoccupied_sensor_name,
            state=last_on_date,
        )
        self.hass.log(
            f"update_occupancy_sensor - {unoccupied_sensor_name} - state: {last_on_date}"
        )

    def get_sensor(self, climate=None, sensor=None):
        """Resolve the occupancy sensor entity id from exactly one of the args."""
        if climate and sensor:
            raise RuntimeError(
                "Programming error - history_last_on_date: give climate OR sensor"
            )
        elif climate is None and sensor is None:
            raise RuntimeError(
                "Programming error - need a climate or sensor. Got None."
            )
        elif sensor:
            return sensor
        else:
            try:
                oc_sensor = self.aconfig[climate]["occupancy_sensor"]
            except KeyError:
                raise RuntimeError(f"Unable to get occupancy_sensor for {climate}")
            return oc_sensor

    def oc_sensor_val_to_last_on_date(self, state, last_on_date):
        """Map an occupancy-sensor state to the "unoccupied since" value."""
        if state == "on":
            return self.UNOCCUPIED_SINCE_OCCUPIED_VALUE
        elif state in ["off", "unavailable"]:
            return last_on_date
        else:
            self.hass.log(f"Unexpected last_on_date state: {state}")
            # Error or offline
            return None

    def history_last_on_date(self, climate=None, sensor=None):
        """Initial sensor value, derived from recorded history."""
        state, _duration_off, last_on_date = self.get_unoccupied_time_for(
            climate, sensor
        )
        return self.oc_sensor_val_to_last_on_date(state, last_on_date)

    def get_unoccupied_time_for(self, climate=None, sensor=None):
        """Return (state, duration_off_hours, last_on_date) for climate/sensor."""
        oc_sensor = self.get_sensor(climate=climate, sensor=sensor)
        state, duration_off, last_on_date = self._history_occupancy_info(oc_sensor)
        return state, duration_off, last_on_date

    @staticmethod
    def duration_off_static(hass, dateval):
        """
        0 - currently on
        > 0 - number hours off
        < 0 / None - Error
        """
        if isinstance(dateval, str):
            dateval = dt.datetime.fromisoformat(dateval)
        if dateval.tzinfo is None:
            # BUGFIX: datetime.replace() returns a NEW object; the original
            # discarded the result, leaving dateval naive and making the
            # comparison below raise against an aware "now".
            dateval = dateval.replace(tzinfo=tz.tzlocal())
        now = hass.get_now()
        if dateval > now:
            # Sentinel (far-future) value => currently occupied.
            return 0
        duration_off_hours = round((now - dateval).total_seconds() / (60 * 60), 2)
        return duration_off_hours

    def _history_occupancy_info(self, sensor_id: str, days: int = 10):
        """
        returns: state (on/off/unavailable), duration_off (hours float / None),
        last_on_date (datetime, None)

        state = state of occupancy sensor

        All based on an occupancy sensor's history data:
        {
            "entity_id": "binary_sensor.seattle_occupancy",
            "state": "off",  # on/off/unavailable
            "attributes": {"friendly_name": "...", "device_class": "occupancy"},
            "last_changed": "2020-10-28T13:10:47.384057+00:00",
            "last_updated": "2020-10-28T13:10:47.384057+00:00"
        }

        Note - it looks like the occupancy sensor properly handles offline by
        returning an "unavailable" status. (Unlike temp sensors, which show the
        last value.)
        """
        data: List = self.hass.get_history(entity_id=sensor_id, days=days)  # type: ignore
        if not data:
            self.hass.warn(
                f"get_history returned no data for entity: {sensor_id}. Exiting"
            )
            return "error", None, None
        edata = data[0]

        # the get_history() fn doesn't say it guarantees sort (though it appears to be)
        edata = list(reversed(sorted(edata, key=lambda rec: rec["last_updated"])))
        current_state = edata[0]["state"]
        if current_state == "on":
            return "on", None, None

        last_on_date = None
        now: dt.datetime = self.hass.get_now()  # type: ignore
        for rec in edata:
            if rec.get("state") == "on":
                last_on_date = dt.datetime.fromisoformat(rec["last_updated"])
                duration_off_hours = round(
                    (now - last_on_date).total_seconds() / (60 * 60), 2
                )
                return current_state, duration_off_hours, last_on_date

        # Can not find a last on time. Give the total time shown.
        # BUGFIX: use total_seconds() -- .seconds discards whole days, which
        # under-reported the off-duration for anything older than 24h.
        min_time_off = round(
            (now - dt.datetime.fromisoformat(edata[-1]["last_updated"])).total_seconds()
            / (60 * 60),
            2,
        )
        return current_state, min_time_off, None
import datetime as dt
from typing import List

from _autoclimate.utils import climate_name
from adplus import Hass
from dateutil import tz

"""
Create new sensors

Reason: That way you do auto off if unoccupied since AND last_manual_change > X hours

* unoccupied_since:
    * Last unoccupied
    * None if no data
    * datetime.max if currently occupied
* last_manual_change
    * Timestamps as above

# TODO
* Offline - handle
"""


class Occupancy:
    # Maintains one "sensor.<appname>_<climate>_unoccupied_since" entity per
    # configured climate: a far-future sentinel while occupied, the last "on"
    # timestamp while off/unavailable, or None when the state is unexpected.

    # Sentinel meaning "currently occupied"; sorts after any real timestamp.
    UNOCCUPIED_SINCE_OCCUPIED_VALUE = dt.datetime(dt.MAXYEAR, 12, 29, tzinfo=tz.tzutc())

    def __init__(
        self,
        hass: Hass,
        config: dict,
        appname: str,
        climates: list,
        test_mode: bool,
    ):
        self.hass = hass
        self.aconfig = config
        self.appname = appname
        self.test_mode = test_mode
        self.climates = climates

        # Create the sensors first, then attach listeners slightly later.
        self.hass.run_in(self.create_occupancy_sensors, 0)
        self.hass.run_in(self.init_occupancy_listeners, 0.1)

    def unoccupied_sensor_name(self, climate) -> str:
        """Entity id of *climate*'s "unoccupied since" sensor."""
        return self.unoccupied_sensor_name_static(self.appname, climate)

    @staticmethod
    def unoccupied_sensor_name_static(appname, climate) -> str:
        """Entity-id builder usable without an Occupancy instance."""
        return f"sensor.{appname}_{climate_name(climate)}_unoccupied_since"

    def create_occupancy_sensors(self, kwargs) -> None:
        """Create one "unoccupied since" sensor per climate, seeded from history."""
        # Unoccupied Since Sensors
        for climate in self.climates:
            unoccupied_sensor_name = self.unoccupied_sensor_name(climate)
            last_on_date = self.history_last_on_date(climate=climate)
            self.hass.update_state(
                unoccupied_sensor_name,
                state=last_on_date,
                attributes={
                    # NOTE(review): "freindly_name" looks like a typo for
                    # "friendly_name"; key is runtime data, confirm before fixing.
                    "freindly_name": f"{climate_name(climate)} - unoccupied since",
                    "device_class": "timestamp",
                },
            )
            self.hass.log(
                f"Created sensor: {unoccupied_sensor_name}. Initial state: {last_on_date}"
            )

    def init_occupancy_listeners(self, kwargs) -> None:
        """
        This will create a different occupancy sensor for each climate,
        so if multiple climates have the same oc_sensor, you'll get
        multiple listeners.
        """
        for climate in self.climates:
            oc_sensor = self.get_sensor(climate=climate)
            self.hass.log(f"listen_state: {oc_sensor}")
            self.hass.listen_state(
                self.update_occupancy_sensor,
                entity=oc_sensor,
                attribute="all",
                climate=climate,
            )

    def update_occupancy_sensor(self, entity, attribute, old, new, kwargs) -> None:
        """listen_state callback: push occupancy changes into the sensor."""
        climate = kwargs["climate"]
        # self.hass.log(f'update_occupancy_sensor: {entity} -- {climate} -- {new} -- {attribute}')
        last_on_date = self.oc_sensor_val_to_last_on_date(
            new["state"], new["last_updated"]
        )
        unoccupied_sensor_name = self.unoccupied_sensor_name(climate)
        self.hass.update_state(
            unoccupied_sensor_name,
            state=last_on_date,
        )
        self.hass.log(
            f"update_occupancy_sensor - {unoccupied_sensor_name} - state: {last_on_date}"
        )

    def get_sensor(self, climate=None, sensor=None):
        """Resolve the occupancy sensor entity id from exactly one of the args."""
        if climate and sensor:
            raise RuntimeError(
                f"Programming error - history_last_on_date: give climate OR sensor"
            )
        elif climate is None and sensor is None:
            raise RuntimeError(
                f"Programming error - need a climate or sensor. Got None."
            )
        elif sensor:
            return sensor
        else:
            try:
                oc_sensor = self.aconfig[climate]["occupancy_sensor"]
            except KeyError:
                raise RuntimeError(f"Unable to get occupancy_sensor for {climate}")
            return oc_sensor

    def oc_sensor_val_to_last_on_date(self, state, last_on_date):
        """Map an occupancy-sensor state to the "unoccupied since" value."""
        if state == "on":
            return self.UNOCCUPIED_SINCE_OCCUPIED_VALUE
        elif state in ["off", "unavailable"]:
            return last_on_date
        else:
            self.hass.log(f"Unexpected last_on_date state: {state}")
            # Error or offline
            return None

    def history_last_on_date(self, climate=None, sensor=None):
        """Initial sensor value, derived from recorded history."""
        state, duration_off, last_on_date = self.get_unoccupied_time_for(
            climate, sensor
        )
        return self.oc_sensor_val_to_last_on_date(state, last_on_date)

    def get_unoccupied_time_for(self, climate=None, sensor=None):
        """Return (state, duration_off_hours, last_on_date) for climate/sensor."""
        oc_sensor = self.get_sensor(climate=climate, sensor=sensor)
        state, duration_off, last_on_date = self._history_occupancy_info(oc_sensor)
        return state, duration_off, last_on_date

    @staticmethod
    def duration_off_static(hass, dateval):
        """
        0 - currently on
        > 0 - number hours off
        < 0 / None - Error
        """
        if isinstance(dateval, str):
            dateval = dt.datetime.fromisoformat(dateval)
        if dateval.tzinfo is None:
            # NOTE(review): replace() returns a NEW datetime; this result is
            # discarded, so a naive dateval stays naive and the comparison
            # below would raise against an aware now -- likely a bug.
            dateval.replace(tzinfo=tz.tzlocal())
        now = hass.get_now()
        if dateval > now:
            # Sentinel (far-future) value => currently occupied.
            return 0
        duration_off_hours = round((now - dateval).total_seconds() / (60 * 60), 2)
        return duration_off_hours

    def _history_occupancy_info(self, sensor_id: str, days: int = 10):
        """
        returns: state (on/off/unavailable), duration_off (hours float / None),
        last_on_date (datetime, None)

        state = state of occupancy sensor

        All based on an occupancy sensor's history data.
        {
            "entity_id": "binary_sensor.seattle_occupancy",
            "state": "off",  # on/off/unavailable
            "attributes": {
                "friendly_name": "<NAME>",
                "device_class": "occupancy"
            },
            "last_changed": "2020-10-28T13:10:47.384057+00:00",
            "last_updated": "2020-10-28T13:10:47.384057+00:00"
        }

        Note - it looks like the occupancy sensor properly handles offline by
        returning an "unavailble" status. (Unlike temp sensors, which show the
        last value.)
        """
        data: List = self.hass.get_history(entity_id=sensor_id, days=days)  # type: ignore
        if not data or len(data) == 0:
            self.hass.warn(
                f"get_history returned no data for entity: {sensor_id}. Exiting"
            )
            return "error", None, None
        edata = data[0]

        # the get_history() fn doesn't say it guarantees sort (though it appears to be)
        edata = list(reversed(sorted(edata, key=lambda rec: rec["last_updated"])))
        current_state = edata[0]["state"]
        if current_state == "on":
            return "on", None, None

        last_on_date = None
        now: dt.datetime = self.hass.get_now()  # type: ignore
        for rec in edata:
            if rec.get("state") == "on":
                last_on_date = dt.datetime.fromisoformat(rec["last_updated"])
                duration_off_hours = round(
                    (now - last_on_date).total_seconds() / (60 * 60), 2
                )
                return current_state, duration_off_hours, last_on_date

        # Can not find a last on time. Give the total time shown.
        # NOTE(review): .seconds discards whole days; .total_seconds() was
        # probably intended (see the computation just above) -- confirm.
        min_time_off = round(
            (now - dt.datetime.fromisoformat(edata[-1]["last_updated"])).seconds
            / (60 * 60),
            2,
        )
        return current_state, min_time_off, None
en
0.711951
Create new sensors Reason: That way you do auto off if unoccupied since AND last_manual_change > X hours * unoccupied_since: * Last unoccupied * None if no data * datetime.max if currently occupied * last_manual_change * Timestamps as above # TODO * Offline - handle # Unoccupied Since Sensors This will create a different occupancy sensor for each climate, so if multiple climates have the same oc_sensor, you'll get multiple listeners. # self.hass.log(f'update_occupancy_sensor: {entity} -- {climate} -- {new} -- {attribute}') # Error or offline 0 - currently on > 0 - number hours off < 0 / None - Error returns: state (on/off/unavailable), duration_off (hours float / None), last_on_date (datetime, None) state = state of occupancy sensor All based on an occupancy sensor's history data. { "entity_id": "binary_sensor.seattle_occupancy", "state": "off", # on/off/unavailable "attributes": { "friendly_name": "<NAME>", "device_class": "occupancy" }, "last_changed": "2020-10-28T13:10:47.384057+00:00", "last_updated": "2020-10-28T13:10:47.384057+00:00" } Note - it looks like the occupancy sensor properly handles offline by returning an "unavailble" status. (Unlike temp sensors, which show the last value.) # type: ignore # the get_history() fn doesn't say it guarantees sort (though it appears to be) # type: ignore # Can not find a last on time. Give the total time shown.
2.667934
3