hexsha: stringlengths (40 to 40)
size: int64 (2 to 1.02M)
ext: stringclasses (10 values)
lang: stringclasses (1 value)
max_stars_repo_path: stringlengths (4 to 245)
max_stars_repo_name: stringlengths (6 to 130)
max_stars_repo_head_hexsha: stringlengths (40 to 40)
max_stars_repo_licenses: listlengths (1 to 10)
max_stars_count: int64 (1 to 191k)
max_stars_repo_stars_event_min_datetime: stringlengths (24 to 24)
max_stars_repo_stars_event_max_datetime: stringlengths (24 to 24)
max_issues_repo_path: stringlengths (4 to 245)
max_issues_repo_name: stringlengths (6 to 130)
max_issues_repo_head_hexsha: stringlengths (40 to 40)
max_issues_repo_licenses: listlengths (1 to 10)
max_issues_count: int64 (1 to 67k)
max_issues_repo_issues_event_min_datetime: stringlengths (24 to 24)
max_issues_repo_issues_event_max_datetime: stringlengths (24 to 24)
max_forks_repo_path: stringlengths (4 to 245)
max_forks_repo_name: stringlengths (6 to 130)
max_forks_repo_head_hexsha: stringlengths (40 to 40)
max_forks_repo_licenses: listlengths (1 to 10)
max_forks_count: int64 (1 to 105k)
max_forks_repo_forks_event_min_datetime: stringlengths (24 to 24)
max_forks_repo_forks_event_max_datetime: stringlengths (24 to 24)
content: stringlengths (2 to 1.02M)
avg_line_length: float64 (1 to 417k)
max_line_length: int64 (1 to 987k)
alphanum_fraction: float64 (0 to 1)
content_no_comment: stringlengths (0 to 1.01M)
is_comment_constant_removed: bool (1 class)
is_sharp_comment_removed: bool (1 class)
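Read as a Hugging Face datasets schema for a source-code corpus, the fields above can be consumed row by row. A hypothetical access sketch; the dataset identifier below is a placeholder, since the real one is not given in this dump:

from datasets import load_dataset

# "user/python-code-corpus" is a placeholder identifier, not the real dataset name.
ds = load_dataset("user/python-code-corpus", split="train", streaming=True)
for row in ds.take(1):
    print(row["hexsha"], row["size"], row["max_stars_repo_name"])
    print(row["content"][:80])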
f7038a82901c73f8b97543c9ba73f6bc069364fd
1,496
py
Python
Code/word_jumbler.py
squeaky1273/CS-1.3-Core-Data-Structures
e4aeabebc757b65dae89b82b2c341b90cf9880b6
[ "MIT" ]
null
null
null
Code/word_jumbler.py
squeaky1273/CS-1.3-Core-Data-Structures
e4aeabebc757b65dae89b82b2c341b90cf9880b6
[ "MIT" ]
4
2020-02-18T00:17:01.000Z
2020-03-10T21:17:33.000Z
Code/word_jumbler.py
squeaky1273/CS-1.3-Core-Data-Structures
e4aeabebc757b65dae89b82b2c341b90cf9880b6
[ "MIT" ]
null
null
null
# Dictionary
class Dict_word_jumbler(object):
    def __init__(self):
        self.dict = self.build_dict()

    def build_dict(self):
        """Build a dictionary to hold all of the words/letters"""
        dic = {}
        f = open("/usr/share/dict/words", "r")
        word_list = f.readlines()
        for word in word_list:
            word = word.strip().lower()
            words = ''.join(sorted(word))
            dic[words] = word
        return dic

    def unscramble(self, words):
        """Unscramble the letters of each given word"""
        for word in words:
            word = word.strip().lower()
            word_sorted = ''.join(sorted(word))
            if word_sorted in self.dict:
                unscrambled = self.dict[word_sorted]
                print(unscrambled)
            else:
                return None

if __name__ == '__main__':
    # Cartoon prompt for final jumble:
    # "Farley rolled on the barn floor because of his __-______."
    words = ['tefon', 'sokik', 'niumem', 'siconu']
    jumble = Dict_word_jumbler()
    jumble.unscramble(words)

    # # "A bad way for a lawyer to learn the criminal justice system: _____ and _____."
    # words = ['laisa', 'laurr', 'bureek', 'prouot']
    # jumble = Dict_word_jumbler()
    # jumble.unscramble(words)

    # # Cartoon prompt for final jumble: "What a dog house is: A ____ ___."
    # words = ['TARFD', 'JOBUM', 'TENJUK', 'LETHEM']
    # jumble = Dict_word_jumbler()
    # jumble.unscramble(words)
34.790698
87
0.580214
class Dict_word_jumbler(object):
    def __init__(self):
        self.dict = self.build_dict()

    def build_dict(self):
        dic = {}
        f = open("/usr/share/dict/words", "r")
        word_list = f.readlines()
        for word in word_list:
            word = word.strip().lower()
            words = ''.join(sorted(word))
            dic[words] = word
        return dic

    def unscramble(self, words):
        for word in words:
            word = word.strip().lower()
            word_sorted = ''.join(sorted(word))
            if word_sorted in self.dict:
                unscrambled = self.dict[word_sorted]
                print(unscrambled)
            else:
                return None

if __name__ == '__main__':
    words = ['tefon', 'sokik', 'niumem', 'siconu']
    jumble = Dict_word_jumbler()
    jumble.unscramble(words)
true
true
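The jumbler's core trick is indexing the word list by each word's sorted letters, which makes unscrambling a single dict lookup. A self-contained sketch of that idea; the four-word list here is hypothetical, where the original reads /usr/share/dict/words:

# Build an anagram index: sorted letters -> word.
words = ["often", "kiosk", "immune", "cousin"]
index = {''.join(sorted(w)): w for w in words}

def unscramble(jumbled):
    # Returns None when no anagram is known.
    return index.get(''.join(sorted(jumbled.lower())))

print(unscramble("tefon"))   # -> often
print(unscramble("sokik"))   # -> kiosk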
f7038b41b4f2b57e449c6284c229a0c467b90e25
119
py
Python
pickle2.py
himanshuhat21/mcd
2603f7eb0926e2b47464e6913ea665d5dca6b767
[ "MIT" ]
null
null
null
pickle2.py
himanshuhat21/mcd
2603f7eb0926e2b47464e6913ea665d5dca6b767
[ "MIT" ]
null
null
null
pickle2.py
himanshuhat21/mcd
2603f7eb0926e2b47464e6913ea665d5dca6b767
[ "MIT" ]
null
null
null
import pickle

dict1 = {'Python ': 90, 'Java ': 95, 'C++ ': 85}
f = open('bin)file.dat', 'wb')
pickle.dump(dict1, f)
f.close()
23.8
43
0.613445
import pickle

dict1 = {'Python ': 90, 'Java ': 95, 'C++ ': 85}
f = open('bin)file.dat', 'wb')
pickle.dump(dict1, f)
f.close()
true
true
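Loading the dumped dict back is the mirror operation. A minimal sketch; the filename matches the snippet above, including its odd 'bin)file.dat' spelling:

import pickle

# Read the dict back from the file written above.
with open('bin)file.dat', 'rb') as f:
    restored = pickle.load(f)
print(restored)  # {'Python ': 90, 'Java ': 95, 'C++ ': 85}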
f7038c9082852a874f45dece3844cb48296d142a
7,393
py
Python
run/run_ResGraphNet.py
czw1296924847/ResGraphNet
1638236e4138719c324afc3137f31cfec8a9de64
[ "MIT" ]
null
null
null
run/run_ResGraphNet.py
czw1296924847/ResGraphNet
1638236e4138719c324afc3137f31cfec8a9de64
[ "MIT" ]
null
null
null
run/run_ResGraphNet.py
czw1296924847/ResGraphNet
1638236e4138719c324afc3137f31cfec8a9de64
[ "MIT" ]
null
null
null
""" Testing ResGraphNet """ import datetime import numpy as np import pandas as pd import torch import os import os.path as osp import matplotlib.pyplot as plt import sys sys.path.append("..") import func.cal as cal device = "cuda:0" if torch.cuda.is_available() else "cpu" # device = "cpu" l_x = 60 # Data sequence length l_y = 1 # Label sequence length lr = 0.0001 # Learning rate weight_decay = 5e-4 epochs = 4000 hidden_dim = 64 gnn_style = "ResGraphNet" save_fig = True # Whether to save picture save_txt = False # Whether to save txt save_np = True # Whether to save np file save_model = True # Whether to save network model ratio_train = 0.5 # Proportion of training datasets fig_size = (16, 12) ts_name_all = ["cli_dash", "HadCRUT5", "temp_month", "temp_year", "elect", "traffic", "sales"] ts_name_folder = "HadCRUT5" # Name of the folder where the data resides ts_name = "HadCRUT5_global" # Name of the selected time series iv = 1 # sampling interval, used for plotting curves way = "mean" # The style of plot curves of real data and predict results x_address = osp.join("../datasets", ts_name_folder, ts_name + ".npy") x = np.load(x_address) num = x.shape[0] # The length of time series result_address = osp.join("../result", ts_name, "ResGraphNet") if not(osp.exists(result_address)): os.makedirs(result_address) num_train = int(ratio_train * num) data_train, data_test = x[:num_train], x[num_train:num] # get training dataset and test dataset len_interp = l_y + 6 data_test_ = np.array(data_test[:-l_y].tolist() + data_test[-len_interp-l_y:-l_y].tolist() + data_test[-l_y:].tolist()) # Using Graph Neural network, prepare data information x_train, y_train = cal.create_inout_sequences(data_train, l_x, l_y, style="arr") x_test, y_test = cal.create_inout_sequences(data_test_, l_x, l_y, style="arr") x_train = torch.from_numpy(x_train).float().to(device) x_test = torch.from_numpy(x_test).float().to(device) y_train = torch.from_numpy(y_train).float().to(device) y_test = torch.from_numpy(y_test).float().to(device) num_nodes = x_train.shape[0] + x_test.shape[0] num_train = x_train.shape[0] x = torch.cat((x_train, x_test), dim=0) y = torch.cat((y_train, y_test), dim=0) adm = cal.path_graph(num_nodes) # adm = cal.ts_un(num_nodes, 6) edge_index, edge_weight = cal.tran_adm_to_edge_index(adm) train_index = torch.arange(num_train, dtype=torch.long) test_index = torch.arange(num_train, num_nodes, dtype=torch.long) train_mask = cal.index_to_mask(train_index, num_nodes).to(device) test_mask = cal.index_to_mask(test_index, num_nodes).to(device) # Using ResGraphNet, predicting time series (The Proposed Network Model) model = cal.GNNTime(l_x, hidden_dim, l_y, edge_weight, gnn_style, num_nodes).to(device) criterion = torch.nn.MSELoss().to(device) optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay) edge_index = edge_index.to(device) start_time = datetime.datetime.now() print("Running, {}".format(gnn_style)) for epoch in range(epochs): model.train() optimizer.zero_grad() output = model(x, edge_index) output_train, y_train = output[train_mask], y[train_mask] train_loss = criterion(output_train[:, -1], y_train[:, -1]) train_loss.backward() optimizer.step() model.eval() y_test_1 = y[test_mask][:-len_interp-l_y, :] y_test_2 = y[test_mask][-l_y:, :] y_test = torch.cat((y_test_1, y_test_2), dim=0) output_test = output[test_mask][:-len_interp, :] test_loss = criterion(output_test[:, -1], y_test[:, -1]) train_true = y_train.detach().cpu().numpy()[:, -1] train_predict = 
output_train.detach().cpu().numpy()[:, -1] test_true = y_test.detach().cpu().numpy()[:, -1] test_predict = output_test.detach().cpu().numpy()[:, -1] r2_train = cal.get_r2_score(train_predict, train_true, axis=1) r2_test = cal.get_r2_score(test_predict, test_true, axis=1) if (epoch + 1) % 100 == 0: print("Epoch: {:05d} Loss_Train: {:.5f} Loss_Test: {:.5f} R2_Train: {:.7f} R2_Test: {:.7f}". format(epoch + 1, train_loss.item(), test_loss.item(), r2_train, r2_test)) # predict and plot future time series plot_predict = test_predict[-l_y:] plot_true = test_true[-l_y:] mse_plot = np.mean(np.square(plot_predict - plot_true)) print("mse_plot: {}".format(mse_plot)) cal.plot_spiral(plot_predict) # predict results in the coming year if save_fig: plt.savefig(osp.join(result_address, "future_predict.png")) cal.plot_spiral(plot_true) # true data in the coming year if save_fig: plt.savefig(osp.join(result_address, "future_true.png")) # calculate running time end_time = datetime.datetime.now() run_time = end_time - start_time # The running time of program # save model and numpy.file if save_model: torch.save(model, osp.join(result_address, "{}.pkl".format(gnn_style))) if save_np: np.save(osp.join(result_address, "train_true.npy"), train_true) np.save(osp.join(result_address, "test_true.npy"), test_true) np.save(osp.join(result_address, "train_predict_{}.npy".format(gnn_style)), train_predict) np.save(osp.join(result_address, "test_predict_{}.npy".format(gnn_style)), test_predict) # plot the error and results e_gnn = test_true - test_predict cal.plot_distribute(e_gnn, 40, 4, x_name="e") if save_fig: plt.savefig(osp.join(result_address, ts_name + "_" + gnn_style + "_error_distribution.png")) cal.plot_result(train_true, test_true, train_predict, test_predict, iv, way, fig_size) if save_fig: plt.savefig(osp.join(result_address, ts_name + "_" + gnn_style + ".png")) # print indicators rmse_train = cal.get_rmse(train_predict, train_true) rmse_test = cal.get_rmse(test_predict, test_true) r2_train = cal.get_r2_score(train_predict, train_true, axis=1) r2_test = cal.get_r2_score(test_predict, test_true, axis=1) print("{}: RMSE_Train={:.5f} RMSE_Test={:.5f} R2_Train={:.7f} R2_Test={:.7f}". format(gnn_style, rmse_train, rmse_test, r2_train, r2_test)) # The output results of each model are appended to the file if save_txt: info_txt_address = osp.join(result_address, "ResGraphNet_result.txt") # txt file address for saving parameter information info_df_address = osp.join(result_address, "ResGraphNet_result.csv") # csv file address for saving parameter information f = open(info_txt_address, 'a') if osp.getsize(info_txt_address) == 0: # add the name of each feature in the first line of the text f.write("gnn_style r2_test r2_train run_time l_x l_y hidden_dim lr epochs\n") f.write(str(gnn_style) + " ") f.write(str(r2_test) + " ") f.write(str(r2_train) + " ") f.write(str(run_time) + " ") f.write(str(l_x) + " ") f.write(str(l_y) + " ") f.write(str(hidden_dim) + " ") f.write(str(lr) + " ") f.write(str(epochs) + " ") f.write("\n") # Prepare for next running f.close() # close file info = np.loadtxt(info_txt_address, dtype=str) columns = info[0, :].tolist() values = info[1:, :] info_df = pd.DataFrame(values, columns=columns) info_df.to_csv(info_df_address) print() plt.show() print()
40.179348
126
0.685108
import datetime
import numpy as np
import pandas as pd
import torch
import os
import os.path as osp
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
import func.cal as cal

device = "cuda:0" if torch.cuda.is_available() else "cpu"
l_x = 60
l_y = 1
lr = 0.0001
weight_decay = 5e-4
epochs = 4000
hidden_dim = 64
gnn_style = "ResGraphNet"
save_fig = True
save_txt = False
save_np = True
save_model = True
ratio_train = 0.5
fig_size = (16, 12)
ts_name_all = ["cli_dash", "HadCRUT5", "temp_month", "temp_year", "elect", "traffic", "sales"]
ts_name_folder = "HadCRUT5"
ts_name = "HadCRUT5_global"
iv = 1
way = "mean"

x_address = osp.join("../datasets", ts_name_folder, ts_name + ".npy")
x = np.load(x_address)
num = x.shape[0]

result_address = osp.join("../result", ts_name, "ResGraphNet")
if not(osp.exists(result_address)):
    os.makedirs(result_address)

num_train = int(ratio_train * num)
data_train, data_test = x[:num_train], x[num_train:num]

len_interp = l_y + 6
data_test_ = np.array(data_test[:-l_y].tolist() + data_test[-len_interp-l_y:-l_y].tolist() + data_test[-l_y:].tolist())

x_train, y_train = cal.create_inout_sequences(data_train, l_x, l_y, style="arr")
x_test, y_test = cal.create_inout_sequences(data_test_, l_x, l_y, style="arr")
x_train = torch.from_numpy(x_train).float().to(device)
x_test = torch.from_numpy(x_test).float().to(device)
y_train = torch.from_numpy(y_train).float().to(device)
y_test = torch.from_numpy(y_test).float().to(device)
num_nodes = x_train.shape[0] + x_test.shape[0]
num_train = x_train.shape[0]
x = torch.cat((x_train, x_test), dim=0)
y = torch.cat((y_train, y_test), dim=0)

adm = cal.path_graph(num_nodes)
edge_index, edge_weight = cal.tran_adm_to_edge_index(adm)

train_index = torch.arange(num_train, dtype=torch.long)
test_index = torch.arange(num_train, num_nodes, dtype=torch.long)
train_mask = cal.index_to_mask(train_index, num_nodes).to(device)
test_mask = cal.index_to_mask(test_index, num_nodes).to(device)

model = cal.GNNTime(l_x, hidden_dim, l_y, edge_weight, gnn_style, num_nodes).to(device)
criterion = torch.nn.MSELoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
edge_index = edge_index.to(device)

start_time = datetime.datetime.now()
print("Running, {}".format(gnn_style))
for epoch in range(epochs):
    model.train()
    optimizer.zero_grad()
    output = model(x, edge_index)
    output_train, y_train = output[train_mask], y[train_mask]
    train_loss = criterion(output_train[:, -1], y_train[:, -1])
    train_loss.backward()
    optimizer.step()

    model.eval()
    y_test_1 = y[test_mask][:-len_interp-l_y, :]
    y_test_2 = y[test_mask][-l_y:, :]
    y_test = torch.cat((y_test_1, y_test_2), dim=0)
    output_test = output[test_mask][:-len_interp, :]
    test_loss = criterion(output_test[:, -1], y_test[:, -1])

    train_true = y_train.detach().cpu().numpy()[:, -1]
    train_predict = output_train.detach().cpu().numpy()[:, -1]
    test_true = y_test.detach().cpu().numpy()[:, -1]
    test_predict = output_test.detach().cpu().numpy()[:, -1]
    r2_train = cal.get_r2_score(train_predict, train_true, axis=1)
    r2_test = cal.get_r2_score(test_predict, test_true, axis=1)
    if (epoch + 1) % 100 == 0:
        print("Epoch: {:05d} Loss_Train: {:.5f} Loss_Test: {:.5f} R2_Train: {:.7f} R2_Test: {:.7f}".
              format(epoch + 1, train_loss.item(), test_loss.item(), r2_train, r2_test))

plot_predict = test_predict[-l_y:]
plot_true = test_true[-l_y:]
mse_plot = np.mean(np.square(plot_predict - plot_true))
print("mse_plot: {}".format(mse_plot))
cal.plot_spiral(plot_predict)
if save_fig:
    plt.savefig(osp.join(result_address, "future_predict.png"))
cal.plot_spiral(plot_true)
if save_fig:
    plt.savefig(osp.join(result_address, "future_true.png"))

end_time = datetime.datetime.now()
run_time = end_time - start_time

if save_model:
    torch.save(model, osp.join(result_address, "{}.pkl".format(gnn_style)))
if save_np:
    np.save(osp.join(result_address, "train_true.npy"), train_true)
    np.save(osp.join(result_address, "test_true.npy"), test_true)
    np.save(osp.join(result_address, "train_predict_{}.npy".format(gnn_style)), train_predict)
    np.save(osp.join(result_address, "test_predict_{}.npy".format(gnn_style)), test_predict)

e_gnn = test_true - test_predict
cal.plot_distribute(e_gnn, 40, 4, x_name="e")
if save_fig:
    plt.savefig(osp.join(result_address, ts_name + "_" + gnn_style + "_error_distribution.png"))
cal.plot_result(train_true, test_true, train_predict, test_predict, iv, way, fig_size)
if save_fig:
    plt.savefig(osp.join(result_address, ts_name + "_" + gnn_style + ".png"))

rmse_train = cal.get_rmse(train_predict, train_true)
rmse_test = cal.get_rmse(test_predict, test_true)
r2_train = cal.get_r2_score(train_predict, train_true, axis=1)
r2_test = cal.get_r2_score(test_predict, test_true, axis=1)
print("{}: RMSE_Train={:.5f} RMSE_Test={:.5f} R2_Train={:.7f} R2_Test={:.7f}".
      format(gnn_style, rmse_train, rmse_test, r2_train, r2_test))

if save_txt:
    info_txt_address = osp.join(result_address, "ResGraphNet_result.txt")
    info_df_address = osp.join(result_address, "ResGraphNet_result.csv")
    f = open(info_txt_address, 'a')
    if osp.getsize(info_txt_address) == 0:
        f.write("gnn_style r2_test r2_train run_time l_x l_y hidden_dim lr epochs\n")
    f.write(str(gnn_style) + " ")
    f.write(str(r2_test) + " ")
    f.write(str(r2_train) + " ")
    f.write(str(run_time) + " ")
    f.write(str(l_x) + " ")
    f.write(str(l_y) + " ")
    f.write(str(hidden_dim) + " ")
    f.write(str(lr) + " ")
    f.write(str(epochs) + " ")
    f.write("\n")
    f.close()
    info = np.loadtxt(info_txt_address, dtype=str)
    columns = info[0, :].tolist()
    values = info[1:, :]
    info_df = pd.DataFrame(values, columns=columns)
    info_df.to_csv(info_df_address)

print()
plt.show()
print()
true
true
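The script hinges on turning a 1-D series into (l_x, l_y) sliding windows, each of which later becomes a graph node. cal.create_inout_sequences is project code from func/cal.py; a plausible minimal sketch of the windowing it appears to perform (hypothetical, for intuition only; the real function also takes a style argument):

import numpy as np

def create_inout_sequences(data, l_x, l_y):
    # Slide a window of l_x inputs followed by l_y targets over the series.
    xs, ys = [], []
    for i in range(len(data) - l_x - l_y + 1):
        xs.append(data[i:i + l_x])
        ys.append(data[i + l_x:i + l_x + l_y])
    return np.array(xs), np.array(ys)

x_win, y_win = create_inout_sequences(np.arange(10.0), l_x=4, l_y=1)
print(x_win.shape, y_win.shape)  # (6, 4) (6, 1)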
f7038e23fe0fcedf0278b48b1b3477e90f408cec
8,573
py
Python
idea.py
JaworWr/Dynamic-inverse-kinematics
b9da50b88152682060075a44da940e6f98690a9a
[ "MIT" ]
null
null
null
idea.py
JaworWr/Dynamic-inverse-kinematics
b9da50b88152682060075a44da940e6f98690a9a
[ "MIT" ]
null
null
null
idea.py
JaworWr/Dynamic-inverse-kinematics
b9da50b88152682060075a44da940e6f98690a9a
[ "MIT" ]
null
null
null
import numpy as np

def FNS(scores):
    # domination[i, j] = "i dominates j"
    domination = np.all(scores[:, None, :] <= scores[None, :, :], axis=2)
    domination &= np.any(scores[:, None, :] < scores[None, :, :], axis=2)
    Nx = domination.sum(0)
    Pf = []
    ranks = np.zeros(scores.shape[0])
    r = 0
    Q = np.nonzero(Nx == 0)[0]
    while Q.size > 0:
        Nx[Q] = -1
        Pf.append(Q)
        ranks[Q] = r
        r += 1
        for i in Q:
            Nx[domination[i, :]] -= 1
        Q = np.nonzero(Nx == 0)[0]
    return Pf, ranks

def crowding_distance(scores):
    indices = np.argsort(scores, 0)
    sorted_scores = np.take_along_axis(scores, indices, 0)
    cd = np.zeros(scores.shape[0])
    for k in range(scores.shape[1]):
        if sorted_scores[-1, k] != sorted_scores[0, k]:
            cd[indices[[0, -1], k]] = np.inf
            cd[indices[1:-1, k]] += (sorted_scores[2:, k] - sorted_scores[:-2, k]) / (
                sorted_scores[-1, k] - sorted_scores[0, k])
    return cd

def random_population(d, n, x_min, x_max):
    return np.hstack([np.random.uniform(x_min, x_max, (n, d))])

def tournament_selection(ranks, dists, n):
    candidates = np.random.choice(n, (n, 2), replace=True)
    mask = np.where(
        ranks[candidates[:, 0]] == ranks[candidates[:, 1]],
        dists[candidates[:, 0]] > dists[candidates[:, 1]],
        ranks[candidates[:, 0]] < ranks[candidates[:, 1]]
    )
    result = candidates[:, 1]
    result[mask] = candidates[mask, 0]
    return result

def crossover(x, p, eta):
    # simulated binary crossover
    n, d = x.shape
    l = n // 2
    mask = np.random.random((l, d)) <= p
    m = np.sum(mask)
    mi = np.random.random(m)
    beta = np.where(
        mi < 0.5,
        np.power(2 * mi, 1. / (eta + 1.)),
        np.power(1. / (2. * (1 - mi)), 1. / (eta + 1.))
    )
    c1 = x[:l, :].copy()
    c2 = x[l:, :].copy()
    c1[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask]
    # second child mirrors the first with the beta weights swapped (SBX)
    c2[mask] = 0.5 * (1 - beta) * x[:l, :][mask] + 0.5 * (1 + beta) * x[l:, :][mask]
    return np.vstack([c1, c2])

def mutation(x, x_min, x_max, p, eta):
    # polynomial mutation
    n, d = x.shape
    mask = np.random.random((n, d)) <= p
    if isinstance(x_min, np.ndarray):
        x_min = np.repeat(x_min[None, :], n, axis=0)
        x_min = x_min[mask]
    if isinstance(x_max, np.ndarray):
        x_max = np.repeat(x_max[None, :], n, axis=0)
        x_max = x_max[mask]
    m = np.sum(mask)
    mi = np.random.random(m)
    beta = np.where(
        mi < 0.5,
        np.power(2 * mi, 1. / (eta + 1.)) - 1.,
        1. - np.power(2. * (1 - mi), 1. / (eta + 1.))
    )
    y = x.copy()
    y[mask] = np.where(
        mi < 0.5,
        x[mask] + beta * (x[mask] - x_min),
        x[mask] + beta * (x_max - x[mask])
    )
    return y

def elitist_selection(fronts, dists, to_take):
    taken = []
    for front in fronts:
        if len(front) <= to_take:
            taken += list(front)
            if len(front) == to_take:
                break
            to_take -= len(front)
        else:
            indices = np.argsort(-dists[front])[:to_take]
            taken += list(front[indices])
            break
    return taken

def constraint_violation(constraints):
    n, d = constraints.shape
    sort_indices = np.argsort(constraints, 0)
    violations = np.zeros(n)
    for i in range(d):
        values, counts = np.unique(constraints[:, i], return_counts=True)
        # unique values are returned sorted
        counts = np.cumsum(counts)
        counts = list(counts)
        if values[0] != 0:
            counts = [0] + counts
        for rank, (j, k) in enumerate(zip([0] + counts, counts + [len(counts)])):
            violations[sort_indices[j:k, i]] += rank
    return violations

def evaluation(objective, n_constraints, population):
    obj_results = objective(population)
    constraint_values = obj_results[:, -n_constraints:]
    violation_measure = constraint_violation(constraint_values)
    scores = np.concatenate([obj_results[:, :-n_constraints], violation_measure[:, None]], 1)
    return scores

def split_and_select(population, scores, n_f, n_inf):
    dists = crowding_distance(scores)
    mask_f = scores[:, -1] == 0
    population_f = population[mask_f, :]
    scores_f = scores[mask_f, :]
    dists_f = dists[mask_f]
    population_inf = population[~mask_f, :]
    scores_inf = scores[~mask_f, :]
    dists_inf = dists[~mask_f]
    s_f = population_f.shape[0]
    s_inf = population_inf.shape[0]
    n = n_f + n_inf
    if s_f < n_f:
        to_take_f = s_f
        to_take_inf = n - s_f
    elif s_inf < n_inf:
        to_take_inf = s_inf
        to_take_f = n - s_inf
    else:
        to_take_f = n_f
        to_take_inf = n_inf
    fronts_f, ranks_f = FNS(scores_f)
    taken_f = elitist_selection(fronts_f, dists_f, to_take_f)
    fronts_inf, ranks_inf = FNS(scores_inf)
    taken_inf = elitist_selection(fronts_inf, dists_inf, to_take_inf)
    return population_f[taken_f, :], population_inf[taken_inf, :], scores_f[taken_f, :], scores_inf[taken_inf, :]

def IDEA(objective, n_constraints, x_min, x_max, d, n, *args, **kwargs):
    population = random_population(d, n, x_min, x_max)
    return sub_IDEA(population, objective, n_constraints, x_min, x_max, n, *args, **kwargs)

def dynamic_IDEA(objective, n_constraints, T, x_min, x_max, d, n, alpha_inf, *args,
                 num_iterations_init, num_iterations, n_immigrants=0, **kwargs):
    population = random_population(d, n, x_min, x_max)
    print("=" * 80)
    print("t=0")
    print("=" * 80)
    t = 0

    def round_objective(round_population):
        return objective(t, round_population)

    p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf,
                    *args, num_iterations=num_iterations_init, **kwargs)
    population_history = [p]
    score_history = [s]
    n_to_keep = n - n_immigrants
    n_inf = int(n_to_keep * alpha_inf)
    n_f = n_to_keep - n_inf
    for t in range(1, T):
        print("=" * 80)
        print(f"t={t}")
        print("=" * 80)
        population = p[-1, :, :]
        scores = s[-1, :, :]
        if n_immigrants > 0:
            population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
            immigrants = random_population(d, n_immigrants, x_min, x_max)
            population = np.vstack([population_f, population_inf, immigrants])
            assert population.shape[0] == n
        p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf,
                        *args, num_iterations=num_iterations, **kwargs)
        population_history.append(p)
        score_history.append(s)
    return population_history, score_history

def sub_IDEA(population, objective, n_constraints, x_min, x_max, n, alpha_inf,
             eta_c, eta_m, p_c, p_m, num_iterations, log_interval=10):
    n_inf = int(n * alpha_inf)
    n_f = n - n_inf
    populations = []
    scores = evaluation(objective, n_constraints, population)
    scores_hist = []
    fronts, ranks = FNS(scores)
    dists = crowding_distance(scores)

    def log_message():
        count_f = population_f.shape[0]
        count_inf = population_inf.shape[0]
        print(
            f"Iteration {iter_}, " +
            f"#feasible: {count_f}, best: {scores_f[:, :-1].min(0) if count_f > 0 else '-'}, " +
            f"#infeasible: {count_inf}, best: {scores_inf.min(0) if count_inf > 0 else '-'}"
        )

    for iter_ in range(num_iterations):
        parent_indices = tournament_selection(ranks, dists, n)
        offspring = crossover(population[parent_indices, :], p_c, eta_c)
        offspring = np.clip(offspring, x_min, x_max)
        offspring = mutation(offspring, x_min, x_max, p_m, eta_m)
        offspring_scores = evaluation(objective, n_constraints, offspring)
        population = np.vstack([population, offspring])
        scores = np.vstack([scores, offspring_scores])
        population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
        population = np.vstack([population_f, population_inf])
        scores = np.vstack([scores_f, scores_inf])
        fronts, ranks = FNS(scores)
        dists = crowding_distance(scores)
        populations.append(population.copy())
        scores_hist.append(scores.copy())
        if iter_ % log_interval == 0:
            log_message()
    log_message()
    return np.stack(populations, 0), np.stack(scores_hist, 0)
32.973077
118
0.591508
import numpy as np

def FNS(scores):
    domination = np.all(scores[:, None, :] <= scores[None, :, :], axis=2)
    domination &= np.any(scores[:, None, :] < scores[None, :, :], axis=2)
    Nx = domination.sum(0)
    Pf = []
    ranks = np.zeros(scores.shape[0])
    r = 0
    Q = np.nonzero(Nx == 0)[0]
    while Q.size > 0:
        Nx[Q] = -1
        Pf.append(Q)
        ranks[Q] = r
        r += 1
        for i in Q:
            Nx[domination[i, :]] -= 1
        Q = np.nonzero(Nx == 0)[0]
    return Pf, ranks

def crowding_distance(scores):
    indices = np.argsort(scores, 0)
    sorted_scores = np.take_along_axis(scores, indices, 0)
    cd = np.zeros(scores.shape[0])
    for k in range(scores.shape[1]):
        if sorted_scores[-1, k] != sorted_scores[0, k]:
            cd[indices[[0, -1], k]] = np.inf
            cd[indices[1:-1, k]] += (sorted_scores[2:, k] - sorted_scores[:-2, k]) / (
                sorted_scores[-1, k] - sorted_scores[0, k])
    return cd

def random_population(d, n, x_min, x_max):
    return np.hstack([np.random.uniform(x_min, x_max, (n, d))])

def tournament_selection(ranks, dists, n):
    candidates = np.random.choice(n, (n, 2), replace=True)
    mask = np.where(
        ranks[candidates[:, 0]] == ranks[candidates[:, 1]],
        dists[candidates[:, 0]] > dists[candidates[:, 1]],
        ranks[candidates[:, 0]] < ranks[candidates[:, 1]]
    )
    result = candidates[:, 1]
    result[mask] = candidates[mask, 0]
    return result

def crossover(x, p, eta):
    n, d = x.shape
    l = n // 2
    mask = np.random.random((l, d)) <= p
    m = np.sum(mask)
    mi = np.random.random(m)
    beta = np.where(
        mi < 0.5,
        np.power(2 * mi, 1. / (eta + 1.)),
        np.power(1. / (2. * (1 - mi)), 1. / (eta + 1.))
    )
    c1 = x[:l, :].copy()
    c2 = x[l:, :].copy()
    c1[mask] = 0.5 * (1 + beta) * x[:l, :][mask] + 0.5 * (1 - beta) * x[l:, :][mask]
    c2[mask] = 0.5 * (1 - beta) * x[:l, :][mask] + 0.5 * (1 + beta) * x[l:, :][mask]
    return np.vstack([c1, c2])

def mutation(x, x_min, x_max, p, eta):
    n, d = x.shape
    mask = np.random.random((n, d)) <= p
    if isinstance(x_min, np.ndarray):
        x_min = np.repeat(x_min[None, :], n, axis=0)
        x_min = x_min[mask]
    if isinstance(x_max, np.ndarray):
        x_max = np.repeat(x_max[None, :], n, axis=0)
        x_max = x_max[mask]
    m = np.sum(mask)
    mi = np.random.random(m)
    beta = np.where(
        mi < 0.5,
        np.power(2 * mi, 1. / (eta + 1.)) - 1.,
        1. - np.power(2. * (1 - mi), 1. / (eta + 1.))
    )
    y = x.copy()
    y[mask] = np.where(
        mi < 0.5,
        x[mask] + beta * (x[mask] - x_min),
        x[mask] + beta * (x_max - x[mask])
    )
    return y

def elitist_selection(fronts, dists, to_take):
    taken = []
    for front in fronts:
        if len(front) <= to_take:
            taken += list(front)
            if len(front) == to_take:
                break
            to_take -= len(front)
        else:
            indices = np.argsort(-dists[front])[:to_take]
            taken += list(front[indices])
            break
    return taken

def constraint_violation(constraints):
    n, d = constraints.shape
    sort_indices = np.argsort(constraints, 0)
    violations = np.zeros(n)
    for i in range(d):
        values, counts = np.unique(constraints[:, i], return_counts=True)
        counts = np.cumsum(counts)
        counts = list(counts)
        if values[0] != 0:
            counts = [0] + counts
        for rank, (j, k) in enumerate(zip([0] + counts, counts + [len(counts)])):
            violations[sort_indices[j:k, i]] += rank
    return violations

def evaluation(objective, n_constraints, population):
    obj_results = objective(population)
    constraint_values = obj_results[:, -n_constraints:]
    violation_measure = constraint_violation(constraint_values)
    scores = np.concatenate([obj_results[:, :-n_constraints], violation_measure[:, None]], 1)
    return scores

def split_and_select(population, scores, n_f, n_inf):
    dists = crowding_distance(scores)
    mask_f = scores[:, -1] == 0
    population_f = population[mask_f, :]
    scores_f = scores[mask_f, :]
    dists_f = dists[mask_f]
    population_inf = population[~mask_f, :]
    scores_inf = scores[~mask_f, :]
    dists_inf = dists[~mask_f]
    s_f = population_f.shape[0]
    s_inf = population_inf.shape[0]
    n = n_f + n_inf
    if s_f < n_f:
        to_take_f = s_f
        to_take_inf = n - s_f
    elif s_inf < n_inf:
        to_take_inf = s_inf
        to_take_f = n - s_inf
    else:
        to_take_f = n_f
        to_take_inf = n_inf
    fronts_f, ranks_f = FNS(scores_f)
    taken_f = elitist_selection(fronts_f, dists_f, to_take_f)
    fronts_inf, ranks_inf = FNS(scores_inf)
    taken_inf = elitist_selection(fronts_inf, dists_inf, to_take_inf)
    return population_f[taken_f, :], population_inf[taken_inf, :], scores_f[taken_f, :], scores_inf[taken_inf, :]

def IDEA(objective, n_constraints, x_min, x_max, d, n, *args, **kwargs):
    population = random_population(d, n, x_min, x_max)
    return sub_IDEA(population, objective, n_constraints, x_min, x_max, n, *args, **kwargs)

def dynamic_IDEA(objective, n_constraints, T, x_min, x_max, d, n, alpha_inf, *args,
                 num_iterations_init, num_iterations, n_immigrants=0, **kwargs):
    population = random_population(d, n, x_min, x_max)
    print("=" * 80)
    print("t=0")
    print("=" * 80)
    t = 0

    def round_objective(round_population):
        return objective(t, round_population)

    p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf,
                    *args, num_iterations=num_iterations_init, **kwargs)
    population_history = [p]
    score_history = [s]
    n_to_keep = n - n_immigrants
    n_inf = int(n_to_keep * alpha_inf)
    n_f = n_to_keep - n_inf
    for t in range(1, T):
        print("=" * 80)
        print(f"t={t}")
        print("=" * 80)
        population = p[-1, :, :]
        scores = s[-1, :, :]
        if n_immigrants > 0:
            population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
            immigrants = random_population(d, n_immigrants, x_min, x_max)
            population = np.vstack([population_f, population_inf, immigrants])
            assert population.shape[0] == n
        p, s = sub_IDEA(population, round_objective, n_constraints, x_min, x_max, n, alpha_inf,
                        *args, num_iterations=num_iterations, **kwargs)
        population_history.append(p)
        score_history.append(s)
    return population_history, score_history

def sub_IDEA(population, objective, n_constraints, x_min, x_max, n, alpha_inf,
             eta_c, eta_m, p_c, p_m, num_iterations, log_interval=10):
    n_inf = int(n * alpha_inf)
    n_f = n - n_inf
    populations = []
    scores = evaluation(objective, n_constraints, population)
    scores_hist = []
    fronts, ranks = FNS(scores)
    dists = crowding_distance(scores)

    def log_message():
        count_f = population_f.shape[0]
        count_inf = population_inf.shape[0]
        print(
            f"Iteration {iter_}, " +
            f"#feasible: {count_f}, best: {scores_f[:, :-1].min(0) if count_f > 0 else '-'}, " +
            f"#infeasible: {count_inf}, best: {scores_inf.min(0) if count_inf > 0 else '-'}"
        )

    for iter_ in range(num_iterations):
        parent_indices = tournament_selection(ranks, dists, n)
        offspring = crossover(population[parent_indices, :], p_c, eta_c)
        offspring = np.clip(offspring, x_min, x_max)
        offspring = mutation(offspring, x_min, x_max, p_m, eta_m)
        offspring_scores = evaluation(objective, n_constraints, offspring)
        population = np.vstack([population, offspring])
        scores = np.vstack([scores, offspring_scores])
        population_f, population_inf, scores_f, scores_inf = split_and_select(population, scores, n_f, n_inf)
        population = np.vstack([population_f, population_inf])
        scores = np.vstack([scores_f, scores_inf])
        fronts, ranks = FNS(scores)
        dists = crowding_distance(scores)
        populations.append(population.copy())
        scores_hist.append(scores.copy())
        if iter_ % log_interval == 0:
            log_message()
    log_message()
    return np.stack(populations, 0), np.stack(scores_hist, 0)
true
true
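FNS above is fast non-dominated sorting over an (n_points, n_objectives) score matrix where smaller is better. A tiny self-contained check (assumes FNS and crowding_distance from the snippet above are in scope):

import numpy as np

scores = np.array([
    [1.0, 4.0],   # front 0
    [2.0, 3.0],   # front 0: trades off against the first point
    [3.0, 3.0],   # dominated by [2, 3]
    [1.0, 4.0],   # duplicate of the first: neither dominates the other
])
fronts, ranks = FNS(scores)
print(ranks)  # -> [0. 0. 1. 0.]
# With only four points in two objectives, every point sits on a sort boundary,
# so every crowding distance here comes out infinite.
print(crowding_distance(scores))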
f7038fc0bc5d2f6096f6475db9c9bcecc2181c6d
8,118
py
Python
proxy/utils.py
rmarx/quic_iot
56e3c184bdaa20c065150c33851a5b6608987b8b
[ "MIT" ]
1
2020-09-28T11:34:28.000Z
2020-09-28T11:34:28.000Z
proxy/utils.py
rmarx/quic_iot
56e3c184bdaa20c065150c33851a5b6608987b8b
[ "MIT" ]
null
null
null
proxy/utils.py
rmarx/quic_iot
56e3c184bdaa20c065150c33851a5b6608987b8b
[ "MIT" ]
1
2021-04-05T16:19:51.000Z
2021-04-05T16:19:51.000Z
""" Misc functions. """ import ipaddress import datetime import hashlib import json import netaddr import netifaces import os import re import requests import scapy.all as sc import subprocess import sys import threading import time import traceback import uuid import server_config IPv4_REGEX = re.compile(r'[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}') sc.conf.verb = 0 # If non empty, then only devices with the following MAC addresses with be # inspected. Do not populate this list in production. For internal testing. TEST_OUI_LIST = [ # 'd83134', # Roku # '74f61c', # Danny's Pixel phone ] # Make sure Inspector's directory exits home_dir = os.path.join(os.path.expanduser('~'), 'princeton-iot-inspector') if not os.path.isdir(home_dir): os.mkdir(home_dir) def is_ipv4_addr(value): return IPv4_REGEX.match(value) def get_user_config(): """Returns the user_config dict.""" user_config_file = os.path.join( os.path.expanduser('~'), 'princeton-iot-inspector', 'iot_inspector_config.json' ) try: with open(user_config_file) as fp: return json.load(fp) except Exception: pass while True: user_key = requests.get(server_config.NEW_USER_URL).text.strip() # Make sure we're not getting server's error messages if len(user_key) == 32: break time.sleep(1) user_key = user_key.replace('-', '') secret_salt = str(uuid.uuid4()) with open(user_config_file, 'w') as fp: config_dict = { 'user_key': user_key, 'secret_salt': secret_salt } json.dump(config_dict, fp) return config_dict class TimeoutError(Exception): pass _lock = threading.Lock() def log(*args): log_str = '[%s] ' % datetime.datetime.today() log_str += ' '.join([str(v) for v in args]) log_file_path = os.path.join( os.path.expanduser('~'), 'princeton-iot-inspector', 'iot_inspector_logs.txt' ) print(log_str) with open(log_file_path, 'a') as fp: fp.write(log_str + '\n') def get_gateway_ip(timeout=10): """Returns the IP address of the gateway.""" return get_default_route(timeout)[0] def get_host_ip(timeout=10): """Returns the host's local IP (where IoT Inspector client runs).""" return get_default_route(timeout)[2] def _get_routes(): while True: sc.conf.route.resync() routes = sc.conf.route.routes if routes: return routes time.sleep(1) def get_default_route(): """Returns (gateway_ip, iface, host_ip).""" while True: routes = _get_routes() # Look for network = 0.0.0.0, netmask = 0.0.0.0 for default_route in routes: if default_route[0] == 0 and default_route[1] == 0: #return default_route[2:5] return ('192.168.5.1', 'wlan0', '192.168.5.7') log('get_default_route: retrying') time.sleep(1) def get_network_ip_range_windows(): default_iface = get_default_route() iface_filter = default_iface[1] print(default_iface) ip_set = set() iface_ip = iface_filter.ip iface_guid = iface_filter.guid for k, v in netifaces.ifaddresses(iface_guid).items(): if v[0]['addr'] == iface_ip: netmask = v[0]['netmask'] break network = netaddr.IPAddress(iface_ip) cidr = netaddr.IPAddress(netmask).netmask_bits() subnet = netaddr.IPNetwork('{}/{}'.format(network, cidr)) return ip_set def get_network_ip_range(): return set(['192.168.5.1', '192.168.5.6', '192.168.5.14', '192.168.5.15', '192.168.5.19']) def gget_network_ip_range(): """ Gets network IP range for the default interface specified by scapy.conf.iface """ ip_set = set() default_route = get_default_route() iface_str = '' if sys.platform.startswith('win'): iface_info = sc.conf.iface iface_str = iface_info.guid else: iface_str = sc.conf.iface netmask = None for k, v in netifaces.ifaddresses(iface_str).items(): if v[0]['addr'] == 
default_route[2]: netmask = v[0]['netmask'] break # Netmask is None when user runs VPN. if netmask is None: return set() gateway_ip = netaddr.IPAddress(default_route[0]) cidr = netaddr.IPAddress(netmask).netmask_bits() subnet = netaddr.IPNetwork('{}/{}'.format(gateway_ip, cidr)) for ip in subnet: ip_set.add(str(ip)) print('ip_set', ip_set) 1/0 return ip_set def get_my_mac(): """Returns the MAC addr of the default route interface.""" mac_set = get_my_mac_set(iface_filter=get_default_route()[1]) return mac_set.pop() def get_my_mac_set(iface_filter=None): """Returns a set of MAC addresses of the current host.""" out_set = set() if sys.platform.startswith("win"): from scapy.arch.windows import NetworkInterface if type(iface_filter) == NetworkInterface: out_set.add(iface_filter.mac) for iface in sc.get_if_list(): if iface_filter is not None and iface != iface_filter: continue try: mac = sc.get_if_hwaddr(iface) except Exception as e: continue else: out_set.add(mac) return out_set class _SafeRunError(object): """Used privately to denote error state in safe_run().""" def __init__(self): pass def restart_upon_crash(func, args=[], kwargs={}): """Restarts func upon unexpected exception and logs stack trace.""" while True: result = safe_run(func, args, kwargs) if isinstance(result, _SafeRunError): time.sleep(1) continue return result def safe_run(func, args=[], kwargs={}): """Returns _SafeRunError() upon failure and logs stack trace.""" try: return func(*args, **kwargs) except Exception as e: err_msg = '=' * 80 + '\n' err_msg += 'Time: %s\n' % datetime.datetime.today() err_msg += 'Function: %s %s %s\n' % (func, args, kwargs) err_msg += 'Exception: %s\n' % e err_msg += str(traceback.format_exc()) + '\n\n\n' with _lock: sys.stderr.write(err_msg + '\n') log(err_msg) return _SafeRunError() def get_device_id(device_mac, host_state): device_mac = str(device_mac).lower().replace(':', '') s = device_mac + str(host_state.secret_salt) return 's' + hashlib.sha256(s.encode('utf-8')).hexdigest()[0:10] def smart_max(v1, v2): """ Returns max value even if one value is None. Python cannot compare None and int, so build a wrapper around it. """ if v1 is None: return v2 if v2 is None: return v1 return max(v1, v2) def smart_min(v1, v2): """ Returns min value even if one of the value is None. By default min(None, x) == None per Python default behavior. """ if v1 is None: return v2 if v2 is None: return v1 return min(v1, v2) def get_min_max_tuple(min_max_tuple, value): """ Returns a new min_max_tuple with value considered. For example: min_max_tuple = (2, 3) print get_min_max_tuple(min_max_tuple, 4) We get back (2, 4). """ min_v, max_v = min_max_tuple min_v = smart_min(min_v, value) max_v = smart_max(max_v, value) return (min_v, max_v) def get_oui(mac): return mac.replace(':', '').lower()[0:6] def get_os(): """Returns 'mac', 'linux', or 'windows'. Raises RuntimeError otherwise.""" os_platform = sys.platform if os_platform.startswith('darwin'): return 'mac' if os_platform.startswith('linux'): return 'linux' if os_platform.startswith('win'): return 'windows' raise RuntimeError('Unsupported operating system.') def open_browser_on_windows(url): try: subprocess.call(['start', '', url], shell=True) except Exception: pass
22.363636
91
0.615176
import ipaddress
import datetime
import hashlib
import json
import netaddr
import netifaces
import os
import re
import requests
import scapy.all as sc
import subprocess
import sys
import threading
import time
import traceback
import uuid

import server_config

IPv4_REGEX = re.compile(r'[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}\.[0-9]{0,3}')

sc.conf.verb = 0

TEST_OUI_LIST = [
]

# Make sure Inspector's directory exists
home_dir = os.path.join(os.path.expanduser('~'), 'princeton-iot-inspector')
if not os.path.isdir(home_dir):
    os.mkdir(home_dir)

def is_ipv4_addr(value):
    return IPv4_REGEX.match(value)

def get_user_config():
    user_config_file = os.path.join(
        os.path.expanduser('~'),
        'princeton-iot-inspector',
        'iot_inspector_config.json'
    )
    try:
        with open(user_config_file) as fp:
            return json.load(fp)
    except Exception:
        pass
    while True:
        user_key = requests.get(server_config.NEW_USER_URL).text.strip()
        if len(user_key) == 32:
            break
        time.sleep(1)
    user_key = user_key.replace('-', '')
    secret_salt = str(uuid.uuid4())
    with open(user_config_file, 'w') as fp:
        config_dict = {
            'user_key': user_key,
            'secret_salt': secret_salt
        }
        json.dump(config_dict, fp)
    return config_dict

class TimeoutError(Exception):
    pass

_lock = threading.Lock()

def log(*args):
    log_str = '[%s] ' % datetime.datetime.today()
    log_str += ' '.join([str(v) for v in args])
    log_file_path = os.path.join(
        os.path.expanduser('~'),
        'princeton-iot-inspector',
        'iot_inspector_logs.txt'
    )
    print(log_str)
    with open(log_file_path, 'a') as fp:
        fp.write(log_str + '\n')

def get_gateway_ip(timeout=10):
    return get_default_route()[0]

def get_host_ip(timeout=10):
    return get_default_route()[2]

def _get_routes():
    while True:
        sc.conf.route.resync()
        routes = sc.conf.route.routes
        if routes:
            return routes
        time.sleep(1)

def get_default_route():
    while True:
        routes = _get_routes()
        for default_route in routes:
            if default_route[0] == 0 and default_route[1] == 0:
                return ('192.168.5.1', 'wlan0', '192.168.5.7')
        log('get_default_route: retrying')
        time.sleep(1)

def get_network_ip_range_windows():
    default_iface = get_default_route()
    iface_filter = default_iface[1]
    print(default_iface)
    ip_set = set()
    iface_ip = iface_filter.ip
    iface_guid = iface_filter.guid
    for k, v in netifaces.ifaddresses(iface_guid).items():
        if v[0]['addr'] == iface_ip:
            netmask = v[0]['netmask']
            break
    network = netaddr.IPAddress(iface_ip)
    cidr = netaddr.IPAddress(netmask).netmask_bits()
    subnet = netaddr.IPNetwork('{}/{}'.format(network, cidr))
    return ip_set

def get_network_ip_range():
    return set(['192.168.5.1', '192.168.5.6', '192.168.5.14', '192.168.5.15', '192.168.5.19'])

def gget_network_ip_range():
    ip_set = set()
    default_route = get_default_route()
    iface_str = ''
    if sys.platform.startswith('win'):
        iface_info = sc.conf.iface
        iface_str = iface_info.guid
    else:
        iface_str = sc.conf.iface
    netmask = None
    for k, v in netifaces.ifaddresses(iface_str).items():
        if v[0]['addr'] == default_route[2]:
            netmask = v[0]['netmask']
            break
    if netmask is None:
        return set()
    gateway_ip = netaddr.IPAddress(default_route[0])
    cidr = netaddr.IPAddress(netmask).netmask_bits()
    subnet = netaddr.IPNetwork('{}/{}'.format(gateway_ip, cidr))
    for ip in subnet:
        ip_set.add(str(ip))
    print('ip_set', ip_set)
    1/0
    return ip_set

def get_my_mac():
    mac_set = get_my_mac_set(iface_filter=get_default_route()[1])
    return mac_set.pop()

def get_my_mac_set(iface_filter=None):
    out_set = set()
    if sys.platform.startswith("win"):
        from scapy.arch.windows import NetworkInterface
        if type(iface_filter) == NetworkInterface:
            out_set.add(iface_filter.mac)
    for iface in sc.get_if_list():
        if iface_filter is not None and iface != iface_filter:
            continue
        try:
            mac = sc.get_if_hwaddr(iface)
        except Exception as e:
            continue
        else:
            out_set.add(mac)
    return out_set

class _SafeRunError(object):
    def __init__(self):
        pass

def restart_upon_crash(func, args=[], kwargs={}):
    while True:
        result = safe_run(func, args, kwargs)
        if isinstance(result, _SafeRunError):
            time.sleep(1)
            continue
        return result

def safe_run(func, args=[], kwargs={}):
    try:
        return func(*args, **kwargs)
    except Exception as e:
        err_msg = '=' * 80 + '\n'
        err_msg += 'Time: %s\n' % datetime.datetime.today()
        err_msg += 'Function: %s %s %s\n' % (func, args, kwargs)
        err_msg += 'Exception: %s\n' % e
        err_msg += str(traceback.format_exc()) + '\n\n\n'
        with _lock:
            sys.stderr.write(err_msg + '\n')
        log(err_msg)
        return _SafeRunError()

def get_device_id(device_mac, host_state):
    device_mac = str(device_mac).lower().replace(':', '')
    s = device_mac + str(host_state.secret_salt)
    return 's' + hashlib.sha256(s.encode('utf-8')).hexdigest()[0:10]

def smart_max(v1, v2):
    if v1 is None:
        return v2
    if v2 is None:
        return v1
    return max(v1, v2)

def smart_min(v1, v2):
    if v1 is None:
        return v2
    if v2 is None:
        return v1
    return min(v1, v2)

def get_min_max_tuple(min_max_tuple, value):
    min_v, max_v = min_max_tuple
    min_v = smart_min(min_v, value)
    max_v = smart_max(max_v, value)
    return (min_v, max_v)

def get_oui(mac):
    return mac.replace(':', '').lower()[0:6]

def get_os():
    os_platform = sys.platform
    if os_platform.startswith('darwin'):
        return 'mac'
    if os_platform.startswith('linux'):
        return 'linux'
    if os_platform.startswith('win'):
        return 'windows'
    raise RuntimeError('Unsupported operating system.')

def open_browser_on_windows(url):
    try:
        subprocess.call(['start', '', url], shell=True)
    except Exception:
        pass
true
true
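smart_min and smart_max exist because Python 3 refuses to order None against int; get_min_max_tuple folds a stream of values into a running (min, max) pair that starts out as (None, None). A short usage sketch (assumes the helpers above are in scope):

window = (None, None)          # running (min, max); nothing seen yet
for v in [7, 3, 9]:
    window = get_min_max_tuple(window, v)
print(window)                  # (3, 9)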
f703901192427f703569a7943582237c9e551aeb
8,622
py
Python
py_trans/async_translator.py
Itz-fork/py-trans
2c35eb987ad990850dab55f00ec0f1c489b2589e
[ "MIT" ]
12
2021-09-11T16:27:24.000Z
2021-11-07T12:48:13.000Z
py_trans/async_translator.py
Itz-fork/py-trans
2c35eb987ad990850dab55f00ec0f1c489b2589e
[ "MIT" ]
1
2021-09-11T16:42:02.000Z
2021-09-12T04:14:04.000Z
py_trans/async_translator.py
Itz-fork/py-trans
2c35eb987ad990850dab55f00ec0f1c489b2589e
[ "MIT" ]
2
2021-10-03T10:17:31.000Z
2022-01-25T12:33:45.000Z
# Project: py-trans
# Author: Itz-fork

import aiohttp
from .language_codes import _get_full_lang_name, _get_lang_code
from .errors import check_internet_connection, UnknownErrorOccurred

class Async_PyTranslator:
    """
    Async PyTranslator Class

    Note:
        Before trying to translate, create an instance of this class with a provider (the default provider is google)

    Providers:
        google - Google Translate
        libre - LibreTranslate Engine
        translate.com - translate.com Translate
        my_memory - MyMemory Translate
        translate_dict - Translate Dict

    Argument(s):
        provider - Provider of Translator. (Must be a supported provider)

    Example(s):
        async_pytranslator = Async_PyTranslator(provider="google")
    """
    def __init__(self, provider="google"):
        # Checking internet connection
        check_internet_connection()
        self.providers = ["google", "libre", "translate.com", "my_memory", "translate_dict"]
        if provider in self.providers:
            self.provider = provider
        else:
            self.provider = "google"
        # Headers
        self.lheader = {"Origin": "https://libretranslate.com",
                        "Host": "libretranslate.com",
                        "Referer": "https://libretranslate.com/"}
        # aiohttp session for translation purpose
        self.t_session = aiohttp.ClientSession()
        # aiohttp session for detecting source lang (This represents the laziness of me)
        self.d_session = aiohttp.ClientSession()

    async def translate(self, text, dest_lang="en"):
        """
        Translator Function

        Argument(s):
            text - Source Text (Text that needs to be translated)
            dest_lang - Destination Language

        Example(s):
            await async_pytranslator.translate(text="Hi, How are you?", dest_lang="si")
        """
        if self.provider == "google":
            return await self.google_translate(text, dest_lang)
        elif self.provider == "libre":
            return await self.libre_translate(text, dest_lang)
        elif self.provider == "translate.com":
            return await self.translate_com(text, dest_lang)
        elif self.provider == "my_memory":
            return await self.my_memory(text, dest_lang)
        elif self.provider == "translate_dict":
            return await self.translate_dict(text, dest_lang)
        else:
            return

    # Google Translate
    async def google_translate(self, text, dest_lang):
        r_url = f"https://clients5.google.com/translate_a/t?client=dict-chrome-ex&sl=auto&tl={dest_lang}&q={text}"
        try:
            async with self.t_session as tr_ses:
                async with tr_ses.get(r_url) as get_req:
                    request_resp = await get_req.json()
                    translation = ""
                    for tr in request_resp["sentences"]:
                        try:
                            translation += tr["trans"]
                        except KeyError:
                            pass
                        except BaseException as e:
                            raise UnknownErrorOccurred(e)
                    origin_text = text
                    origin_lang = await self.get_lang_name(request_resp['src'])
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "Google Translate", "translation": translation,
                               "dest_lang": dest_lang_f, "orgin_text": origin_text, "origin_lang": origin_lang}
                    # Closing unwanted language detection aiohttp session
                    await self.d_session.close()
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    # LibreTranslate
    async def _detect_lang(self, text, full_name=False):
        r_url = "https://libretranslate.com/detect"
        ld_data = {"q": str(text)}
        try:
            async with self.d_session as tr_ses:
                async with tr_ses.post(r_url, data=ld_data) as get_req:
                    request_resp = await get_req.json()
                    language_code = request_resp[0]["language"]
        except:
            # If we can't detect the language, assume it's just english (RIP moment)
            language_code = "en"
        if full_name is False:
            return language_code
        else:
            return await self.get_lang_name(language_code)

    async def libre_translate(self, text, dest_lang):
        r_url = "https://libretranslate.com/translate"
        try:
            source_lang = await self._detect_lang(text=text, full_name=False)
            l_data = {"q": str(text), "source": source_lang, "target": dest_lang}
            async with self.t_session as tr_ses:
                async with tr_ses.post(r_url, data=l_data) as get_req:
                    request_resp = await get_req.json()
                    translation = request_resp["translatedText"]
                    origin_lang = await self.get_lang_name(source_lang)
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "LibreTranslate", "translation": translation,
                               "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    # Translate.com
    async def translate_com(self, text, dest_lang):
        r_url = "https://www.translate.com/translator/ajax_translate"
        try:
            source_lang = await self._detect_lang(text=text, full_name=False)
            tr_data = {"text_to_translate": str(text), "source_lang": source_lang,
                       "translated_lang": dest_lang, "use_cache_only": "false"}
            async with self.t_session as tr_ses:
                async with tr_ses.post(url=r_url, data=tr_data) as get_req:
                    request_resp = await get_req.json(content_type='text/html')
                    translation = request_resp["translated_text"]
                    # resolve the detected source language to its full name
                    origin_lang = await self.get_lang_name(source_lang)
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "Translate.com", "translation": translation,
                               "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    # My Memory
    async def my_memory(self, text, dest_lang):
        r_url = "https://api.mymemory.translated.net/get"
        try:
            source_lang = await self._detect_lang(text=text, full_name=False)
            m_params = {"q": text, "langpair": f"{source_lang}|{dest_lang}"}
            async with self.t_session as tr_ses:
                async with tr_ses.get(r_url, params=m_params) as get_req:
                    request_resp = await get_req.json()
                    translation = request_resp["matches"][0]["translation"]
                    origin_lang = await self.get_lang_name(source_lang)
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "MyMemory", "translation": translation,
                               "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    # Translate Dict
    async def translate_dict(self, text, dest_lang):
        r_url = f"https://t3.translatedict.com/1.php?p1=auto&p2={dest_lang}&p3={text}"
        try:
            async with self.t_session as tr_ses:
                async with tr_ses.get(r_url) as get_req:
                    request_resp = await get_req.text()
                    origin_lang = await self._detect_lang(text=text, full_name=True)
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "Translate Dict", "translation": request_resp,
                               "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    # Get Language Names
    async def get_lang_name(self, text):
        if len(text) == 2:
            return _get_full_lang_name(text)
        else:
            if len(text) <= 3:
                return "Not a full language name"
            else:
                return _get_lang_code(text)
47.373626
190
0.593714
import aiohttp
from .language_codes import _get_full_lang_name, _get_lang_code
from .errors import check_internet_connection, UnknownErrorOccurred

class Async_PyTranslator:
    def __init__(self, provider="google"):
        check_internet_connection()
        self.providers = ["google", "libre", "translate.com", "my_memory", "translate_dict"]
        if provider in self.providers:
            self.provider = provider
        else:
            self.provider = "google"
        self.lheader = {"Origin": "https://libretranslate.com",
                        "Host": "libretranslate.com",
                        "Referer": "https://libretranslate.com/"}
        self.t_session = aiohttp.ClientSession()
        self.d_session = aiohttp.ClientSession()

    async def translate(self, text, dest_lang="en"):
        if self.provider == "google":
            return await self.google_translate(text, dest_lang)
        elif self.provider == "libre":
            return await self.libre_translate(text, dest_lang)
        elif self.provider == "translate.com":
            return await self.translate_com(text, dest_lang)
        elif self.provider == "my_memory":
            return await self.my_memory(text, dest_lang)
        elif self.provider == "translate_dict":
            return await self.translate_dict(text, dest_lang)
        else:
            return

    async def google_translate(self, text, dest_lang):
        r_url = f"https://clients5.google.com/translate_a/t?client=dict-chrome-ex&sl=auto&tl={dest_lang}&q={text}"
        try:
            async with self.t_session as tr_ses:
                async with tr_ses.get(r_url) as get_req:
                    request_resp = await get_req.json()
                    translation = ""
                    for tr in request_resp["sentences"]:
                        try:
                            translation += tr["trans"]
                        except KeyError:
                            pass
                        except BaseException as e:
                            raise UnknownErrorOccurred(e)
                    origin_text = text
                    origin_lang = await self.get_lang_name(request_resp['src'])
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "Google Translate", "translation": translation,
                               "dest_lang": dest_lang_f, "orgin_text": origin_text, "origin_lang": origin_lang}
                    await self.d_session.close()
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    async def _detect_lang(self, text, full_name=False):
        r_url = "https://libretranslate.com/detect"
        ld_data = {"q": str(text)}
        try:
            async with self.d_session as tr_ses:
                async with tr_ses.post(r_url, data=ld_data) as get_req:
                    request_resp = await get_req.json()
                    language_code = request_resp[0]["language"]
        except:
            language_code = "en"
        if full_name is False:
            return language_code
        else:
            return await self.get_lang_name(language_code)

    async def libre_translate(self, text, dest_lang):
        r_url = "https://libretranslate.com/translate"
        try:
            source_lang = await self._detect_lang(text=text, full_name=False)
            l_data = {"q": str(text), "source": source_lang, "target": dest_lang}
            async with self.t_session as tr_ses:
                async with tr_ses.post(r_url, data=l_data) as get_req:
                    request_resp = await get_req.json()
                    translation = request_resp["translatedText"]
                    origin_lang = await self.get_lang_name(source_lang)
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "LibreTranslate", "translation": translation,
                               "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    # Translate.com
    async def translate_com(self, text, dest_lang):
        r_url = "https://www.translate.com/translator/ajax_translate"
        try:
            source_lang = await self._detect_lang(text=text, full_name=False)
            tr_data = {"text_to_translate": str(text), "source_lang": source_lang,
                       "translated_lang": dest_lang, "use_cache_only": "false"}
            async with self.t_session as tr_ses:
                async with tr_ses.post(url=r_url, data=tr_data) as get_req:
                    request_resp = await get_req.json(content_type='text/html')
                    translation = request_resp["translated_text"]
                    origin_lang = await self.get_lang_name(source_lang)
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "Translate.com", "translation": translation,
                               "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    # My Memory
    async def my_memory(self, text, dest_lang):
        r_url = "https://api.mymemory.translated.net/get"
        try:
            source_lang = await self._detect_lang(text=text, full_name=False)
            m_params = {"q": text, "langpair": f"{source_lang}|{dest_lang}"}
            async with self.t_session as tr_ses:
                async with tr_ses.get(r_url, params=m_params) as get_req:
                    request_resp = await get_req.json()
                    translation = request_resp["matches"][0]["translation"]
                    origin_lang = await self.get_lang_name(source_lang)
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "MyMemory", "translation": translation,
                               "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    # Translate Dict
    async def translate_dict(self, text, dest_lang):
        r_url = f"https://t3.translatedict.com/1.php?p1=auto&p2={dest_lang}&p3={text}"
        try:
            async with self.t_session as tr_ses:
                async with tr_ses.get(r_url) as get_req:
                    request_resp = await get_req.text()
                    origin_lang = await self._detect_lang(text=text, full_name=True)
                    dest_lang_f = await self.get_lang_name(dest_lang)
                    tr_dict = {"status": "success", "engine": "Translate Dict", "translation": request_resp,
                               "dest_lang": dest_lang_f, "orgin_text": str(text), "origin_lang": origin_lang}
                    return tr_dict
        except Exception as e:
            return {"status": "failed", "error": e}

    # Get Language Names
    async def get_lang_name(self, text):
        if len(text) == 2:
            return _get_full_lang_name(text)
        else:
            if len(text) <= 3:
                return "Not a full language name"
            else:
                return _get_lang_code(text)
true
true
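A minimal usage sketch for the translator record above. The class body is shown in the record, but not the package it ships under, so the import path `async_pytranslator` here is a guess; every provider method resolves to a dict with a "status" key, which is what the example branches on.

```python
import asyncio

# Hypothetical import path; the record only shows the module body.
from async_pytranslator import Async_PyTranslator


async def main():
    # Construct inside a running event loop so the aiohttp sessions
    # are created with an active loop.
    translator = Async_PyTranslator(provider="google")
    result = await translator.translate("bonjour le monde", dest_lang="en")
    if result["status"] == "success":
        print(result["engine"], "->", result["translation"])
    else:
        print("translation failed:", result["error"])

asyncio.run(main())
```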
f703904473e73730adab7124ddff717c8c6d6c0d
14,395
py
Python
axonius_api_client/api/assets/saved_query.py
Axonius/axonius_api_client
751fd6346ee9f361a73cd1399704b6d928f130ae
[ "MIT" ]
11
2020-01-31T00:28:26.000Z
2022-03-25T20:27:08.000Z
axonius_api_client/api/assets/saved_query.py
Axonius/axonius_api_client
751fd6346ee9f361a73cd1399704b6d928f130ae
[ "MIT" ]
83
2020-01-22T12:52:38.000Z
2022-02-07T16:20:18.000Z
axonius_api_client/api/assets/saved_query.py
Axonius/axonius_api_client
751fd6346ee9f361a73cd1399704b6d928f130ae
[ "MIT" ]
16
2019-08-23T17:44:31.000Z
2022-01-06T00:57:34.000Z
# -*- coding: utf-8 -*-
"""API for working with saved queries for assets."""
import warnings
from typing import Generator, List, Optional, Union

from ...constants.api import MAX_PAGE_SIZE
from ...exceptions import NotFoundError, ResponseError, ApiWarning

# from ...features import Features
from ...parsers.tables import tablize_sqs
from ...tools import check_gui_page_size, listify
from .. import json_api
from ..api_endpoints import ApiEndpoints
from ..mixins import ChildMixins

# XXX need update saved query


class SavedQuery(ChildMixins):
    """API object for working with saved queries for the parent asset type.

    Examples:
        Create a ``client`` using :obj:`axonius_api_client.connect.Connect` and assume
        ``apiobj`` is either ``client.devices`` or ``client.users``

        >>> apiobj = client.devices  # or client.users

        * Get a saved query by name: :meth:`get_by_name`
        * Get a saved query by UUID: :meth:`get_by_uuid`
        * Get a saved query by tags: :meth:`get_by_tags`
        * Get all saved query tags: :meth:`get_tags`
        * Get all saved queries: :meth:`get`
        * Add a saved query: :meth:`add`
        * Delete a saved query by name: :meth:`delete_by_name`
        * Delete a saved query by UUID or SQ object: :meth:`delete`

    See Also:
        * Device assets :obj:`axonius_api_client.api.assets.devices.Devices`
        * User assets :obj:`axonius_api_client.api.assets.users.Users`
    """

    def get_by_name(self, value: str) -> dict:
        """Get a saved query by name.

        Examples:
            Get a saved query by name

            >>> sq = apiobj.saved_query.get_by_name(value="test")
            >>> sq['tags']
            ['Unmanaged Devices']
            >>> sq['description'][:80]
            'Devices that have been seen by at least one agent or at least one endpoint manag'
            >>> sq['view']['fields']
            [
                'adapters',
                'specific_data.data.name',
                'specific_data.data.hostname',
                'specific_data.data.last_seen',
                'specific_data.data.network_interfaces.manufacturer',
                'specific_data.data.network_interfaces.mac',
                'specific_data.data.network_interfaces.ips',
                'specific_data.data.os.type',
                'labels'
            ]
            >>> sq['view']['query']['filter'][:80]
            '(specific_data.data.adapter_properties == "Agent") or (specific_data.data.adapte'

        Args:
            value: name of saved query
        """
        data = self.get()
        found = [x for x in data if x["name"] == value]
        if found:
            return found[0]

        err = f"Saved Query with name of {value!r} not found"
        raise NotFoundError(tablize_sqs(data=data, err=err))

    def get_by_uuid(self, value: str) -> dict:
        """Get a saved query by uuid.

        Examples:
            Get a saved query by uuid

            >>> sq = apiobj.saved_query.get_by_uuid(value="5f76721ce4557d5cba93f59e")

        Args:
            value: uuid of saved query
        """
        data = self.get()
        found = [x for x in data if x["uuid"] == value]
        if found:
            return found[0]

        err = f"Saved Query with UUID of {value!r} not found"
        raise NotFoundError(tablize_sqs(data=data, err=err))

    def get_by_tags(self, value: Union[str, List[str]], **kwargs) -> List[dict]:
        """Get saved queries by tags.

        Examples:
            Get all saved queries tagged with 'AD'

            >>> sqs = apiobj.saved_query.get_by_tags('AD')
            >>> len(sqs)
            2

            Get all saved queries tagged with 'AD' or 'AWS'

            >>> sqs = apiobj.saved_query.get_by_tags(['AD', 'AWS'])
            >>> len(sqs)
            5

        Args:
            value: list of tags
            **kwargs: passed to :meth:`get`

        Raises:
            :exc:`NotFoundError`: if no saved queries found tagged with supplied tags
        """
        value = listify(value)
        rows = self.get(**kwargs)
        matches = []
        known = set()

        for row in rows:
            for tag in row.get("tags", []):
                known.add(tag)
                if tag in value and row not in matches:
                    matches.append(row)

        if not matches:
            valid = "\n  " + "\n  ".join(sorted(list(known)))
            msg = f"No saved query found with tags {value!r}, valid tags:{valid}"
            raise NotFoundError(msg)
        return matches

    def get_tags(self, **kwargs) -> List[str]:
        """Get all tags for saved queries.

        Examples:
            Get all known tags for all saved queries

            >>> tags = apiobj.saved_query.get_tags()
            >>> len(tags)
            19

        Args:
            **kwargs: passed to :meth:`get`
        """
        rows = self.get(**kwargs)
        tags = [y for x in rows for y in x.get("tags", [])]
        return sorted(list(set(tags)))

    def get(self, generator: bool = False) -> Union[Generator[dict, None, None], List[dict]]:
        """Get all saved queries.

        Examples:
            Get all saved queries

            >>> sqs = apiobj.saved_query.get()
            >>> len(sqs)
            39

        Args:
            generator: return an iterator
        """
        gen = self.get_generator()
        return gen if generator else list(gen)

    def get_generator(self) -> Generator[dict, None, None]:
        """Get Saved Queries using a generator."""
        offset = 0

        while True:
            rows = self._get(offset=offset)
            offset += len(rows)

            if not rows:
                break

            for row in rows:
                yield row.to_dict()

    def add(
        self,
        name: str,
        query: Optional[str] = None,
        tags: Optional[List[str]] = None,
        description: Optional[str] = None,
        expressions: Optional[List[str]] = None,
        fields: Optional[Union[List[str], str]] = None,
        fields_manual: Optional[Union[List[str], str]] = None,
        fields_regex: Optional[Union[List[str], str]] = None,
        fields_fuzzy: Optional[Union[List[str], str]] = None,
        fields_default: bool = True,
        fields_root: Optional[str] = None,
        sort_field: Optional[str] = None,
        sort_descending: bool = True,
        column_filters: Optional[dict] = None,
        gui_page_size: Optional[int] = None,
        private: bool = False,
        always_cached: bool = False,
        **kwargs,
    ) -> dict:
        """Create a saved query.

        Examples:
            Create a saved query using a :obj:`axonius_api_client.api.wizards.wizard.Wizard`

            >>> parsed = apiobj.wizard_text.parse(content="simple hostname contains blah")
            >>> query = parsed["query"]
            >>> expressions = parsed["expressions"]
            >>> sq = apiobj.saved_query.add(
            ...     name="test",
            ...     query=query,
            ...     expressions=expressions,
            ...     description="meep meep",
            ...     tags=["nyuck1", "nyuck2", "nyuck3"],
            ... )

        Notes:
            Saved Queries created without expressions will not be editable using the
            query wizard in the GUI. Use
            :obj:`axonius_api_client.api.wizards.wizard.Wizard` to produce a query and
            its corresponding expressions for the GUI query wizard.

        Args:
            name: name of saved query
            description: description
            tags: list of tags
            expressions: expressions built by
                :obj:`axonius_api_client.api.wizards.wizard.Wizard`
            query: query built by GUI or the CLI query wizard
            fields: fields to return for each asset (will be validated)
            fields_manual: fields to return for each asset (will NOT be validated)
            fields_regex: regex of fields to return for each asset
            fields_fuzzy: string to fuzzy match of fields to return for each asset
            fields_default: include the default fields defined in the parent asset object
            fields_root: include all fields of an adapter that are not complex sub-fields
            sort_field: sort the returned assets on a given field
            sort_descending: reverse the sort of the returned assets
            column_filters: column filters keyed as field_name:value
            gui_page_size: show N rows per page in GUI
            private: make this saved query private to current user
        """
        query_expr: Optional[str] = kwargs.get("query_expr", None) or query
        gui_page_size = check_gui_page_size(size=gui_page_size)

        fields = self.parent.fields.validate(
            fields=fields,
            fields_manual=fields_manual,
            fields_regex=fields_regex,
            fields_default=fields_default,
            fields_root=fields_root,
            fields_fuzzy=fields_fuzzy,
        )

        if sort_field:
            sort_field = self.parent.fields.get_field_name(value=sort_field)

        data_column_filters = {}
        if column_filters:
            for col_field, col_value in column_filters.items():
                col_field = self.parent.fields.get_field_name(value=col_field)
                data_column_filters[col_field] = col_value

        dmeta = {}  # TBD
        dmeta["enforcementFilter"] = None  # TBD
        dmeta["uniqueAdapters"] = False  # TBD

        data_query = {}
        data_query["filter"] = query or ""
        if query_expr:
            data_query["onlyExpressionsFilter"] = query_expr
        data_query["expressions"] = expressions or []
        data_query["search"] = None  # TBD
        data_query["meta"] = dmeta  # TBD

        data_sort = {}
        data_sort["desc"] = sort_descending
        data_sort["field"] = sort_field or ""

        data_view = {}
        data_view["query"] = data_query
        data_view["sort"] = data_sort
        data_view["fields"] = fields
        data_view["pageSize"] = gui_page_size

        # 4.5 SEMI_BREAKING_CHANGE: now a list of dict
        # data_view["colFilters"] = listify(data_column_filters or {})
        if data_column_filters:
            msg = "Column filters structure has changed and is currently not supported by the API client."
            warnings.warn(message=msg, category=ApiWarning)

        # 4.5 SEMI_BREAKING_CHANGE: now a list of dict
        # data_view["colExcludedAdapters"] = listify({})  # TBD

        # data = {}
        # data["name"] = name
        # data["query_type"] = "saved"
        # data["description"] = description
        # data["view"] = data_view
        # data["tags"] = tags or []
        # data["private"] = private

        added = self._add(
            name=name,
            description=description,
            view=data_view,
            private=private,
            always_cached=always_cached,
            tags=tags,
        )
        return self.get_by_uuid(value=added.id)

    def delete_by_name(self, value: str, **kwargs) -> dict:
        """Delete a saved query by name.

        Examples:
            Delete the saved query by name

            >>> deleted = apiobj.saved_query.delete_by_name(value="test")

        Args:
            value: name of saved query to delete
            **kwargs: passed to :meth:`get_by_name`
        """
        row = self.get_by_name(value=value, **kwargs)
        self._delete(uuid=row["uuid"])
        return row

    def delete(self, rows: Union[str, List[str], List[dict]]) -> List[str]:
        """Delete saved queries.

        Args:
            rows: list of UUIDs or rows of previously fetched saved queries to delete
        """
        rows = listify(rows)
        deleted = []
        for row in rows:
            uuid = row["uuid"] if isinstance(row, dict) else row
            self._delete(uuid=uuid)
            deleted.append(uuid)
        return deleted

    def _add(
        self,
        name: str,
        view: dict,
        description: Optional[str] = "",
        always_cached: bool = False,
        private: bool = False,
        tags: Optional[List[str]] = None,
    ) -> str:
        """Direct API method to create a saved query.

        Args:
            name: name of saved query
            view: view metadata (query, fields, sort, page size)
            description: description of saved query
            always_cached: keep the saved query cached
            private: make the saved query private to the current user
            tags: list of tags
        """
        api_endpoint = ApiEndpoints.saved_queries.create
        request_obj = api_endpoint.load_request(
            name=name,
            view=view,
            description=description,
            always_cached=always_cached,
            private=private,
            tags=tags or [],
        )
        return api_endpoint.perform_request(
            http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE
        )

    def _delete(self, uuid: str) -> json_api.generic.Metadata:
        """Direct API method to delete a saved query.

        Args:
            uuid: uuid of the saved query to delete
        """
        # NEW_IN: 05/31/21 cortex/develop
        try:
            api_endpoint = ApiEndpoints.saved_queries.delete
            request_obj = api_endpoint.load_request()
            return api_endpoint.perform_request(
                http=self.auth.http,
                request_obj=request_obj,
                asset_type=self.parent.ASSET_TYPE,
                uuid=uuid,
            )
        except ResponseError as exc:
            if exc.is_incorrect_type:
                api_endpoint = ApiEndpoints.saved_queries.delete_4_3
                request_obj = api_endpoint.load_request()
                return api_endpoint.perform_request(
                    http=self.auth.http,
                    request_obj=request_obj,
                    asset_type=self.parent.ASSET_TYPE,
                    uuid=uuid,
                )
            raise

    def _get(
        self, limit: int = MAX_PAGE_SIZE, offset: int = 0
    ) -> List[json_api.saved_queries.SavedQuery]:
        """Direct API method to get saved queries in pages.

        Args:
            limit: limit to N rows per page
            offset: start at row N
        """
        api_endpoint = ApiEndpoints.saved_queries.get
        request_obj = api_endpoint.load_request(page={"limit": limit, "offset": offset})
        return api_endpoint.perform_request(
            http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE
        )
34.93932
107
0.570823
import warnings from typing import Generator, List, Optional, Union from ...constants.api import MAX_PAGE_SIZE from ...exceptions import NotFoundError, ResponseError, ApiWarning from ...parsers.tables import tablize_sqs from ...tools import check_gui_page_size, listify from .. import json_api from ..api_endpoints import ApiEndpoints from ..mixins import ChildMixins class SavedQuery(ChildMixins): def get_by_name(self, value: str) -> dict: data = self.get() found = [x for x in data if x["name"] == value] if found: return found[0] err = f"Saved Query with name of {value!r} not found" raise NotFoundError(tablize_sqs(data=data, err=err)) def get_by_uuid(self, value: str) -> dict: data = self.get() found = [x for x in data if x["uuid"] == value] if found: return found[0] err = f"Saved Query with UUID of {value!r} not found" raise NotFoundError(tablize_sqs(data=data, err=err)) def get_by_tags(self, value: Union[str, List[str]], **kwargs) -> List[dict]: value = listify(value) rows = self.get(**kwargs) matches = [] known = set() for row in rows: for tag in row.get("tags", []): known.add(tag) if tag in value and row not in matches: matches.append(row) if not matches: valid = "\n " + "\n ".join(sorted(list(known))) msg = f"No saved query found with tags {value!r}, valid tags:{valid}" raise NotFoundError(msg) return matches def get_tags(self, **kwargs) -> List[str]: rows = self.get(**kwargs) tags = [y for x in rows for y in x.get("tags", [])] return sorted(list(set(tags))) def get(self, generator: bool = False) -> Union[Generator[dict, None, None], List[dict]]: gen = self.get_generator() return gen if generator else list(gen) def get_generator(self) -> Generator[dict, None, None]: offset = 0 while True: rows = self._get(offset=offset) offset += len(rows) if not rows: break for row in rows: yield row.to_dict() def add( self, name: str, query: Optional[str] = None, tags: Optional[List[str]] = None, description: Optional[str] = None, expressions: Optional[List[str]] = None, fields: Optional[Union[List[str], str]] = None, fields_manual: Optional[Union[List[str], str]] = None, fields_regex: Optional[Union[List[str], str]] = None, fields_fuzzy: Optional[Union[List[str], str]] = None, fields_default: bool = True, fields_root: Optional[str] = None, sort_field: Optional[str] = None, sort_descending: bool = True, column_filters: Optional[dict] = None, gui_page_size: Optional[int] = None, private: bool = False, always_cached: bool = False, **kwargs, ) -> dict: query_expr: Optional[str] = kwargs.get("query_expr", None) or query gui_page_size = check_gui_page_size(size=gui_page_size) fields = self.parent.fields.validate( fields=fields, fields_manual=fields_manual, fields_regex=fields_regex, fields_default=fields_default, fields_root=fields_root, fields_fuzzy=fields_fuzzy, ) if sort_field: sort_field = self.parent.fields.get_field_name(value=sort_field) data_column_filters = {} if column_filters: for col_field, col_value in column_filters.items(): col_field = self.parent.fields.get_field_name(value=col_field) data_column_filters[col_field] = col_value dmeta = {} dmeta["enforcementFilter"] = None dmeta["uniqueAdapters"] = False data_query = {} data_query["filter"] = query or "" if query_expr: data_query["onlyExpressionsFilter"] = query_expr data_query["expressions"] = expressions or [] data_query["search"] = None data_query["meta"] = dmeta data_sort = {} data_sort["desc"] = sort_descending data_sort["field"] = sort_field or "" data_view = {} data_view["query"] = data_query data_view["sort"] = data_sort 
data_view["fields"] = fields data_view["pageSize"] = gui_page_size if data_column_filters: msg = f"Column filters structure has changed and is currently not supported by the API client." warnings.warn(message=msg, category=ApiWarning) added = self._add( name=name, description=description, view=data_view, private=private, always_cached=always_cached, tags=tags, ) return self.get_by_uuid(value=added.id) def delete_by_name(self, value: str, **kwargs) -> dict: row = self.get_by_name(value=value, **kwargs) self._delete(uuid=row["uuid"]) return row def delete(self, rows: Union[str, List[str], List[dict]]) -> List[str]: rows = listify(rows) deleted = [] for row in rows: uuid = row["uuid"] if isinstance(row, dict) else row self._delete(uuid=uuid) deleted.append(uuid) return deleted def _add( self, name: str, view: dict, description: Optional[str] = "", always_cached: bool = False, private: bool = False, tags: Optional[List[str]] = None, ) -> str: api_endpoint = ApiEndpoints.saved_queries.create request_obj = api_endpoint.load_request( name=name, view=view, description=description, always_cached=always_cached, private=private, tags=tags or [], ) return api_endpoint.perform_request( http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE ) def _delete(self, uuid: str) -> json_api.generic.Metadata: try: api_endpoint = ApiEndpoints.saved_queries.delete request_obj = api_endpoint.load_request() return api_endpoint.perform_request( http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE, uuid=uuid, ) except ResponseError as exc: if exc.is_incorrect_type: api_endpoint = ApiEndpoints.saved_queries.delete_4_3 request_obj = api_endpoint.load_request() return api_endpoint.perform_request( http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE, uuid=uuid, ) raise def _get( self, limit: int = MAX_PAGE_SIZE, offset: int = 0 ) -> List[json_api.saved_queries.SavedQuery]: api_endpoint = ApiEndpoints.saved_queries.get request_obj = api_endpoint.load_request(page={"limit": limit, "offset": offset}) return api_endpoint.perform_request( http=self.auth.http, request_obj=request_obj, asset_type=self.parent.ASSET_TYPE )
true
true
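The `get_generator` method in the record above uses a common offset-paging idiom: keep fetching pages until an empty page comes back, advancing the offset by however many rows each page returned. A standalone sketch of that idiom, with `fetch_page` standing in for the `_get` endpoint call (a hypothetical helper, not part of the API client):

```python
from typing import Callable, Iterator, List


def paged(fetch_page: Callable[[int], List[dict]]) -> Iterator[dict]:
    """Yield rows from an offset-paged endpoint until a page comes back empty."""
    offset = 0
    while True:
        rows = fetch_page(offset)
        if not rows:
            break
        offset += len(rows)  # advance by however many rows were returned
        yield from rows


# Toy backend: 25 rows served in pages of at most 10.
data = [{"id": i} for i in range(25)]
fetch = lambda offset: data[offset:offset + 10]

assert len(list(paged(fetch))) == 25
```

Advancing by `len(rows)` rather than a fixed page size keeps the loop correct even when the server returns a short final page.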
f703904bca3f1b62c246cc7e14f00f30d95c4d00
6,564
py
Python
producer/util.py
Keck-FOBOS/producer
6f2b0d3f29f62187bf593567081061e53ddb5a4e
[ "BSD-3-Clause" ]
null
null
null
producer/util.py
Keck-FOBOS/producer
6f2b0d3f29f62187bf593567081061e53ddb5a4e
[ "BSD-3-Clause" ]
null
null
null
producer/util.py
Keck-FOBOS/producer
6f2b0d3f29f62187bf593567081061e53ddb5a4e
[ "BSD-3-Clause" ]
null
null
null
""" Miscellaneous package utilities. .. include:: ../include/links.rst """ from itertools import chain, combinations from IPython import embed import numpy def all_subclasses(cls): """ Collect all the subclasses of the provided class. The search follows the inheritance to the highest-level class. Intermediate base classes are included in the returned set, but not the base class itself. Thanks to: https://stackoverflow.com/questions/3862310/how-to-find-all-the-subclasses-of-a-class-given-its-name Args: cls (object): The base class Returns: :obj:`set`: The unique set of derived classes, including any intermediate base classes in the inheritance thread. """ return set(cls.__subclasses__()).union( [s for c in cls.__subclasses__() for s in all_subclasses(c)]) def string_table(tbl, delimeter='print', has_header=True): """ Provided the array of data, format it with equally spaced columns and add a header (first row) and contents delimeter. Args: tbl (`numpy.ndarray`_): Array of string representations of the data to print. delimeter (:obj:`str`, optional): If the first row in the table containts the column headers (see ``has_header``), this sets the delimeter between first table row and the column data. Use ``'print'`` for a simple line of hyphens, anything else results in an ``rst`` style table formatting. has_header (:obj:`bool`, optional): The first row in ``tbl`` contains the column headers. Returns: :obj:`str`: Single long string with the data table. """ nrows, ncols = tbl.shape col_width = [numpy.amax([len(dij) for dij in dj]) for dj in tbl.T] _nrows = nrows start = 1 if delimeter != 'print': _nrows += 2 start += 1 if has_header: _nrows += 1 start += 1 row_string = ['']*_nrows for i in range(start,nrows+start-1): row_string[i] = ' '.join([tbl[1+i-start,j].ljust(col_width[j]) for j in range(ncols)]) if delimeter == 'print': # Heading row row_string[0] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)]) # Delimiter if has_header: row_string[1] = '-'*len(row_string[0]) return '\n'.join(row_string)+'\n' # For an rst table row_string[0] = ' '.join([ '='*col_width[j] for j in range(ncols)]) row_string[1] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)]) if has_header: row_string[2] = row_string[0] row_string[-1] = row_string[0] return '\n'.join(row_string)+'\n' def powerset(iterable, reverse=False): """" Construct an iterable that steps through all combinations of the provided iterable. This is pulled from the recipes provided by the itertools documentation. Examples: Get all unique combinations of the list [1,2,3]: >>> list(powerset([1,2,3])) [() (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)] Args: iterable (iterable): An iterable object reverse (:obj:`bool`, optional): Reverse the order (only roughly) of the iterable by placing the longer sequences first. Returns: `itertools.chain`: Iterable object that returns the sequence of combinations. """ rng = range(len(iterable)+1)[::-1] if reverse else range(len(iterable)+1) return chain.from_iterable(combinations(iterable, r) for r in rng) def polygon_winding_number(polygon, point): """ Determine the winding number of a 2D polygon about a point. The code does **not** check if the polygon is simple (no interesecting line segments). Algorithm taken from Numerical Recipes Section 21.4. Args: polygon (`numpy.ndarray`_): An Nx2 array containing the x,y coordinates of a polygon. The points should be ordered either counter-clockwise or clockwise. point (`numpy.ndarray`_): One or more points for the winding number calculation. 
Must be either a 2-element array for a single (x,y) pair, or an Nx2 array with N (x,y) points. Returns: :obj:`int`, `numpy.ndarray`_: The winding number of each point with respect to the provided polygon. Points inside the polygon have winding numbers of 1 or -1; see :func:`point_inside_polygon`. Raises: ValueError: Raised if ``polygon`` is not 2D, if ``polygon`` does not have two columns, or if the last axis of ``point`` does not have 2 and only 2 elements. """ # Check input shape is for 2D only if len(polygon.shape) != 2: raise ValueError('Polygon must be an Nx2 array.') if polygon.shape[1] != 2: raise ValueError('Polygon must be in two dimensions.') _point = numpy.atleast_2d(point) if _point.shape[1] != 2: raise ValueError('Point must contain two elements.') # Get the winding number nvert = polygon.shape[0] npnt = _point.shape[0] dl = numpy.roll(polygon, 1, axis=0)[None,:,:] - _point[:,None,:] dr = polygon[None,:,:] - point[:,None,:] dx = dl[...,0]*dr[...,1] - dl[...,1]*dr[...,0] indx_l = dl[...,1] > 0 indx_r = dr[...,1] > 0 wind = numpy.zeros((npnt, nvert), dtype=int) wind[indx_l & numpy.logical_not(indx_r) & (dx < 0)] = -1 wind[numpy.logical_not(indx_l) & indx_r & (dx > 0)] = 1 return numpy.sum(wind, axis=1)[0] if point.ndim == 1 else numpy.sum(wind, axis=1) def point_inside_polygon(polygon, point): """ Determine if one or more points is inside the provided polygon. Primarily a wrapper for :func:`polygon_winding_number`, that returns True for each point that is inside the polygon. Args: polygon (`numpy.ndarray`_): An Nx2 array containing the x,y coordinates of a polygon. The points should be ordered either counter-clockwise or clockwise. point (`numpy.ndarray`_): One or more points for the winding number calculation. Must be either a 2-element array for a single (x,y) pair, or an Nx2 array with N (x,y) points. Returns: :obj:`bool`, `numpy.ndarray`: Boolean indicating whether or not each point is within the polygon. """ return numpy.absolute(polygon_winding_number(polygon, point)) == 1
33.319797
104
0.61883
from itertools import chain, combinations from IPython import embed import numpy def all_subclasses(cls): return set(cls.__subclasses__()).union( [s for c in cls.__subclasses__() for s in all_subclasses(c)]) def string_table(tbl, delimeter='print', has_header=True): nrows, ncols = tbl.shape col_width = [numpy.amax([len(dij) for dij in dj]) for dj in tbl.T] _nrows = nrows start = 1 if delimeter != 'print': _nrows += 2 start += 1 if has_header: _nrows += 1 start += 1 row_string = ['']*_nrows for i in range(start,nrows+start-1): row_string[i] = ' '.join([tbl[1+i-start,j].ljust(col_width[j]) for j in range(ncols)]) if delimeter == 'print': row_string[0] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)]) if has_header: row_string[1] = '-'*len(row_string[0]) return '\n'.join(row_string)+'\n' row_string[0] = ' '.join([ '='*col_width[j] for j in range(ncols)]) row_string[1] = ' '.join([tbl[0,j].ljust(col_width[j]) for j in range(ncols)]) if has_header: row_string[2] = row_string[0] row_string[-1] = row_string[0] return '\n'.join(row_string)+'\n' def powerset(iterable, reverse=False): rng = range(len(iterable)+1)[::-1] if reverse else range(len(iterable)+1) return chain.from_iterable(combinations(iterable, r) for r in rng) def polygon_winding_number(polygon, point): if len(polygon.shape) != 2: raise ValueError('Polygon must be an Nx2 array.') if polygon.shape[1] != 2: raise ValueError('Polygon must be in two dimensions.') _point = numpy.atleast_2d(point) if _point.shape[1] != 2: raise ValueError('Point must contain two elements.') nvert = polygon.shape[0] npnt = _point.shape[0] dl = numpy.roll(polygon, 1, axis=0)[None,:,:] - _point[:,None,:] dr = polygon[None,:,:] - point[:,None,:] dx = dl[...,0]*dr[...,1] - dl[...,1]*dr[...,0] indx_l = dl[...,1] > 0 indx_r = dr[...,1] > 0 wind = numpy.zeros((npnt, nvert), dtype=int) wind[indx_l & numpy.logical_not(indx_r) & (dx < 0)] = -1 wind[numpy.logical_not(indx_l) & indx_r & (dx > 0)] = 1 return numpy.sum(wind, axis=1)[0] if point.ndim == 1 else numpy.sum(wind, axis=1) def point_inside_polygon(polygon, point): return numpy.absolute(polygon_winding_number(polygon, point)) == 1
true
true
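A quick numeric check of the winding-number logic in the record above, assuming the functions are importable from `producer.util` (the path shown in the record's metadata) and that the `_point` fix noted in the code is applied. For a counter-clockwise unit square, a point at the center should get a winding number of 1 and a point outside should get 0.

```python
import numpy

from producer.util import point_inside_polygon, polygon_winding_number

# Unit square, vertices ordered counter-clockwise.
square = numpy.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])

inside = numpy.array([0.5, 0.5])
outside = numpy.array([1.5, 0.5])

print(polygon_winding_number(square, inside))   # 1 for this CCW polygon
print(point_inside_polygon(square, inside))     # True
print(point_inside_polygon(square, outside))    # False
```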
f70390583a82b46fca6f1d21db1f09463d4fd79d
3,979
py
Python
generate_cdiscount_predictions.py
chundiliu/slim_for_Cdiscount
ea7f9d56072072c031094c12c803c63591066c6c
[ "Apache-2.0" ]
null
null
null
generate_cdiscount_predictions.py
chundiliu/slim_for_Cdiscount
ea7f9d56072072c031094c12c803c63591066c6c
[ "Apache-2.0" ]
null
null
null
generate_cdiscount_predictions.py
chundiliu/slim_for_Cdiscount
ea7f9d56072072c031094c12c803c63591066c6c
[ "Apache-2.0" ]
null
null
null
import math
import tensorflow as tf
import os
import struct
import pdb
import numpy as np

from datasets import dataset_factory
from nets import nets_factory
import nets.resnet_v2 as resnet_v2
from preprocessing import preprocessing_factory

slim = tf.contrib.slim


def merge_predictions(predictions_fn):
    '''
    Merge predictions/logit scores for products that are the same.
    '''
    out_f = open(predictions_fn + '_merged', 'wb')
    f = open(predictions_fn, 'r')

    line = f.readline().strip().split()
    curr_id = line[0]
    curr_scores = np.power(np.array([float(x) for x in line[1:]]), 3)
    num_elems = 1
    line = f.readline().strip().split()
    while line != []:
        id = line[0]
        # raise elements to the third power, and then take the cubic root
        scores = np.power(np.array([float(x) for x in line[1:]]), 3)
        if id == curr_id:
            num_elems += 1
            curr_scores += scores
        else:
            curr_scores = np.cbrt(curr_scores / float(num_elems))
            for score in curr_scores:
                out_f.write(struct.pack('>f', score))
            curr_scores = scores
            num_elems = 1
            curr_id = id
        line = f.readline().strip().split()

    curr_scores = np.cbrt(curr_scores / float(num_elems))
    for score in curr_scores:
        out_f.write(struct.pack('>f', score))

    out_f.close()
    f.close()


if __name__ == '__main__':
    checkpoint_dir = '/home/shunan/Code/Data/cdiscount/training'
    dataset_dir = '/home/shunan/Code/Data/cdiscount/tf_records'
    num_classes = 5270
    image_size = 180
    batch_size = 100
    set_name = 'validation'
    data_sizes = {'train': 12195682, 'validation': 175611, 'test': 3095080}
    out_fn = os.path.join(dataset_dir, '{}_predictions.txt'.format(set_name))

    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)

    # loading the dataset
    dataset = dataset_factory.get_dataset('cdiscount', set_name, dataset_dir)

    # dataset provider to load data from the dataset.
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset, shuffle=False, common_queue_capacity=2*batch_size,
        common_queue_min=batch_size)
    [image, label, product_id] = provider.get(['image', 'label', 'product_id'])

    # Pre-processing step.
    image_preprocessing_fn = preprocessing_factory.get_preprocessing('simple', is_training=False)
    image = image_preprocessing_fn(image, image_size, image_size)

    images, labels, product_ids = tf.train.batch(
        [image, label, product_id], batch_size=batch_size, num_threads=1,
        capacity=5 * batch_size)

    # Get the model
    # network_fn = nets_factory.get_network_fn('resnet_v2_152', num_classes=num_classes, is_training=False)
    with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=0.)):
        logits, end_points = resnet_v2.resnet_v2_152(images, num_classes=num_classes, is_training=False)

    # Obtain the trainable variables and a saver
    variables_to_restore = slim.get_variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    output_f = open(out_fn, 'w')

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, checkpoint_file)

        num_iters = int(math.ceil(data_sizes[set_name] / float(batch_size)))
        num_last_batch = batch_size - ((num_iters * batch_size) - data_sizes[set_name])

        for i in range(num_iters):
            output, ids = sess.run([logits, product_ids])
            if i == num_iters - 1:
                output = output[:num_last_batch, :]
                ids = ids[:num_last_batch]
            for j in range(output.shape[0]):
                vec_str = [str(x) for x in output[j, :]]
                output_f.write(str(ids[j]) + ' ' + ' '.join(vec_str) + '\n')

    output_f.close()
35.526786
121
0.654938
import math import tensorflow as tf import os import struct import pdb import numpy as np from datasets import dataset_factory from nets import nets_factory import nets.resnet_v2 as resnet_v2 from preprocessing import preprocessing_factory slim = tf.contrib.slim def merge_predictions(predictions_fn): out_f = open(predictions_fn + '_merged', 'wb') f = open(predictions_fn, 'r') line = f.readline().strip().split() curr_id = line[0] curr_scores = np.power(np.array([float(x) for x in line[1:]]), 3) num_elems = 1 line = f.readline().strip().split() while line != []: id = line[0] scores = np.power(np.array([float(x) for x in line[1:]]), 3) if id == curr_id: num_elems += 1 curr_scores += scores else: curr_scores = np.cbrt(curr_scores / float(num_elems)) for score in curr_scores: out_f.write(struct.pack('>f', score)) curr_scores = scores num_elems = 1 curr_id = id line = f.readline().strip().split() curr_scores = np.cbrt(curr_scores / float(num_elems)) for score in curr_scores: out_f.write(struct.pack('>f', score)) out_f.close() f.close() if __name__ == '__main__': checkpoint_dir = '/home/shunan/Code/Data/cdiscount/training' dataset_dir = '/home/shunan/Code/Data/cdiscount/tf_records' num_classes = 5270 image_size = 180 batch_size = 100 set_name = 'validation' data_sizes = {'train': 12195682, 'validation': 175611, 'test': 3095080} out_fn = os.path.join(dataset_dir, '{}_predictions.txt'.format(set_name)) checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir) dataset = dataset_factory.get_dataset('cdiscount', set_name, dataset_dir) provider = slim.dataset_data_provider.DatasetDataProvider(dataset, shuffle=False, common_queue_capacity=2*batch_size, common_queue_min=batch_size) [image, label, product_id] = provider.get(['image', 'label', 'product_id']) image_preprocessing_fn = preprocessing_factory.get_preprocessing('simple', is_training=False) image = image_preprocessing_fn(image, image_size, image_size) images, labels, product_ids = tf.train.batch([image, label, product_id], batch_size=batch_size, num_threads=1, capacity=5 * batch_size) with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=0.)): logits, end_points = resnet_v2.resnet_v2_152(images, num_classes=num_classes, is_training=False) variables_to_restore = slim.get_variables_to_restore() saver = tf.train.Saver(variables_to_restore) output_f = open(out_fn, 'w') with tf.Session() as sess: coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) sess.run(tf.global_variables_initializer()) saver.restore(sess, checkpoint_file) num_iters = int(math.ceil(data_sizes[set_name] / float(batch_size))) num_last_batch = batch_size - ((num_iters * batch_size) - data_sizes[set_name]) for i in range(num_iters): output, ids = sess.run([logits, product_ids]) if i == num_iters - 1: output = output[:num_last_batch, :] ids = ids[:num_last_batch] for j in range(output.shape[0]): vec_str = [str(x) for x in output[j, :]] output_f.write(str(ids[j]) + ' ' + ' '.join(vec_str) + '\n') output_f.close()
true
true
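The `merge_predictions` function in the record above averages per-image scores for a product with a cubic power mean: cube each score, average the cubes, then take the cube root. Compared with a plain mean, this weights confident (large) scores more heavily. A self-contained illustration of that step with made-up logit scores:

```python
import numpy as np

# Scores for the same product predicted from three different images.
scores = np.array([
    [0.9, 0.1, 0.0],
    [0.2, 0.7, 0.1],
    [0.8, 0.1, 0.1],
])

plain_mean = scores.mean(axis=0)
# Cube, average, cube root: the same math merge_predictions applies per product.
cubic_mean = np.cbrt(np.power(scores, 3).mean(axis=0))

print(plain_mean)   # class 0: ~0.633
print(cubic_mean)   # class 0: ~0.746, pulled up by the confident 0.9/0.8 votes
```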
f703910d9e118fa5006358175ab5d250b7cb827a
15,716
py
Python
src/collective/solr/browser/maintenance.py
IMIO/collective.solr
844219eb3968b34d2b83a7bd5f59340d676d149e
[ "ZPL-1.1" ]
null
null
null
src/collective/solr/browser/maintenance.py
IMIO/collective.solr
844219eb3968b34d2b83a7bd5f59340d676d149e
[ "ZPL-1.1" ]
null
null
null
src/collective/solr/browser/maintenance.py
IMIO/collective.solr
844219eb3968b34d2b83a7bd5f59340d676d149e
[ "ZPL-1.1" ]
null
null
null
# -*- coding: utf-8 -*-
from logging import getLogger
from time import time, strftime

from BTrees.IIBTree import IITreeSet
from Products.CMFCore.utils import getToolByName
from Products.Five.browser import BrowserView
from plone.uuid.interfaces import IUUID, IUUIDAware
from zope.interface import implementer
from zope.component import queryUtility, queryAdapter

from collective.solr.indexer import DefaultAdder
from collective.solr.flare import PloneFlare
from collective.solr.interfaces import ISolrConnectionManager
from collective.solr.interfaces import ISolrMaintenanceView
from collective.solr.interfaces import ISolrAddHandler
from collective.solr.interfaces import ICheckIndexable
from collective.solr.indexer import SolrIndexProcessor
from collective.solr.indexer import boost_values
from collective.solr.parser import parse_date_as_datetime
from collective.solr.parser import SolrResponse
from collective.solr.parser import unmarshallers
from collective.solr.utils import findObjects
from collective.solr.utils import prepareData

logger = getLogger("collective.solr.maintenance")
MAX_ROWS = 1000000000

try:
    from time import process_time
except ImportError:  # Python < 3.8
    from time import clock as process_time


def timer(func=time):
    """set up a generator returning the elapsed time since the last call"""

    def gen(last=func()):
        while True:
            elapsed = func() - last
            last = func()
            yield "%.3fs" % elapsed

    return gen()


def checkpointIterator(function, interval=100):
    """the iterator will call the given function for every nth invocation"""
    counter = 0
    while True:
        counter += 1
        if counter % interval == 0:
            function()
        yield None


def notimeout(func):
    """decorator to prevent long-running solr tasks from timing out"""

    def wrapper(*args, **kw):
        """wrapper with random docstring so ttw access still works"""
        manager = queryUtility(ISolrConnectionManager)
        manager.setTimeout(None, lock=True)
        try:
            return func(*args, **kw)
        finally:
            manager.setTimeout(None, lock=False)

    return wrapper


@implementer(ISolrMaintenanceView)
class SolrMaintenanceView(BrowserView):
    """helper view for indexing all portal content in Solr"""

    def mklog(self, use_std_log=False):
        """helper to prepend a time stamp to the output"""
        write = self.request.RESPONSE.write

        def log(msg, timestamp=True):
            if timestamp:
                msg = strftime("%Y/%m/%d-%H:%M:%S ") + msg
            write(msg.encode("utf-8"))
            if use_std_log:
                logger.info(msg)

        return log

    def optimize(self):
        """optimize solr indexes"""
        manager = queryUtility(ISolrConnectionManager)
        conn = manager.getConnection()
        conn.setTimeout(None)
        conn.commit(optimize=True)
        return "solr indexes optimized."

    def clear(self):
        """clear all data from solr, i.e. delete all indexed objects"""
        manager = queryUtility(ISolrConnectionManager)
        uniqueKey = manager.getSchema().uniqueKey
        conn = manager.getConnection()
        conn.setTimeout(None)
        conn.deleteByQuery("%s:[* TO *]" % uniqueKey)
        conn.commit()
        return "solr index cleared."

    def reindex(
        self,
        batch=1000,
        skip=0,
        limit=0,
        ignore_portal_types=None,
        only_portal_types=None,
        idxs=[],
        ignore_exceptions=False,
    ):
        """find all contentish objects (meaning all objects derived from one
        of the catalog mixin classes) and (re)indexes them"""
        if ignore_portal_types and only_portal_types:
            raise ValueError(
                "It is not possible to combine "
                "ignore_portal_types with only_portal_types"
            )
        atomic = idxs != []
        manager = queryUtility(ISolrConnectionManager)
        proc = SolrIndexProcessor(manager)
        conn = manager.getConnection()
        zodb_conn = self.context._p_jar
        log = self.mklog()
        log("reindexing solr catalog...\n")
        if skip:
            log("skipping indexing of %d object(s)...\n" % skip)
        if limit:
            log("limiting indexing to %d object(s)...\n" % limit)
        real = timer()  # real time
        lap = timer()  # real lap time (for intermediate commits)
        cpu = timer(process_time)  # cpu time
        processed = 0
        schema = manager.getSchema()
        key = schema.uniqueKey
        updates = {}  # list to hold data to be updated

        def flush():
            return conn.commit(soft=True)

        flush = notimeout(flush)

        def checkPoint():
            for my_boost_values, data in updates.values():
                adder = data.pop("_solr_adder")
                try:
                    adder(conn, boost_values=my_boost_values, **data)
                except Exception as e:
                    logger.warning("Error %s @ %s", e, data["path_string"])
                    if not ignore_exceptions:
                        raise
            updates.clear()
            msg = (
                "intermediate commit (%d items processed, "
                "last batch in %s)...\n" % (processed, next(lap))
            )
            log(msg)
            logger.info(msg)
            flush()
            zodb_conn.cacheGC()

        cpi = checkpointIterator(checkPoint, batch)
        count = 0
        if atomic:
            log("indexing only {0} \n".format(idxs))
        for path, obj in findObjects(self.context):
            if ICheckIndexable(obj)():
                count += 1
                if count <= skip:
                    continue

                if ignore_portal_types:
                    if obj.portal_type in ignore_portal_types:
                        continue

                if only_portal_types:
                    if obj.portal_type not in only_portal_types:
                        continue

                attributes = None
                if atomic:
                    attributes = idxs

                # For atomic updates to work the uniqueKey must be present
                # in *every* update operation.
                if attributes and key not in attributes:
                    attributes.append(key)

                data, missing = proc.getData(obj, attributes=attributes)
                prepareData(data)

                if not missing or atomic:
                    value = data.get(key, None)
                    if value is not None:
                        log("indexing %r\n" % obj)

                        pt = data.get("portal_type", "default")
                        adder = queryAdapter(obj, ISolrAddHandler, name=pt)
                        if adder is None:
                            adder = DefaultAdder(obj)
                        data["_solr_adder"] = adder
                        updates[value] = (boost_values(obj, data), data)
                        processed += 1
                        next(cpi)
                else:
                    log("missing data, skipping indexing of %r.\n" % obj)
                if limit and count >= (skip + limit):
                    break

        checkPoint()
        conn.commit()
        log("solr index rebuilt.\n")
        msg = "processed %d items in %s (%s cpu time)."
        msg = msg % (processed, next(real), next(cpu))
        log(msg)
        logger.info(msg)

    def sync(self, batch=1000, preImportDeleteQuery="*:*"):
        """Sync the Solr index with the portal catalog. Records contained in
        the catalog but not in Solr will be indexed and records not contained
        in the catalog will be removed.
        """
        manager = queryUtility(ISolrConnectionManager)
        proc = SolrIndexProcessor(manager)
        conn = manager.getConnection()
        key = queryUtility(ISolrConnectionManager).getSchema().uniqueKey
        zodb_conn = self.context._p_jar
        catalog = getToolByName(self.context, "portal_catalog")
        getIndex = catalog._catalog.getIndex
        modified_index = getIndex("modified")
        uid_index = getIndex(key)
        log = self.mklog()
        real = timer()  # real time
        lap = timer()  # real lap time (for intermediate commits)
        cpu = timer(process_time)  # cpu time
        # get Solr status
        response = conn.search(
            q=preImportDeleteQuery, rows=MAX_ROWS, fl="%s modified" % key
        )
        # avoid creating DateTime instances
        simple_unmarshallers = unmarshallers.copy()
        simple_unmarshallers["date"] = parse_date_as_datetime
        flares = SolrResponse(response, simple_unmarshallers)
        response.close()
        solr_results = {}
        solr_uids = set()

        def _utc_convert(value):
            t_tup = value.utctimetuple()
            return (
                ((t_tup[0] * 12 + t_tup[1]) * 31 + t_tup[2]) * 24 + t_tup[3]
            ) * 60 + t_tup[4]

        for flare in flares:
            uid = flare[key]
            solr_uids.add(uid)
            solr_results[uid] = _utc_convert(flare["modified"])
        # get catalog status
        cat_results = {}
        cat_uids = set()
        for uid, rid in uid_index._index.items():
            cat_uids.add(uid)
            cat_results[uid] = rid
        # differences
        index = cat_uids.difference(solr_uids)
        solr_uids.difference_update(cat_uids)
        unindex = solr_uids
        processed = 0
        flush = notimeout(lambda: conn.flush())

        def checkPoint():
            msg = (
                "intermediate commit (%d items processed, "
                "last batch in %s)...\n" % (processed, next(lap))
            )
            log(msg)
            logger.info(msg)
            flush()
            zodb_conn.cacheGC()

        cpi = checkpointIterator(checkPoint, batch)
        # Look up objects
        uid_rid_get = cat_results.get
        rid_path_get = catalog._catalog.paths.get
        catalog_traverse = catalog.unrestrictedTraverse

        def lookup(
            uid,
            rid=None,
            uid_rid_get=uid_rid_get,
            rid_path_get=rid_path_get,
            catalog_traverse=catalog_traverse,
        ):
            if rid is None:
                rid = uid_rid_get(uid)
            if not rid:
                return None
            if not isinstance(rid, int):
                rid = tuple(rid)[0]
            path = rid_path_get(rid)
            if not path:
                return None
            try:
                obj = catalog_traverse(path)
            except AttributeError:
                return None
            return obj

        log('processing %d "unindex" operations next...\n' % len(unindex))
        op = notimeout(lambda uid: conn.delete(id=uid))
        for uid in unindex:
            obj = lookup(uid)
            if obj is None:
                op(uid)
                processed += 1
                next(cpi)
            else:
                log("not unindexing existing object %r.\n" % uid)
        log('processing %d "index" operations next...\n' % len(index))
        op = notimeout(lambda obj: proc.index(obj))
        for uid in index:
            obj = lookup(uid)
            if ICheckIndexable(obj)():
                op(obj)
                processed += 1
                next(cpi)
            else:
                log("not indexing unindexable object %r.\n" % uid)
            if obj is not None:
                obj._p_deactivate()
        log('processing "reindex" operations next...\n')
        op = notimeout(lambda obj: proc.reindex(obj))
        cat_mod_get = modified_index._unindex.get
        solr_mod_get = solr_results.get
        done = unindex.union(index)
        for uid, rid in cat_results.items():
            if uid in done:
                continue
            if isinstance(rid, IITreeSet):
                rid = list(rid.keys())[0]
            if cat_mod_get(rid) != solr_mod_get(uid):
                obj = lookup(uid, rid=rid)
                if ICheckIndexable(obj)():
                    op(obj)
                    processed += 1
                    next(cpi)
                else:
                    log("not reindexing unindexable object %r.\n" % uid)
                if obj is not None:
                    obj._p_deactivate()
        conn.commit()
        log("solr index synced.\n")
        msg = "processed %d object(s) in %s (%s cpu time)."
        msg = msg % (processed, next(real), next(cpu))
        log(msg)
        logger.info(msg)

    def cleanup(self, batch=1000):
        """remove entries from solr that don't have a corresponding Zope
        object or have a different UID than the real object"""
        manager = queryUtility(ISolrConnectionManager)
        proc = SolrIndexProcessor(manager)
        conn = manager.getConnection()
        log = self.mklog(use_std_log=True)
        log("cleaning up solr index...\n")
        key = manager.getSchema().uniqueKey

        start = 0
        resp = SolrResponse(conn.search(q="*:*", rows=batch, start=start))
        res = resp.results()
        log("%s items in solr catalog\n" % resp.response.numFound)
        deleted = 0
        reindexed = 0
        while len(res) > 0:
            for flare in res:
                try:
                    ob = PloneFlare(flare).getObject()
                except Exception as err:
                    log(
                        "Error getting object, removing: %s (%s)\n"
                        % (flare["path_string"], err)
                    )
                    conn.delete(flare[key])
                    deleted += 1
                    continue
                if ob is None:
                    log("Object not found, removing: %s\n" % (flare["path_string"]))
                    conn.delete(flare[key])
                    deleted += 1
                    continue
                if not IUUIDAware.providedBy(ob):
                    no_skipping_msg = (
                        "Object %s of type %s does not "
                        + "support uuids, skipping.\n"
                    )
                    log(
                        no_skipping_msg
                        % ("/".join(ob.getPhysicalPath()), ob.meta_type)
                    )
                    continue
                uuid = IUUID(ob)
                if uuid != flare[key]:
                    log(
                        "indexed under wrong UID, removing: %s\n"
                        % flare["path_string"]
                    )
                    conn.delete(flare[key])
                    deleted += 1
                    realob_res = SolrResponse(
                        conn.search(q="%s:%s" % (key, uuid))
                    ).results()
                    if len(realob_res) == 0:
                        log("no sane entry for last object, reindexing\n")
                        data, missing = proc.getData(ob)
                        prepareData(data)
                        if not missing:
                            boost = boost_values(ob, data)
                            conn.add(boost_values=boost, **data)
                            reindexed += 1
                        else:
                            log(" missing data, cannot index.\n")
            log("handled batch of %d items, committing\n" % len(res))
            conn.commit()
            start += batch
            resp = SolrResponse(conn.search(q="*:*", rows=batch, start=start))
            res = resp.results()
        finished_msg = (
            "solr cleanup finished, %s item(s) removed, "
            + "%s item(s) reindexed\n"
        )
        msg = finished_msg % (deleted, reindexed)
        log(msg)
        logger.info(msg)
35.718182
88
0.53646
from logging import getLogger from time import time, strftime from BTrees.IIBTree import IITreeSet from Products.CMFCore.utils import getToolByName from Products.Five.browser import BrowserView from plone.uuid.interfaces import IUUID, IUUIDAware from zope.interface import implementer from zope.component import queryUtility, queryAdapter from collective.solr.indexer import DefaultAdder from collective.solr.flare import PloneFlare from collective.solr.interfaces import ISolrConnectionManager from collective.solr.interfaces import ISolrMaintenanceView from collective.solr.interfaces import ISolrAddHandler from collective.solr.interfaces import ICheckIndexable from collective.solr.indexer import SolrIndexProcessor from collective.solr.indexer import boost_values from collective.solr.parser import parse_date_as_datetime from collective.solr.parser import SolrResponse from collective.solr.parser import unmarshallers from collective.solr.utils import findObjects from collective.solr.utils import prepareData logger = getLogger("collective.solr.maintenance") MAX_ROWS = 1000000000 try: from time import process_time except ImportError: from time import clock as process_time def timer(func=time): def gen(last=func()): while True: elapsed = func() - last last = func() yield "%.3fs" % elapsed return gen() def checkpointIterator(function, interval=100): counter = 0 while True: counter += 1 if counter % interval == 0: function() yield None def notimeout(func): def wrapper(*args, **kw): manager = queryUtility(ISolrConnectionManager) manager.setTimeout(None, lock=True) try: return func(*args, **kw) finally: manager.setTimeout(None, lock=False) return wrapper @implementer(ISolrMaintenanceView) class SolrMaintenanceView(BrowserView): def mklog(self, use_std_log=False): write = self.request.RESPONSE.write def log(msg, timestamp=True): if timestamp: msg = strftime("%Y/%m/%d-%H:%M:%S ") + msg write(msg.encode("utf-8")) if use_std_log: logger.info(msg) return log def optimize(self): manager = queryUtility(ISolrConnectionManager) conn = manager.getConnection() conn.setTimeout(None) conn.commit(optimize=True) return "solr indexes optimized." def clear(self): manager = queryUtility(ISolrConnectionManager) uniqueKey = manager.getSchema().uniqueKey conn = manager.getConnection() conn.setTimeout(None) conn.deleteByQuery("%s:[* TO *]" % uniqueKey) conn.commit() return "solr index cleared." 
def reindex( self, batch=1000, skip=0, limit=0, ignore_portal_types=None, only_portal_types=None, idxs=[], ignore_exceptions=False, ): if ignore_portal_types and only_portal_types: raise ValueError( "It is not possible to combine " "ignore_portal_types with only_portal_types" ) atomic = idxs != [] manager = queryUtility(ISolrConnectionManager) proc = SolrIndexProcessor(manager) conn = manager.getConnection() zodb_conn = self.context._p_jar log = self.mklog() log("reindexing solr catalog...\n") if skip: log("skipping indexing of %d object(s)...\n" % skip) if limit: log("limiting indexing to %d object(s)...\n" % limit) real = timer() lap = timer() cpu = timer(process_time) processed = 0 schema = manager.getSchema() key = schema.uniqueKey updates = {} def flush(): return conn.commit(soft=True) flush = notimeout(flush) def checkPoint(): for my_boost_values, data in updates.values(): adder = data.pop("_solr_adder") try: adder(conn, boost_values=my_boost_values, **data) except Exception as e: logger.warning("Error %s @ %s", e, data["path_string"]) if not ignore_exceptions: raise updates.clear() msg = ( "intermediate commit (%d items processed, " "last batch in %s)...\n" % (processed, next(lap)) ) log(msg) logger.info(msg) flush() zodb_conn.cacheGC() cpi = checkpointIterator(checkPoint, batch) count = 0 if atomic: log("indexing only {0} \n".format(idxs)) for path, obj in findObjects(self.context): if ICheckIndexable(obj)(): count += 1 if count <= skip: continue if ignore_portal_types: if obj.portal_type in ignore_portal_types: continue if only_portal_types: if obj.portal_type not in only_portal_types: continue attributes = None if atomic: attributes = idxs if attributes and key not in attributes: attributes.append(key) data, missing = proc.getData(obj, attributes=attributes) prepareData(data) if not missing or atomic: value = data.get(key, None) if value is not None: log("indexing %r\n" % obj) pt = data.get("portal_type", "default") adder = queryAdapter(obj, ISolrAddHandler, name=pt) if adder is None: adder = DefaultAdder(obj) data["_solr_adder"] = adder updates[value] = (boost_values(obj, data), data) processed += 1 next(cpi) else: log("missing data, skipping indexing of %r.\n" % obj) if limit and count >= (skip + limit): break checkPoint() conn.commit() log("solr index rebuilt.\n") msg = "processed %d items in %s (%s cpu time)." 
msg = msg % (processed, next(real), next(cpu)) log(msg) logger.info(msg) def sync(self, batch=1000, preImportDeleteQuery="*:*"): manager = queryUtility(ISolrConnectionManager) proc = SolrIndexProcessor(manager) conn = manager.getConnection() key = queryUtility(ISolrConnectionManager).getSchema().uniqueKey zodb_conn = self.context._p_jar catalog = getToolByName(self.context, "portal_catalog") getIndex = catalog._catalog.getIndex modified_index = getIndex("modified") uid_index = getIndex(key) log = self.mklog() real = timer() lap = timer() cpu = timer(process_time) response = conn.search( q=preImportDeleteQuery, rows=MAX_ROWS, fl="%s modified" % key ) simple_unmarshallers = unmarshallers.copy() simple_unmarshallers["date"] = parse_date_as_datetime flares = SolrResponse(response, simple_unmarshallers) response.close() solr_results = {} solr_uids = set() def _utc_convert(value): t_tup = value.utctimetuple() return ( ((t_tup[0] * 12 + t_tup[1]) * 31 + t_tup[2]) * 24 + t_tup[3] ) * 60 + t_tup[4] for flare in flares: uid = flare[key] solr_uids.add(uid) solr_results[uid] = _utc_convert(flare["modified"]) cat_results = {} cat_uids = set() for uid, rid in uid_index._index.items(): cat_uids.add(uid) cat_results[uid] = rid index = cat_uids.difference(solr_uids) solr_uids.difference_update(cat_uids) unindex = solr_uids processed = 0 flush = notimeout(lambda: conn.flush()) def checkPoint(): msg = ( "intermediate commit (%d items processed, " "last batch in %s)...\n" % (processed, next(lap)) ) log(msg) logger.info(msg) flush() zodb_conn.cacheGC() cpi = checkpointIterator(checkPoint, batch) uid_rid_get = cat_results.get rid_path_get = catalog._catalog.paths.get catalog_traverse = catalog.unrestrictedTraverse def lookup( uid, rid=None, uid_rid_get=uid_rid_get, rid_path_get=rid_path_get, catalog_traverse=catalog_traverse, ): if rid is None: rid = uid_rid_get(uid) if not rid: return None if not isinstance(rid, int): rid = tuple(rid)[0] path = rid_path_get(rid) if not path: return None try: obj = catalog_traverse(path) except AttributeError: return None return obj log('processing %d "unindex" operations next...\n' % len(unindex)) op = notimeout(lambda uid: conn.delete(id=uid)) for uid in unindex: obj = lookup(uid) if obj is None: op(uid) processed += 1 next(cpi) else: log("not unindexing existing object %r.\n" % uid) log('processing %d "index" operations next...\n' % len(index)) op = notimeout(lambda obj: proc.index(obj)) for uid in index: obj = lookup(uid) if ICheckIndexable(obj)(): op(obj) processed += 1 next(cpi) else: log("not indexing unindexable object %r.\n" % uid) if obj is not None: obj._p_deactivate() log('processing "reindex" operations next...\n') op = notimeout(lambda obj: proc.reindex(obj)) cat_mod_get = modified_index._unindex.get solr_mod_get = solr_results.get done = unindex.union(index) for uid, rid in cat_results.items(): if uid in done: continue if isinstance(rid, IITreeSet): rid = list(rid.keys())[0] if cat_mod_get(rid) != solr_mod_get(uid): obj = lookup(uid, rid=rid) if ICheckIndexable(obj)(): op(obj) processed += 1 next(cpi) else: log("not reindexing unindexable object %r.\n" % uid) if obj is not None: obj._p_deactivate() conn.commit() log("solr index synced.\n") msg = "processed %d object(s) in %s (%s cpu time)." 
msg = msg % (processed, next(real), next(cpu)) log(msg) logger.info(msg) def cleanup(self, batch=1000): manager = queryUtility(ISolrConnectionManager) proc = SolrIndexProcessor(manager) conn = manager.getConnection() log = self.mklog(use_std_log=True) log("cleaning up solr index...\n") key = manager.getSchema().uniqueKey start = 0 resp = SolrResponse(conn.search(q="*:*", rows=batch, start=start)) res = resp.results() log("%s items in solr catalog\n" % resp.response.numFound) deleted = 0 reindexed = 0 while len(res) > 0: for flare in res: try: ob = PloneFlare(flare).getObject() except Exception as err: log( "Error getting object, removing: %s (%s)\n" % (flare["path_string"], err) ) conn.delete(flare[key]) deleted += 1 continue if ob is None: log("Object not found, removing: %s\n" % (flare["path_string"])) conn.delete(flare[key]) deleted += 1 continue if not IUUIDAware.providedBy(ob): no_skipping_msg = ( "Object %s of type %s does not " + "support uuids, skipping.\n" ) log( no_skipping_msg % ("/".join(ob.getPhysicalPath()), ob.meta_type) ) continue uuid = IUUID(ob) if uuid != flare[key]: log( "indexed under wrong UID, removing: %s\n" % flare["path_string"] ) conn.delete(flare[key]) deleted += 1 realob_res = SolrResponse( conn.search(q="%s:%s" % (key, uuid)) ).results() if len(realob_res) == 0: log("no sane entry for last object, reindexing\n") data, missing = proc.getData(ob) prepareData(data) if not missing: boost = boost_values(ob, data) conn.add(boost_values=boost, **data) reindexed += 1 else: log(" missing data, cannot index.\n") log("handled batch of %d items, committing\n" % len(res)) conn.commit() start += batch resp = SolrResponse(conn.search(q="*:*", rows=batch, start=start)) res = resp.results() finished_msg = ( "solr cleanup finished, %s item(s) removed, " + "%s item(s) reindexed\n" ) msg = finished_msg % (deleted, reindexed) log(msg) logger.info(msg)
true
true
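The `timer`/`checkpointIterator` pair in the record above is a reusable batching idiom: one generator yields the elapsed time per lap, the other calls a flush function on every Nth advance while the main loop stays oblivious. A small sketch of the same idiom outside Solr, assuming the two helpers can be imported from `collective.solr.browser.maintenance` (the path in the record's metadata):

```python
from collective.solr.browser.maintenance import checkpointIterator, timer

lap = timer()  # yields elapsed wall-clock time since the previous lap


def commit():
    # Stand-in for the real flush/commit work done at each checkpoint.
    print("intermediate commit, last batch in", next(lap))


# Fire commit() on every third advance of the iterator.
cpi = checkpointIterator(commit, interval=3)
for item in range(10):
    # ... process one item here ...
    next(cpi)
```

Keeping the interval logic in the iterator means the processing loop needs no counters of its own, which is exactly how `reindex` and `sync` use it.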
f70391cad5dd31becb556ae04a51819ce17fe64b
1,881
py
Python
data-and-statistics/understanding-a-population-using-sampling.py
jeantardelli/math-with-python
119bbbc62329c0d834d965232239bd3b39116cc1
[ "MIT" ]
1
2021-01-16T21:42:42.000Z
2021-01-16T21:42:42.000Z
data-and-statistics/understanding-a-population-using-sampling.py
jeantardelli/math-with-python
119bbbc62329c0d834d965232239bd3b39116cc1
[ "MIT" ]
null
null
null
data-and-statistics/understanding-a-population-using-sampling.py
jeantardelli/math-with-python
119bbbc62329c0d834d965232239bd3b39116cc1
[ "MIT" ]
null
null
null
""" One of the central problems in statistics is to make estimations — and quantify how good these estimations are — of the distribution of an entire population given only a small (random) sample. A classic example is to estimate the average height of all the people in a country when measuring the height of a randomly selected sample of people. These kinds of problems are particularly interesting when the true population distribution, by which we usually mean the mean of the whole population, cannot feasibly be measured. In this case, we must rely on our knowledge of statistics and a (usually much smaller) randomly selected sample to estimate the true population mean and standard deviation, and also quantify how good our estimations are. It is the latter that is the source of confusion, misunderstanding, and misrepresentation of statistics in the wider world. This module illustrates how to estimate the population mean and give a confidence interval fo these estimates. """ import math import pandas as pd from scipy import stats sample_data = pd.Series([ 172.3, 171.3, 164.7, 162.9, 172.5, 176.3, 174.8, 171.9, 176.8, 167.8, 164.5, 179.7, 157.8, 170.6, 189.9, 185. , 172.7, 165.5, 174.5, 171.5]) sample_mean = sample_data.mean() sample_std = sample_data.std() print(f"Mean: {sample_mean}, st. dev: {sample_std}") # Mean: 172.15, st. dev: 7.473778724383846 N = sample_data.count() std_err = sample_std/math.sqrt(N) cv_95, cv_99 = stats.t.ppf([0.975, 0.995], df=N-1) pm_95 = cv_95 * std_err pm_99 = cv_99 * std_err conf_interval_95 = [sample_mean - pm_95, sample_mean + pm_95] conf_interval_99 = [sample_mean - pm_99, sample_mean + pm_99] print(f"95% confidence: {conf_interval_95}") print(f"99% confidence: {conf_interval_99}") # 95% confidence: [168.65216388659374, 175.64783611340627] # 99% confidence: [167.36884119608774, 176.93115880391227]
40.021277
80
0.758107
import math import pandas as pd from scipy import stats sample_data = pd.Series([ 172.3, 171.3, 164.7, 162.9, 172.5, 176.3, 174.8, 171.9, 176.8, 167.8, 164.5, 179.7, 157.8, 170.6, 189.9, 185. , 172.7, 165.5, 174.5, 171.5]) sample_mean = sample_data.mean() sample_std = sample_data.std() print(f"Mean: {sample_mean}, st. dev: {sample_std}") N = sample_data.count() std_err = sample_std/math.sqrt(N) cv_95, cv_99 = stats.t.ppf([0.975, 0.995], df=N-1) pm_95 = cv_95 * std_err pm_99 = cv_99 * std_err conf_interval_95 = [sample_mean - pm_95, sample_mean + pm_95] conf_interval_99 = [sample_mean - pm_99, sample_mean + pm_99] print(f"95% confidence: {conf_interval_95}") print(f"99% confidence: {conf_interval_99}")
true
true
f703920ef91c9979de1ce19afa587a2ed363587a
500
py
Python
algorithm/github_al/flatten.py
freedomDR/coding
310a68077de93ef445ccd2929e90ba9c22a9b8eb
[ "MIT" ]
null
null
null
algorithm/github_al/flatten.py
freedomDR/coding
310a68077de93ef445ccd2929e90ba9c22a9b8eb
[ "MIT" ]
null
null
null
algorithm/github_al/flatten.py
freedomDR/coding
310a68077de93ef445ccd2929e90ba9c22a9b8eb
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# `Iterable` lives in `collections.abc`; importing it from `collections`
# was deprecated and fails on Python 3.10+.
from collections.abc import Iterable


def flatten(input_arr, output_arr=None):
    """Recursively flatten nested iterables into a single list."""
    if output_arr is None:
        output_arr = []
    for t in input_arr:
        # Strings and bytes are iterable but must be treated as atoms,
        # otherwise iterating their single characters recurses forever.
        if isinstance(t, Iterable) and not isinstance(t, (str, bytes)):
            flatten(t, output_arr)
        else:
            output_arr.append(t)
    return output_arr


def flatten_iter(iterable):
    """Lazily yield the leaves of a nested iterable."""
    for t in iterable:
        if isinstance(t, Iterable) and not isinstance(t, (str, bytes)):
            yield from flatten_iter(t)
        else:
            yield t
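A quick usage check of both helpers (my own example, not from the repository); the eager and lazy versions should agree on any mix of nested lists and tuples:

    nested = [1, [2, [3, 4]], (5, 6)]
    assert flatten(nested) == [1, 2, 3, 4, 5, 6]
    assert list(flatten_iter(nested)) == [1, 2, 3, 4, 5, 6]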
22.727273
42
0.592
from collections.abc import Iterable


def flatten(input_arr, output_arr=None):
    if output_arr is None:
        output_arr = []
    for t in input_arr:
        if isinstance(t, Iterable) and not isinstance(t, (str, bytes)):
            flatten(t, output_arr)
        else:
            output_arr.append(t)
    return output_arr


def flatten_iter(iterable):
    for t in iterable:
        if isinstance(t, Iterable) and not isinstance(t, (str, bytes)):
            yield from flatten_iter(t)
        else:
            yield t
true
true
f703926c80160625841d05702077db2125d9440e
23,974
py
Python
keras/optimizer_v2/rmsprop_test.py
Bhavay192/keras
ed6ca50cceb2a071f86e5e9af5076b1d62fd2531
[ "Apache-2.0" ]
37,222
2017-12-13T00:52:55.000Z
2022-03-31T22:34:35.000Z
keras/optimizer_v2/rmsprop_test.py
amirsadafi/keras
f1e9c76675981ee6683f54a3ce569212d551d12d
[ "Apache-2.0" ]
7,624
2017-12-13T01:03:40.000Z
2022-03-31T23:57:24.000Z
keras/optimizer_v2/rmsprop_test.py
amirsadafi/keras
f1e9c76675981ee6683f54a3ce569212d551d12d
[ "Apache-2.0" ]
14,914
2017-12-13T02:30:46.000Z
2022-03-30T14:49:16.000Z
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop."""

import tensorflow.compat.v2 as tf

import copy
import itertools
import math

from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from keras import combinations
from keras import testing_utils
from keras.optimizer_v2 import learning_rate_schedule
from keras.optimizer_v2 import rmsprop

_DATA_TYPES = [tf.half, tf.float32, tf.float64, tf.complex64, tf.complex128]

_TEST_PARAM_VALUES = [
    # learning_rate, rho, momentum, epsilon, centered
    [0.05, 0.9, 0.0, 1e-3, True],
    [0.05, 0.9, 0.0, 1e-3, False],
    [0.1, 0.9, 0.0, 1e-3, True],
    [0.01, 0.9, 0.0, 1e-5, True],
    [0.01, 0.9, 0.9, 1e-5, True],
]

_TESTPARAMS = [
    [data_type] + values
    for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
]


class RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase):

  def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum,
                            epsilon, centered):
    rms_t = rms * rho + (1 - rho) * g * g
    if centered:
      mg_t = mg * rho + (1 - rho) * g
      denom_t = rms_t - mg_t * mg_t
    else:
      mg_t = mg
      denom_t = rms_t
    if momentum > 0.:
      mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))
      var_t = var - mom_t
    else:
      mom_t = mom
      var_t = var - lr * g / (np.sqrt(denom_t) + epsilon)
    return var_t, mg_t, rms_t, mom_t

  def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
                                   lr, rho, momentum, epsilon, centered):
    mg_t = copy.deepcopy(mg)
    rms_t = copy.deepcopy(rms)
    mom_t = copy.deepcopy(mom)
    var_t = copy.deepcopy(var)
    for i in range(len(gindexs)):
      gindex = gindexs[i]
      gvalue = gvalues[i]
      rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue
      if centered:
        mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue
        denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex]
      else:
        denom_t = rms_t[gindex]
      if momentum > 0.:
        mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t + epsilon)
        var_t[gindex] = var[gindex] - mom_t[gindex]
      else:
        mom_t[gindex] = mom[gindex]
        var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon)
    return var_t, mg_t, rms_t, mom_t

  def testDense(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
      with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu():
        # Initialize variables for numpy implementation.
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np, dtype=dtype)
        var1 = tf.Variable(var1_np, dtype=dtype)
        grads0 = tf.constant(grads0_np, dtype=dtype)
        grads1 = tf.constant(grads1_np, dtype=dtype)
        opt = rmsprop.RMSprop(
            learning_rate=learning_rate,
            rho=rho,
            momentum=momentum,
            epsilon=epsilon,
            centered=centered)

        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(tf.compat.v1.global_variables_initializer())

        if centered:
          mg0 = opt.get_slot(var0, "mg")
          mg1 = opt.get_slot(var1, "mg")
        else:
          mg0 = None
          mg1 = None

        if momentum > 0.:
          mom0 = opt.get_slot(var0, "momentum")
          mom1 = opt.get_slot(var1, "momentum")
        else:
          mom0 = None
          mom1 = None

        rms0 = opt.get_slot(var0, "rms")
        self.assertIsNotNone(rms0)
        rms1 = opt.get_slot(var1, "rms")
        self.assertIsNotNone(rms1)

        mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))

        # Run 3 steps of RMSprop
        for _ in range(1, 4):
          self.evaluate(update)

          var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
              var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
              rho, momentum, epsilon, centered)
          var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
              var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
              rho, momentum, epsilon, centered)

          # Validate updated params
          if centered:
            self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
            self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
          if momentum > 0.:
            self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
            self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
          self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
          self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

  def testDenseWithLearningRateDecay(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    with tf.Graph().as_default():
      var0_np = np.array([1.0, 2.0])
      grads0_np = np.array([0.1, 0.2])
      var1_np = np.array([3.0, 4.0])
      grads1_np = np.array([0.01, 0.2])

      var0 = tf.Variable(var0_np)
      var1 = tf.Variable(var1_np)
      grads0 = tf.constant(grads0_np)
      grads1 = tf.constant(grads1_np)
      learning_rate = 0.01
      rho = 0.9
      momentum = 0.0
      epsilon = 1e-7
      centered = False
      decay = 0.5
      opt = rmsprop.RMSprop(
          learning_rate=learning_rate,
          rho=rho,
          momentum=momentum,
          epsilon=epsilon,
          centered=centered,
          decay=decay)

      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      self.evaluate(tf.compat.v1.global_variables_initializer())

      rms0 = opt.get_slot(var0, "rms")
      self.assertIsNotNone(rms0)
      rms1 = opt.get_slot(var1, "rms")
      self.assertIsNotNone(rms1)
      if momentum > 0.:
        mom0 = opt.get_slot(var0, "momentum")
        mom1 = opt.get_slot(var1, "momentum")
      else:
        mom0 = None
        mom1 = None

      mg0_np = np.array([0.0, 0.0])
      mg1_np = np.array([0.0, 0.0])
      rms0_np = np.array([0.0, 0.0])
      rms1_np = np.array([0.0, 0.0])
      mom0_np = np.array([0.0, 0.0])
      mom1_np = np.array([0.0, 0.0])

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))

      # Run 2 steps of RMSprop
      for t in range(2):
        self.evaluate(update)

        lr = learning_rate / (1 + decay * t)
        var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
            var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
            epsilon, centered)
        var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
            var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
            epsilon, centered)

        # Validate updated params
        self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
        self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
        if momentum > 0.:
          self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
          self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
        self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
        self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

  def testDenseWithLearningRateInverseTimeDecay(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    with tf.Graph().as_default():
      var0_np = np.array([1.0, 2.0])
      grads0_np = np.array([0.1, 0.2])
      var1_np = np.array([3.0, 4.0])
      grads1_np = np.array([0.01, 0.2])

      var0 = tf.Variable(var0_np)
      var1 = tf.Variable(var1_np)
      grads0 = tf.constant(grads0_np)
      grads1 = tf.constant(grads1_np)
      learning_rate = 0.01
      rho = 0.9
      momentum = 0.0
      epsilon = 1e-7
      centered = False
      decay = 0.5
      lr_schedule = learning_rate_schedule.InverseTimeDecay(
          learning_rate, decay_steps=1.0, decay_rate=decay)
      opt = rmsprop.RMSprop(
          learning_rate=lr_schedule,
          rho=rho,
          momentum=momentum,
          epsilon=epsilon,
          centered=centered)

      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      self.evaluate(tf.compat.v1.global_variables_initializer())

      rms0 = opt.get_slot(var0, "rms")
      self.assertIsNotNone(rms0)
      rms1 = opt.get_slot(var1, "rms")
      self.assertIsNotNone(rms1)
      if momentum > 0.:
        mom0 = opt.get_slot(var0, "momentum")
        mom1 = opt.get_slot(var1, "momentum")
      else:
        mom0 = None
        mom1 = None

      mg0_np = np.array([0.0, 0.0])
      mg1_np = np.array([0.0, 0.0])
      rms0_np = np.array([0.0, 0.0])
      rms1_np = np.array([0.0, 0.0])
      mom0_np = np.array([0.0, 0.0])
      mom1_np = np.array([0.0, 0.0])

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))

      # Run 2 steps of RMSprop
      for t in range(2):
        self.evaluate(update)

        lr = learning_rate / (1 + decay * t)
        var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
            var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
            epsilon, centered)
        var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
            var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
            epsilon, centered)

        # Validate updated params
        self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
        self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
        if momentum > 0.:
          self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
          self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
        self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
        self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

  def testMinimizeSparseResourceVariable(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    with tf.Graph().as_default():
      for dtype in _DATA_TYPES:
        var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)

        def loss():
          pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
          return pred * pred

        sgd_op = rmsprop.RMSprop(
            learning_rate=1.0,
            rho=0.0,
            momentum=0.0,
            epsilon=0.0,
            centered=False).minimize(
                loss, var_list=[var0])
        self.evaluate(tf.compat.v1.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params
        self.assertAllCloseAccordingToType([[0., 1.]],
                                           self.evaluate(var0),
                                           atol=0.01)

  def testMinimizeSparseResourceVariableCentered(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    with tf.Graph().as_default():
      for dtype in _DATA_TYPES:
        var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)

        def loss():
          pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x)  # pylint: disable=cell-var-from-loop
          return pred * pred

        # loss = lambda: pred * pred  # pylint: disable=cell-var-from-loop
        sgd_op = rmsprop.RMSprop(
            learning_rate=1.0,
            rho=0.0,
            momentum=0.0,
            epsilon=1.0,
            centered=True).minimize(
                loss, var_list=[var0])
        self.evaluate(tf.compat.v1.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
        # Run 1 step of sgd
        self.evaluate(sgd_op)
        # Validate updated params
        self.assertAllCloseAccordingToType([[-111, -138]],
                                           self.evaluate(var0),
                                           atol=0.01)

  def testSparse(self):
    # TODO(tanzheny, omalleyt): Fix test in eager mode.
    for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
      with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu():
        # Initialize variables for numpy implementation.
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0_np_indices = np.array([0], dtype=np.int32)
        grads0 = tf.IndexedSlices(
            tf.constant(grads0_np), tf.constant(grads0_np_indices),
            tf.constant([1]))
        grads1_np_indices = np.array([1], dtype=np.int32)
        grads1 = tf.IndexedSlices(
            tf.constant(grads1_np), tf.constant(grads1_np_indices),
            tf.constant([1]))
        opt = rmsprop.RMSprop(
            learning_rate=learning_rate,
            rho=rho,
            momentum=momentum,
            epsilon=epsilon,
            centered=centered)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(tf.compat.v1.global_variables_initializer())

        if centered:
          mg0 = opt.get_slot(var0, "mg")
          self.assertEqual(mg0 is not None, centered)
          mg1 = opt.get_slot(var1, "mg")
          self.assertEqual(mg1 is not None, centered)
        else:
          mg0 = None
          mg1 = None
        rms0 = opt.get_slot(var0, "rms")
        self.assertIsNotNone(rms0)
        rms1 = opt.get_slot(var1, "rms")
        self.assertIsNotNone(rms1)
        if momentum > 0.:
          mom0 = opt.get_slot(var0, "momentum")
          mom1 = opt.get_slot(var1, "momentum")
        else:
          mom0 = None
          mom1 = None

        mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)

        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))

        # Run 3 steps of RMSprop
        for _ in range(1, 4):
          self.evaluate(update)

          var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
              var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np,
              mom0_np, learning_rate, rho, momentum, epsilon, centered)
          var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
              var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np,
              mom1_np, learning_rate, rho, momentum, epsilon, centered)

          # Validate updated params
          if centered:
            self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
            self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
          self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
          self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
          if momentum > 0.:
            self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
            self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

  @combinations.generate(combinations.combine(mode=["eager"]))
  def testCallableParams(self):
    for dtype in _DATA_TYPES:
      var0 = tf.Variable([1.0, 2.0], dtype=dtype)
      var1 = tf.Variable([3.0, 4.0], dtype=dtype)
      grads0 = tf.constant([0.1, 0.1], dtype=dtype)
      grads1 = tf.constant([0.01, 0.01], dtype=dtype)

      learning_rate = lambda: 2.0
      rho = lambda: 0.9
      momentum = lambda: 0.0
      epsilon = 1.0
      opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon)

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Step 1: the rms accumulators were 1. So we should see a normal
      # update: v -= grad * learning_rate
      opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      # Check the parameters.
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),
              2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0))
          ]), self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([
              3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),
              4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))
          ]), self.evaluate(var1))
      # Step 2: the root mean square accumulators contain the previous update.
      opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      # Check the parameters.
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
              (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),
              2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
              (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0))
          ]), self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([
              3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
              (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),
              4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
              (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0))
          ]), self.evaluate(var1))

  def testConstructRMSpropWithLR(self):
    opt = rmsprop.RMSprop(lr=1.0)
    opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0)
    opt_3 = rmsprop.RMSprop(learning_rate=0.1)
    self.assertIsInstance(opt.lr, tf.Variable)
    self.assertIsInstance(opt_2.lr, tf.Variable)
    self.assertIsInstance(opt_3.lr, tf.Variable)

    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose(self.evaluate(opt.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_3.lr), (0.1))

  @combinations.generate(combinations.combine(mode=["eager"]))
  def testSlotsUniqueEager(self):
    v1 = tf.Variable(1.)
    v2 = tf.Variable(1.)

    opt = rmsprop.RMSprop(1., momentum=0., centered=False)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    # There should be iteration, and one unique slot variable for v1 and v2.
    self.assertLen(set({id(v) for v in opt.variables()}), 3)
    self.assertEqual(
        self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))

    opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    # There should be iteration, and two unique slot variables for v1 and v2.
    self.assertLen(set({id(v) for v in opt.variables()}), 5)
    self.assertEqual(
        self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))

    opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    # There should be iteration, and three unique slot variables for v1 and v2.
    self.assertLen(set({id(v) for v in opt.variables()}), 7)
    self.assertEqual(
        self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))

  @combinations.generate(combinations.combine(mode=["eager"]))
  def testMomentumProperValue(self):
    with self.assertRaisesRegex(ValueError,
                                r"`momentum` must be between \[0, 1\]. "
                                r"Received: momentum=2.5 \(of type <class "
                                r"\'float\'>\)."):
      rmsprop.RMSprop(1., momentum=2.5, centered=False)


@combinations.generate(combinations.combine(mode=["graph", "eager"]))
class SlotColocationTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.parameters([True, False])
  @test_util.run_gpu_only
  def testRunMinimizeOnGPUForCPUVariables(self, use_resource):
    with tf.device("/device:CPU:0"):
      if use_resource:
        var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
        var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)
      else:
        var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
        var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)

    def loss():
      return 5 * var0 + 3 * var1

    opt = rmsprop.RMSprop(
        learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0)

    # Fetch params to validate initial values
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    self.assertAllClose([3.0, 4.0], self.evaluate(var1))

    # Run 1 step through optimizer on GPU.
    # Slot variables are created the first time optimizer is used on some
    # variable. This tests that slot variables will be colocated with the
    # base variable.
    with tf.device("/device:GPU:0"):
      # Note that for eager execution, minimize expects a function instead
      # of a Tensor.
      opt_op = opt.minimize(loss, [var0, var1])
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(opt_op)

    # Validate updated params, All variables should have decreased.
    self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)),
                    msg="updated variables: %s" % self.evaluate(var0))
    self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)),
                    msg="updated variables: %s" % self.evaluate(var1))


if __name__ == "__main__":
  tf.test.main()
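For reference, the update that _rmsprop_update_numpy above checks against can be restated compactly (my own summary of the helper's math, not repository code); note that the placement of epsilon differs between the momentum and no-momentum branches exactly as in the helper:

    rms_t = rho * rms + (1 - rho) * g**2   # running mean of squared gradients
    denom_t = rms_t                        # centered variant: denom_t = rms_t - mg_t**2,
                                           # with mg_t = rho * mg + (1 - rho) * g
    if momentum > 0:
        mom_t = momentum * mom + lr * g / sqrt(denom_t + epsilon)
        var_t = var - mom_t
    else:
        var_t = var - lr * g / (sqrt(denom_t) + epsilon)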
40.633898
114
0.616042
import tensorflow.compat.v2 as tf

import copy
import itertools
import math

from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import test_util
from keras import combinations
from keras import testing_utils
from keras.optimizer_v2 import learning_rate_schedule
from keras.optimizer_v2 import rmsprop

_DATA_TYPES = [tf.half, tf.float32, tf.float64, tf.complex64, tf.complex128]

_TEST_PARAM_VALUES = [
    [0.05, 0.9, 0.0, 1e-3, True],
    [0.05, 0.9, 0.0, 1e-3, False],
    [0.1, 0.9, 0.0, 1e-3, True],
    [0.01, 0.9, 0.0, 1e-5, True],
    [0.01, 0.9, 0.9, 1e-5, True],
]

_TESTPARAMS = [
    [data_type] + values
    for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
]


class RMSpropOptimizerTest(tf.test.TestCase, parameterized.TestCase):

  def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, rho, momentum,
                            epsilon, centered):
    rms_t = rms * rho + (1 - rho) * g * g
    if centered:
      mg_t = mg * rho + (1 - rho) * g
      denom_t = rms_t - mg_t * mg_t
    else:
      mg_t = mg
      denom_t = rms_t
    if momentum > 0.:
      mom_t = momentum * mom + lr * g / (np.sqrt(denom_t + epsilon))
      var_t = var - mom_t
    else:
      mom_t = mom
      var_t = var - lr * g / (np.sqrt(denom_t) + epsilon)
    return var_t, mg_t, rms_t, mom_t

  def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
                                   lr, rho, momentum, epsilon, centered):
    mg_t = copy.deepcopy(mg)
    rms_t = copy.deepcopy(rms)
    mom_t = copy.deepcopy(mom)
    var_t = copy.deepcopy(var)
    for i in range(len(gindexs)):
      gindex = gindexs[i]
      gvalue = gvalues[i]
      rms_t[gindex] = rms[gindex] * rho + (1 - rho) * gvalue * gvalue
      if centered:
        mg_t[gindex] = mg_t[gindex] * rho + (1 - rho) * gvalue
        denom_t = rms_t[gindex] - mg_t[gindex] * mg_t[gindex]
      else:
        denom_t = rms_t[gindex]
      if momentum > 0.:
        mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t + epsilon)
        var_t[gindex] = var[gindex] - mom_t[gindex]
      else:
        mom_t[gindex] = mom[gindex]
        var_t[gindex] = var[gindex] - lr * gvalue / (np.sqrt(denom_t) + epsilon)
    return var_t, mg_t, rms_t, mom_t

  def testDense(self):
    for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
      with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu():
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np, dtype=dtype)
        var1 = tf.Variable(var1_np, dtype=dtype)
        grads0 = tf.constant(grads0_np, dtype=dtype)
        grads1 = tf.constant(grads1_np, dtype=dtype)
        opt = rmsprop.RMSprop(
            learning_rate=learning_rate,
            rho=rho,
            momentum=momentum,
            epsilon=epsilon,
            centered=centered)

        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(tf.compat.v1.global_variables_initializer())

        if centered:
          mg0 = opt.get_slot(var0, "mg")
          mg1 = opt.get_slot(var1, "mg")
        else:
          mg0 = None
          mg1 = None

        if momentum > 0.:
          mom0 = opt.get_slot(var0, "momentum")
          mom1 = opt.get_slot(var1, "momentum")
        else:
          mom0 = None
          mom1 = None

        rms0 = opt.get_slot(var0, "rms")
        self.assertIsNotNone(rms0)
        rms1 = opt.get_slot(var1, "rms")
        self.assertIsNotNone(rms1)

        mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)

        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))

        for _ in range(1, 4):
          self.evaluate(update)

          var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
              var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
              rho, momentum, epsilon, centered)
          var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
              var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
              rho, momentum, epsilon, centered)

          if centered:
            self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
            self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
          if momentum > 0.:
            self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
            self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
          self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
          self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

  def testDenseWithLearningRateDecay(self):
    with tf.Graph().as_default():
      var0_np = np.array([1.0, 2.0])
      grads0_np = np.array([0.1, 0.2])
      var1_np = np.array([3.0, 4.0])
      grads1_np = np.array([0.01, 0.2])

      var0 = tf.Variable(var0_np)
      var1 = tf.Variable(var1_np)
      grads0 = tf.constant(grads0_np)
      grads1 = tf.constant(grads1_np)
      learning_rate = 0.01
      rho = 0.9
      momentum = 0.0
      epsilon = 1e-7
      centered = False
      decay = 0.5
      opt = rmsprop.RMSprop(
          learning_rate=learning_rate,
          rho=rho,
          momentum=momentum,
          epsilon=epsilon,
          centered=centered,
          decay=decay)

      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      self.evaluate(tf.compat.v1.global_variables_initializer())

      rms0 = opt.get_slot(var0, "rms")
      self.assertIsNotNone(rms0)
      rms1 = opt.get_slot(var1, "rms")
      self.assertIsNotNone(rms1)
      if momentum > 0.:
        mom0 = opt.get_slot(var0, "momentum")
        mom1 = opt.get_slot(var1, "momentum")
      else:
        mom0 = None
        mom1 = None

      mg0_np = np.array([0.0, 0.0])
      mg1_np = np.array([0.0, 0.0])
      rms0_np = np.array([0.0, 0.0])
      rms1_np = np.array([0.0, 0.0])
      mom0_np = np.array([0.0, 0.0])
      mom1_np = np.array([0.0, 0.0])

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))

      for t in range(2):
        self.evaluate(update)

        lr = learning_rate / (1 + decay * t)
        var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
            var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
            epsilon, centered)
        var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
            var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
            epsilon, centered)

        self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
        self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
        if momentum > 0.:
          self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
          self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
        self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
        self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

  def testDenseWithLearningRateInverseTimeDecay(self):
    with tf.Graph().as_default():
      var0_np = np.array([1.0, 2.0])
      grads0_np = np.array([0.1, 0.2])
      var1_np = np.array([3.0, 4.0])
      grads1_np = np.array([0.01, 0.2])

      var0 = tf.Variable(var0_np)
      var1 = tf.Variable(var1_np)
      grads0 = tf.constant(grads0_np)
      grads1 = tf.constant(grads1_np)
      learning_rate = 0.01
      rho = 0.9
      momentum = 0.0
      epsilon = 1e-7
      centered = False
      decay = 0.5
      lr_schedule = learning_rate_schedule.InverseTimeDecay(
          learning_rate, decay_steps=1.0, decay_rate=decay)
      opt = rmsprop.RMSprop(
          learning_rate=lr_schedule,
          rho=rho,
          momentum=momentum,
          epsilon=epsilon,
          centered=centered)

      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      self.evaluate(tf.compat.v1.global_variables_initializer())

      rms0 = opt.get_slot(var0, "rms")
      self.assertIsNotNone(rms0)
      rms1 = opt.get_slot(var1, "rms")
      self.assertIsNotNone(rms1)
      if momentum > 0.:
        mom0 = opt.get_slot(var0, "momentum")
        mom1 = opt.get_slot(var1, "momentum")
      else:
        mom0 = None
        mom1 = None

      mg0_np = np.array([0.0, 0.0])
      mg1_np = np.array([0.0, 0.0])
      rms0_np = np.array([0.0, 0.0])
      rms1_np = np.array([0.0, 0.0])
      mom0_np = np.array([0.0, 0.0])
      mom1_np = np.array([0.0, 0.0])

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))

      for t in range(2):
        self.evaluate(update)

        lr = learning_rate / (1 + decay * t)
        var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
            var0_np, grads0_np, mg0_np, rms0_np, mom0_np, lr, rho, momentum,
            epsilon, centered)
        var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
            var1_np, grads1_np, mg1_np, rms1_np, mom1_np, lr, rho, momentum,
            epsilon, centered)

        self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
        self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
        if momentum > 0.:
          self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
          self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
        self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
        self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

  def testMinimizeSparseResourceVariable(self):
    with tf.Graph().as_default():
      for dtype in _DATA_TYPES:
        var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)

        def loss():
          pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x)
          return pred * pred

        sgd_op = rmsprop.RMSprop(
            learning_rate=1.0,
            rho=0.0,
            momentum=0.0,
            epsilon=0.0,
            centered=False).minimize(
                loss, var_list=[var0])
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
        self.evaluate(sgd_op)
        self.assertAllCloseAccordingToType([[0., 1.]],
                                           self.evaluate(var0),
                                           atol=0.01)

  def testMinimizeSparseResourceVariableCentered(self):
    with tf.Graph().as_default():
      for dtype in _DATA_TYPES:
        var0 = tf.Variable([[1.0, 2.0]], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)

        def loss():
          pred = tf.matmul(tf.compat.v1.nn.embedding_lookup([var0], [0]), x)
          return pred * pred

        sgd_op = rmsprop.RMSprop(
            learning_rate=1.0,
            rho=0.0,
            momentum=0.0,
            epsilon=1.0,
            centered=True).minimize(
                loss, var_list=[var0])
        self.evaluate(tf.compat.v1.global_variables_initializer())
        self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
        self.evaluate(sgd_op)
        self.assertAllCloseAccordingToType([[-111, -138]],
                                           self.evaluate(var0),
                                           atol=0.01)

  def testSparse(self):
    for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
      with tf.compat.v1.get_default_graph().as_default(), testing_utils.use_gpu():
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)

        var0 = tf.Variable(var0_np)
        var1 = tf.Variable(var1_np)
        grads0_np_indices = np.array([0], dtype=np.int32)
        grads0 = tf.IndexedSlices(
            tf.constant(grads0_np), tf.constant(grads0_np_indices),
            tf.constant([1]))
        grads1_np_indices = np.array([1], dtype=np.int32)
        grads1 = tf.IndexedSlices(
            tf.constant(grads1_np), tf.constant(grads1_np_indices),
            tf.constant([1]))
        opt = rmsprop.RMSprop(
            learning_rate=learning_rate,
            rho=rho,
            momentum=momentum,
            epsilon=epsilon,
            centered=centered)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(tf.compat.v1.global_variables_initializer())

        if centered:
          mg0 = opt.get_slot(var0, "mg")
          self.assertEqual(mg0 is not None, centered)
          mg1 = opt.get_slot(var1, "mg")
          self.assertEqual(mg1 is not None, centered)
        else:
          mg0 = None
          mg1 = None
        rms0 = opt.get_slot(var0, "rms")
        self.assertIsNotNone(rms0)
        rms1 = opt.get_slot(var1, "rms")
        self.assertIsNotNone(rms1)
        if momentum > 0.:
          mom0 = opt.get_slot(var0, "momentum")
          mom1 = opt.get_slot(var1, "momentum")
        else:
          mom0 = None
          mom1 = None

        mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        rms0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        rms1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
        mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)

        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))

        for _ in range(1, 4):
          self.evaluate(update)

          var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
              var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np,
              mom0_np, learning_rate, rho, momentum, epsilon, centered)
          var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
              var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np,
              mom1_np, learning_rate, rho, momentum, epsilon, centered)

          if centered:
            self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
            self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
          self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
          self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
          if momentum > 0.:
            self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
            self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
          self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
          self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))

  @combinations.generate(combinations.combine(mode=["eager"]))
  def testCallableParams(self):
    for dtype in _DATA_TYPES:
      var0 = tf.Variable([1.0, 2.0], dtype=dtype)
      var1 = tf.Variable([3.0, 4.0], dtype=dtype)
      grads0 = tf.constant([0.1, 0.1], dtype=dtype)
      grads1 = tf.constant([0.01, 0.01], dtype=dtype)

      learning_rate = lambda: 2.0
      rho = lambda: 0.9
      momentum = lambda: 0.0
      epsilon = 1.0
      opt = rmsprop.RMSprop(learning_rate, rho, momentum, epsilon)

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)),
              2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0))
          ]), self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([
              3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)),
              4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0))
          ]), self.evaluate(var1))
      opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      self.assertAllCloseAccordingToType(
          np.array([
              1.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
              (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0)),
              2.0 - (0.1 * 2.0 / math.sqrt(0.001 + 1.0)) -
              (0.1 * 2.0 / math.sqrt(0.001 * 0.9 + 0.001 + 1.0))
          ]), self.evaluate(var0))
      self.assertAllCloseAccordingToType(
          np.array([
              3.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
              (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0)),
              4.0 - (0.01 * 2.0 / math.sqrt(0.00001 + 1.0)) -
              (0.01 * 2.0 / math.sqrt(0.00001 * 0.9 + 1e-5 + 1.0))
          ]), self.evaluate(var1))

  def testConstructRMSpropWithLR(self):
    opt = rmsprop.RMSprop(lr=1.0)
    opt_2 = rmsprop.RMSprop(learning_rate=0.1, lr=1.0)
    opt_3 = rmsprop.RMSprop(learning_rate=0.1)
    self.assertIsInstance(opt.lr, tf.Variable)
    self.assertIsInstance(opt_2.lr, tf.Variable)
    self.assertIsInstance(opt_3.lr, tf.Variable)

    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose(self.evaluate(opt.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
    self.assertAllClose(self.evaluate(opt_3.lr), (0.1))

  @combinations.generate(combinations.combine(mode=["eager"]))
  def testSlotsUniqueEager(self):
    v1 = tf.Variable(1.)
    v2 = tf.Variable(1.)

    opt = rmsprop.RMSprop(1., momentum=0., centered=False)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    self.assertLen(set({id(v) for v in opt.variables()}), 3)
    self.assertEqual(
        self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))

    opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=False)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    self.assertLen(set({id(v) for v in opt.variables()}), 5)
    self.assertEqual(
        self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))

    opt = rmsprop.RMSprop(learning_rate=1., momentum=0.2, centered=True)
    opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
    self.assertLen(set({id(v) for v in opt.variables()}), 7)
    self.assertEqual(
        self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))

  @combinations.generate(combinations.combine(mode=["eager"]))
  def testMomentumProperValue(self):
    with self.assertRaisesRegex(ValueError,
                                r"`momentum` must be between \[0, 1\]. "
                                r"Received: momentum=2.5 \(of type <class "
                                r"\'float\'>\)."):
      rmsprop.RMSprop(1., momentum=2.5, centered=False)


@combinations.generate(combinations.combine(mode=["graph", "eager"]))
class SlotColocationTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.parameters([True, False])
  @test_util.run_gpu_only
  def testRunMinimizeOnGPUForCPUVariables(self, use_resource):
    with tf.device("/device:CPU:0"):
      if use_resource:
        var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
        var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)
      else:
        var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
        var1 = tf.Variable([3.0, 4.0], dtype=tf.float32)

    def loss():
      return 5 * var0 + 3 * var1

    opt = rmsprop.RMSprop(
        learning_rate=1.0, decay=0.9, momentum=0.5, epsilon=1.0)

    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose([1.0, 2.0], self.evaluate(var0))
    self.assertAllClose([3.0, 4.0], self.evaluate(var1))

    with tf.device("/device:GPU:0"):
      opt_op = opt.minimize(loss, [var0, var1])
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(opt_op)

    self.assertTrue(all(v < 0.0 for v in self.evaluate(var0)),
                    msg="updated variables: %s" % self.evaluate(var0))
    self.assertTrue(all(v < 2.0 for v in self.evaluate(var1)),
                    msg="updated variables: %s" % self.evaluate(var1))


if __name__ == "__main__":
  tf.test.main()
true
true
f703935a81099d2c4a2a54350f54542dcb59b99b
9,815
py
Python
visualization/visualizer/ArcBall.py
zhigangjiang/LGT-Net
d9a619158b2dc66a50c100e7fa7e491f1df16fd7
[ "MIT" ]
11
2022-03-03T17:49:33.000Z
2022-03-25T11:23:11.000Z
visualization/visualizer/ArcBall.py
zhigangjiang/LGT-Net
d9a619158b2dc66a50c100e7fa7e491f1df16fd7
[ "MIT" ]
3
2021-05-03T08:06:13.000Z
2022-01-13T15:21:28.000Z
ArcBall.py
fuenwang/360LayoutVisualizer
36a7b0e17be6491800c6904d0a9ec0ffcd1b5249
[ "MIT" ]
1
2022-03-04T06:39:50.000Z
2022-03-04T06:39:50.000Z
""" ArcBall.py -- Math utilities, vector, matrix types and ArcBall quaternion rotation class >>> unit_test_ArcBall_module () unit testing ArcBall Quat for first drag [ 0.08438914 -0.08534209 -0.06240178 0.99080837] First transform [[ 0.97764552 -0.1380603 0.15858325 0. ] [ 0.10925253 0.97796899 0.17787792 0. ] [-0.17964739 -0.15657592 0.97119039 0. ] [ 0. 0. 0. 1. ]] LastRot at end of first drag [[ 0.97764552 -0.1380603 0.15858325] [ 0.10925253 0.97796899 0.17787792] [-0.17964739 -0.15657592 0.97119039]] Quat for second drag [ 0.00710336 0.31832787 0.02679029 0.94757545] Second transform [[ 0.88022292 -0.08322023 -0.46720669 0. ] [ 0.14910145 0.98314685 0.10578787 0. ] [ 0.45052907 -0.16277808 0.8777966 0. ] [ 0. 0. 0. 1.00000001]] """ try: import numpy as Numeric def sumDot( a,b ): return Numeric.dot (a, b) except ImportError: try: import Numeric def sumDot( a,b ): return sum (Numeric.dot (a, b) ) except ImportError: print ("This demo requires the numpy or Numeric extension, sorry") import sys sys.exit() import copy from math import sqrt # //assuming IEEE-754(GLfloat), which i believe has max precision of 7 bits Epsilon = 1.0e-5 class ArcBallT: def __init__ (self, NewWidth, NewHeight): self.m_StVec = Vector3fT () self.m_EnVec = Vector3fT () self.m_AdjustWidth = 1.0 self.m_AdjustHeight = 1.0 self.setBounds (NewWidth, NewHeight) def __str__ (self): str_rep = "" str_rep += "StVec = " + str (self.m_StVec) str_rep += "\nEnVec = " + str (self.m_EnVec) str_rep += "\n scale coords %f %f" % (self.m_AdjustWidth, self.m_AdjustHeight) return str_rep def setBounds (self, NewWidth, NewHeight): # //Set new bounds assert (NewWidth > 1.0 and NewHeight > 1.0), "Invalid width or height for bounds." # //Set adjustment factor for width/height self.m_AdjustWidth = 1.0 / ((NewWidth - 1.0) * 0.5) self.m_AdjustHeight = 1.0 / ((NewHeight - 1.0) * 0.5) def _mapToSphere (self, NewPt): # Given a new window coordinate, will modify NewVec in place X = 0 Y = 1 Z = 2 NewVec = Vector3fT () # //Copy paramter into temp point TempPt = copy.copy (NewPt) #print ('NewPt', NewPt, TempPt) # //Adjust point coords and scale down to range of [-1 ... 1] TempPt [X] = (NewPt [X] * self.m_AdjustWidth) - 1.0 TempPt [Y] = 1.0 - (NewPt [Y] * self.m_AdjustHeight) # //Compute the square of the length of the vector to the point from the center length = sumDot( TempPt, TempPt) # //If the point is mapped outside of the sphere... 
(length > radius squared) if (length > 1.0): # //Compute a normalizing factor (radius / sqrt(length)) norm = 1.0 / sqrt (length); # //Return the "normalized" vector, a point on the sphere NewVec [X] = TempPt [X] * norm; NewVec [Y] = TempPt [Y] * norm; NewVec [Z] = 0.0; else: # //Else it's on the inside # //Return a vector to a point mapped inside the sphere sqrt(radius squared - length) NewVec [X] = TempPt [X] NewVec [Y] = TempPt [Y] NewVec [Z] = sqrt (1.0 - length) return NewVec def click (self, NewPt): # //Mouse down (Point2fT self.m_StVec = self._mapToSphere (NewPt) return def drag (self, NewPt): # //Mouse drag, calculate rotation (Point2fT Quat4fT) """ drag (Point2fT mouse_coord) -> new_quaternion_rotation_vec """ X = 0 Y = 1 Z = 2 W = 3 self.m_EnVec = self._mapToSphere (NewPt) # //Compute the vector perpendicular to the begin and end vectors # Perp = Vector3fT () Perp = Vector3fCross(self.m_StVec, self.m_EnVec); NewRot = Quat4fT () # //Compute the length of the perpendicular vector if (Vector3fLength(Perp) > Epsilon): # //if its non-zero # //We're ok, so return the perpendicular vector as the transform after all NewRot[X] = Perp[X]; NewRot[Y] = Perp[Y]; NewRot[Z] = Perp[Z]; # //In the quaternion values, w is cosine (theta / 2), where theta is rotation angle NewRot[W] = Vector3fDot(self.m_StVec, self.m_EnVec); else: # //if its zero # //The begin and end vectors coincide, so return a quaternion of zero matrix (no rotation) NewRot.X = NewRot.Y = NewRot.Z = NewRot.W = 0.0; return NewRot # ##################### Math utility ########################################## def Matrix4fT (): return Numeric.identity (4, 'f') def Matrix3fT (): return Numeric.identity (3, 'f') def Quat4fT (): return Numeric.zeros (4, 'f') def Vector3fT (): return Numeric.zeros (3, 'f') def Point2fT (x = 0.0, y = 0.0): pt = Numeric.zeros (2, 'f') pt [0] = x pt [1] = y return pt def Vector3fDot(u, v): # Dot product of two 3f vectors dotprod = Numeric.dot (u,v) return dotprod def Vector3fCross(u, v): # Cross product of two 3f vectors X = 0 Y = 1 Z = 2 cross = Numeric.zeros (3, 'f') cross [X] = (u[Y] * v[Z]) - (u[Z] * v[Y]) cross [Y] = (u[Z] * v[X]) - (u[X] * v[Z]) cross [Z] = (u[X] * v[Y]) - (u[Y] * v[X]) return cross def Vector3fLength (u): mag_squared = sumDot(u,u) mag = sqrt (mag_squared) return mag def Matrix3fSetIdentity (): return Numeric.identity (3, 'f') def Matrix3fMulMatrix3f (matrix_a, matrix_b): return sumDot( matrix_a, matrix_b ) def Matrix4fSVD (NewObj): X = 0 Y = 1 Z = 2 s = sqrt ( ( (NewObj [X][X] * NewObj [X][X]) + (NewObj [X][Y] * NewObj [X][Y]) + (NewObj [X][Z] * NewObj [X][Z]) + (NewObj [Y][X] * NewObj [Y][X]) + (NewObj [Y][Y] * NewObj [Y][Y]) + (NewObj [Y][Z] * NewObj [Y][Z]) + (NewObj [Z][X] * NewObj [Z][X]) + (NewObj [Z][Y] * NewObj [Z][Y]) + (NewObj [Z][Z] * NewObj [Z][Z]) ) / 3.0 ) return s def Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix): # Modifies NewObj in-place by replacing its upper 3x3 portion from the # passed in 3x3 matrix. # NewObj = Matrix4fT () NewObj [0:3,0:3] = three_by_three_matrix return NewObj # /** # * Sets the rotational component (upper 3x3) of this matrix to the matrix # * values in the T precision Matrix3d argument; the other elements of # * this matrix are unchanged; a singular value decomposition is performed # * on this object's upper 3x3 matrix to factor out the scale, then this # * object's upper 3x3 matrix components are replaced by the passed rotation # * components, and then the scale is reapplied to the rotational # * components. 
# * @param three_by_three_matrix T precision 3x3 matrix # */ def Matrix4fSetRotationFromMatrix3f (NewObj, three_by_three_matrix): scale = Matrix4fSVD (NewObj) NewObj = Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix); scaled_NewObj = NewObj * scale # Matrix4fMulRotationScale(NewObj, scale); return scaled_NewObj def Matrix3fSetRotationFromQuat4f (q1): # Converts the H quaternion q1 into a new equivalent 3x3 rotation matrix. X = 0 Y = 1 Z = 2 W = 3 NewObj = Matrix3fT () n = sumDot(q1, q1) s = 0.0 if (n > 0.0): s = 2.0 / n xs = q1 [X] * s; ys = q1 [Y] * s; zs = q1 [Z] * s wx = q1 [W] * xs; wy = q1 [W] * ys; wz = q1 [W] * zs xx = q1 [X] * xs; xy = q1 [X] * ys; xz = q1 [X] * zs yy = q1 [Y] * ys; yz = q1 [Y] * zs; zz = q1 [Z] * zs # This math all comes about by way of algebra, complex math, and trig identities. # See Lengyel pages 88-92 NewObj [X][X] = 1.0 - (yy + zz); NewObj [Y][X] = xy - wz; NewObj [Z][X] = xz + wy; NewObj [X][Y] = xy + wz; NewObj [Y][Y] = 1.0 - (xx + zz); NewObj [Z][Y] = yz - wx; NewObj [X][Z] = xz - wy; NewObj [Y][Z] = yz + wx; NewObj [Z][Z] = 1.0 - (xx + yy) return NewObj def unit_test_ArcBall_module (): # Unit testing of the ArcBall calss and the real math behind it. # Simulates a click and drag followed by another click and drag. print ("unit testing ArcBall") Transform = Matrix4fT () LastRot = Matrix3fT () ThisRot = Matrix3fT () ArcBall = ArcBallT (640, 480) # print "The ArcBall with NO click" # print ArcBall # First click LastRot = copy.copy (ThisRot) mouse_pt = Point2fT (500,250) ArcBall.click (mouse_pt) # print "The ArcBall with first click" # print ArcBall # First drag mouse_pt = Point2fT (475, 275) ThisQuat = ArcBall.drag (mouse_pt) # print "The ArcBall after first drag" # print ArcBall # print # print print ("Quat for first drag") print (ThisQuat) ThisRot = Matrix3fSetRotationFromQuat4f (ThisQuat) # Linear Algebra matrix multiplication A = old, B = New : C = A * B ThisRot = Matrix3fMulMatrix3f (LastRot, ThisRot) Transform = Matrix4fSetRotationFromMatrix3f (Transform, ThisRot) print ("First transform") print (Transform) # Done with first drag # second click LastRot = copy.copy (ThisRot) print ("LastRot at end of first drag") print (LastRot) mouse_pt = Point2fT (350,260) ArcBall.click (mouse_pt) # second drag mouse_pt = Point2fT (450, 260) ThisQuat = ArcBall.drag (mouse_pt) # print "The ArcBall" # print ArcBall print ("Quat for second drag") print (ThisQuat) ThisRot = Matrix3fSetRotationFromQuat4f (ThisQuat) ThisRot = Matrix3fMulMatrix3f (LastRot, ThisRot) # print ThisRot Transform = Matrix4fSetRotationFromMatrix3f (Transform, ThisRot) print ("Second transform") print (Transform) # Done with second drag LastRot = copy.copy (ThisRot) def _test (): # This will run doctest's unit testing capability. # see http://www.python.org/doc/current/lib/module-doctest.html # # doctest introspects the ArcBall module for all docstrings # that look like interactive python sessions and invokes # the same commands then and there as unit tests to compare # the output generated. Very nice for unit testing and # documentation. import doctest, ArcBall return doctest.testmod (ArcBall) if __name__ == "__main__": # Invoke our function that runs python's doctest unit testing tool. _test () # unit_test ()
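A small usage sketch of the intended click/drag/accumulate cycle (my own example, built only from functions defined above; the pixel coordinates are arbitrary):

    ball = ArcBallT(640, 480)
    ball.click(Point2fT(320, 240))          # mouse-down position
    quat = ball.drag(Point2fT(340, 250))    # current mouse position
    this_rot = Matrix3fSetRotationFromQuat4f(quat)
    this_rot = Matrix3fMulMatrix3f(Matrix3fT(), this_rot)  # accumulate onto prior rotation
    transform = Matrix4fSetRotationFromMatrix3f(Matrix4fT(), this_rot)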
30.29321
111
0.652573
try:
    import numpy as Numeric

    def sumDot(a, b):
        return Numeric.dot(a, b)
except ImportError:
    try:
        import Numeric

        def sumDot(a, b):
            return sum(Numeric.dot(a, b))
    except ImportError:
        print("This demo requires the numpy or Numeric extension, sorry")
        import sys
        sys.exit()

import copy
from math import sqrt

Epsilon = 1.0e-5


class ArcBallT:
    def __init__(self, NewWidth, NewHeight):
        self.m_StVec = Vector3fT()
        self.m_EnVec = Vector3fT()
        self.m_AdjustWidth = 1.0
        self.m_AdjustHeight = 1.0
        self.setBounds(NewWidth, NewHeight)

    def __str__(self):
        str_rep = ""
        str_rep += "StVec = " + str(self.m_StVec)
        str_rep += "\nEnVec = " + str(self.m_EnVec)
        str_rep += "\n scale coords %f %f" % (self.m_AdjustWidth, self.m_AdjustHeight)
        return str_rep

    def setBounds(self, NewWidth, NewHeight):
        assert (NewWidth > 1.0 and NewHeight > 1.0), "Invalid width or height for bounds."
        self.m_AdjustWidth = 1.0 / ((NewWidth - 1.0) * 0.5)
        self.m_AdjustHeight = 1.0 / ((NewHeight - 1.0) * 0.5)

    def _mapToSphere(self, NewPt):
        X = 0
        Y = 1
        Z = 2

        NewVec = Vector3fT()
        TempPt = copy.copy(NewPt)
        TempPt[X] = (NewPt[X] * self.m_AdjustWidth) - 1.0
        TempPt[Y] = 1.0 - (NewPt[Y] * self.m_AdjustHeight)
        length = sumDot(TempPt, TempPt)
        if (length > 1.0):
            norm = 1.0 / sqrt(length)
            NewVec[X] = TempPt[X] * norm
            NewVec[Y] = TempPt[Y] * norm
            NewVec[Z] = 0.0
        else:
            # //Return a vector to a point mapped inside the sphere sqrt(radius squared - length)
            NewVec[X] = TempPt[X]
            NewVec[Y] = TempPt[Y]
            NewVec[Z] = sqrt(1.0 - length)

        return NewVec

    def click(self, NewPt):  # //Mouse down (Point2fT)
        self.m_StVec = self._mapToSphere(NewPt)
        return

    def drag(self, NewPt):  # //Mouse drag, calculate rotation (Point2fT Quat4fT)
        X = 0
        Y = 1
        Z = 2
        W = 3

        self.m_EnVec = self._mapToSphere(NewPt)

        # //Compute the vector perpendicular to the begin and end vectors
        # Perp = Vector3fT ()
        Perp = Vector3fCross(self.m_StVec, self.m_EnVec)

        NewRot = Quat4fT()
        # //Compute the length of the perpendicular vector
        if (Vector3fLength(Perp) > Epsilon):  # //if its non-zero
            # //We're ok, so return the perpendicular vector as the transform after all
            NewRot[X] = Perp[X]
            NewRot[Y] = Perp[Y]
            NewRot[Z] = Perp[Z]
            NewRot[W] = Vector3fDot(self.m_StVec, self.m_EnVec)
        else:
            NewRot[X] = NewRot[Y] = NewRot[Z] = NewRot[W] = 0.0

        return NewRot


def Matrix4fT():
    return Numeric.identity(4, 'f')


def Matrix3fT():
    return Numeric.identity(3, 'f')


def Quat4fT():
    return Numeric.zeros(4, 'f')


def Vector3fT():
    return Numeric.zeros(3, 'f')


def Point2fT(x=0.0, y=0.0):
    pt = Numeric.zeros(2, 'f')
    pt[0] = x
    pt[1] = y
    return pt


def Vector3fDot(u, v):
    dotprod = Numeric.dot(u, v)
    return dotprod


def Vector3fCross(u, v):
    X = 0
    Y = 1
    Z = 2
    cross = Numeric.zeros(3, 'f')
    cross[X] = (u[Y] * v[Z]) - (u[Z] * v[Y])
    cross[Y] = (u[Z] * v[X]) - (u[X] * v[Z])
    cross[Z] = (u[X] * v[Y]) - (u[Y] * v[X])
    return cross


def Vector3fLength(u):
    mag_squared = sumDot(u, u)
    mag = sqrt(mag_squared)
    return mag


def Matrix3fSetIdentity():
    return Numeric.identity(3, 'f')


def Matrix3fMulMatrix3f(matrix_a, matrix_b):
    return sumDot(matrix_a, matrix_b)


def Matrix4fSVD(NewObj):
    X = 0
    Y = 1
    Z = 2
    s = sqrt(
        ((NewObj[X][X] * NewObj[X][X]) + (NewObj[X][Y] * NewObj[X][Y]) + (NewObj[X][Z] * NewObj[X][Z]) +
         (NewObj[Y][X] * NewObj[Y][X]) + (NewObj[Y][Y] * NewObj[Y][Y]) + (NewObj[Y][Z] * NewObj[Y][Z]) +
         (NewObj[Z][X] * NewObj[Z][X]) + (NewObj[Z][Y] * NewObj[Z][Y]) + (NewObj[Z][Z] * NewObj[Z][Z])) / 3.0)
    return s


def Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix):
    NewObj[0:3, 0:3] = three_by_three_matrix
    return NewObj


# * object's upper 3x3 matrix components are replaced by the passed rotation
def Matrix4fSetRotationFromMatrix3f(NewObj, three_by_three_matrix):
    scale = Matrix4fSVD(NewObj)

    NewObj = Matrix4fSetRotationScaleFromMatrix3f(NewObj, three_by_three_matrix)
    scaled_NewObj = NewObj * scale
    return scaled_NewObj


def Matrix3fSetRotationFromQuat4f(q1):
    X = 0
    Y = 1
    Z = 2
    W = 3

    NewObj = Matrix3fT()
    n = sumDot(q1, q1)
    s = 0.0
    if (n > 0.0):
        s = 2.0 / n
    xs = q1[X] * s
    ys = q1[Y] * s
    zs = q1[Z] * s
    wx = q1[W] * xs
    wy = q1[W] * ys
    wz = q1[W] * zs
    xx = q1[X] * xs
    xy = q1[X] * ys
    xz = q1[X] * zs
    yy = q1[Y] * ys
    yz = q1[Y] * zs
    zz = q1[Z] * zs

    NewObj[X][X] = 1.0 - (yy + zz)
    NewObj[Y][X] = xy - wz
    NewObj[Z][X] = xz + wy
    NewObj[X][Y] = xy + wz
    NewObj[Y][Y] = 1.0 - (xx + zz)
    NewObj[Z][Y] = yz - wx
    NewObj[X][Z] = xz - wy
    NewObj[Y][Z] = yz + wx
    NewObj[Z][Z] = 1.0 - (xx + yy)

    return NewObj


def unit_test_ArcBall_module():
    print("unit testing ArcBall")
    Transform = Matrix4fT()
    LastRot = Matrix3fT()
    ThisRot = Matrix3fT()

    ArcBall = ArcBallT(640, 480)
    LastRot = copy.copy(ThisRot)
    mouse_pt = Point2fT(500, 250)
    ArcBall.click(mouse_pt)
    mouse_pt = Point2fT(475, 275)
    ThisQuat = ArcBall.drag(mouse_pt)
    print("Quat for first drag")
    print(ThisQuat)
    ThisRot = Matrix3fSetRotationFromQuat4f(ThisQuat)
    ThisRot = Matrix3fMulMatrix3f(LastRot, ThisRot)
    Transform = Matrix4fSetRotationFromMatrix3f(Transform, ThisRot)
    print("First transform")
    print(Transform)

    LastRot = copy.copy(ThisRot)
    print("LastRot at end of first drag")
    print(LastRot)
    mouse_pt = Point2fT(350, 260)
    ArcBall.click(mouse_pt)
    mouse_pt = Point2fT(450, 260)
    ThisQuat = ArcBall.drag(mouse_pt)
    print("Quat for second drag")
    print(ThisQuat)
    ThisRot = Matrix3fSetRotationFromQuat4f(ThisQuat)
    ThisRot = Matrix3fMulMatrix3f(LastRot, ThisRot)
    Transform = Matrix4fSetRotationFromMatrix3f(Transform, ThisRot)
    print("Second transform")
    print(Transform)
    LastRot = copy.copy(ThisRot)


def _test():
    # see http://www.python.org/doc/current/lib/module-doctest.html
    #
    # doctest introspects the ArcBall module for all docstrings
    # that look like interactive python sessions and invokes
    # the same commands then and there as unit tests to compare
    # the output generated. Very nice for unit testing and
    # documentation.
    import doctest, ArcBall
    return doctest.testmod(ArcBall)


if __name__ == "__main__":
    # Invoke our function that runs python's doctest unit testing tool.
    _test()
true
true
f70393d6c78a695c7990d9fcc63b60178e2de3e0
2,786
py
Python
tests/test_filesystem.py
WojciechKusa/datasets
1406a04c3e911cec2680d8bc513653e0cafcaaa4
[ "Apache-2.0" ]
8
2020-12-06T13:04:57.000Z
2021-12-14T23:00:56.000Z
tests/test_filesystem.py
WojciechKusa/datasets
1406a04c3e911cec2680d8bc513653e0cafcaaa4
[ "Apache-2.0" ]
null
null
null
tests/test_filesystem.py
WojciechKusa/datasets
1406a04c3e911cec2680d8bc513653e0cafcaaa4
[ "Apache-2.0" ]
1
2022-01-25T21:15:46.000Z
2022-01-25T21:15:46.000Z
import os

import boto3
import fsspec
import pytest
from moto import mock_s3

from datasets.filesystems import (
    COMPRESSION_FILESYSTEMS,
    HfFileSystem,
    S3FileSystem,
    extract_path_from_uri,
    is_remote_filesystem,
)

from .utils import require_lz4, require_zstandard


@pytest.fixture(scope="function")
def aws_credentials():
    """Mocked AWS Credentials for moto."""
    os.environ["AWS_ACCESS_KEY_ID"] = "fake_access_key"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "fake_secret_key"
    os.environ["AWS_SECURITY_TOKEN"] = "fake_security_token"
    os.environ["AWS_SESSION_TOKEN"] = "fake_session_token"


@pytest.fixture(scope="function")
def s3(aws_credentials):
    with mock_s3():
        yield boto3.client("s3", region_name="us-east-1")


def test_extract_path_from_uri(s3):
    mock_bucket = "moto-mock-s3-bucket"
    # We need to create the bucket since this is all in Moto's 'virtual' AWS account
    s3.create_bucket(Bucket=mock_bucket)

    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem():
    fs = S3FileSystem(key="fake_access_key", secret="fake_secret")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@require_zstandard
@require_lz4
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = str(input_paths[compression_fs_class.protocol])
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.ls("/") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data.txt"]
    with open(text_file) as f:
        assert hffs.open("data.txt", "r").read() == f.read()
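The first test pivots on extract_path_from_uri stripping the scheme from remote URIs while leaving local paths untouched; a minimal illustration (my own, mirroring exactly what the assertions require):

    from datasets.filesystems import extract_path_from_uri

    assert not extract_path_from_uri("s3://moto-mock-s3-bucket").startswith("s3://")
    assert extract_path_from_uri("./local/path") == "./local/path"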
33.166667
116
0.739052
import os

import boto3
import fsspec
import pytest
from moto import mock_s3

from datasets.filesystems import (
    COMPRESSION_FILESYSTEMS,
    HfFileSystem,
    S3FileSystem,
    extract_path_from_uri,
    is_remote_filesystem,
)

from .utils import require_lz4, require_zstandard


@pytest.fixture(scope="function")
def aws_credentials():
    os.environ["AWS_ACCESS_KEY_ID"] = "fake_access_key"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "fake_secret_key"
    os.environ["AWS_SECURITY_TOKEN"] = "fake_secrurity_token"
    os.environ["AWS_SESSION_TOKEN"] = "fake_session_token"


@pytest.fixture(scope="function")
def s3(aws_credentials):
    with mock_s3():
        yield boto3.client("s3", region_name="us-east-1")


def test_extract_path_from_uri(s3):
    mock_bucket = "moto-mock-s3-bucket"
    s3.create_bucket(Bucket=mock_bucket)

    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem():
    fs = S3FileSystem(key="fake_access_key", secret="fake_secret")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@require_zstandard
@require_lz4
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = str(input_paths[compression_fs_class.protocol])
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.ls("/") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data.txt"]
    with open(text_file) as f:
        assert hffs.open("data.txt", "r").read() == f.read()
true
true
f703943fbc79d0f254b8d08ea640cf0e5c86ba5f
3,639
py
Python
facebook_business/adobjects/messengerdestinationpagewelcomemessage.py
MyrikLD/facebook-python-business-sdk
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
[ "CNRI-Python" ]
3
2021-08-06T05:01:39.000Z
2021-08-11T03:31:44.000Z
facebook_business/adobjects/messengerdestinationpagewelcomemessage.py
MyrikLD/facebook-python-business-sdk
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
[ "CNRI-Python" ]
5
2016-09-22T20:18:11.000Z
2018-10-19T00:00:24.000Z
facebook_business/adobjects/messengerdestinationpagewelcomemessage.py
MyrikLD/facebook-python-business-sdk
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
[ "CNRI-Python" ]
1
2021-02-25T16:20:58.000Z
2021-02-25T16:20:58.000Z
# Copyright 2014 Facebook, Inc.

# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.

# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker

"""
This class is auto-generated.

For any issues or feature requests related to this class, please let us know
on github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""

class MessengerDestinationPageWelcomeMessage(
    AbstractCrudObject,
):

    def __init__(self, fbid=None, parent_id=None, api=None):
        self._isMessengerDestinationPageWelcomeMessage = True
        super(MessengerDestinationPageWelcomeMessage, self).__init__(fbid, parent_id, api)

    class Field(AbstractObject.Field):
        id = 'id'
        page_welcome_message_body = 'page_welcome_message_body'
        page_welcome_message_type = 'page_welcome_message_type'
        template_name = 'template_name'
        time_created = 'time_created'
        time_last_used = 'time_last_used'

    def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=MessengerDestinationPageWelcomeMessage,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    _field_types = {
        'id': 'string',
        'page_welcome_message_body': 'string',
        'page_welcome_message_type': 'string',
        'template_name': 'string',
        'time_created': 'datetime',
        'time_last_used': 'datetime',
    }

    @classmethod
    def _get_field_enum_info(cls):
        field_enum_info = {}
        return field_enum_info
38.305263
103
0.706238
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker


class MessengerDestinationPageWelcomeMessage(
    AbstractCrudObject,
):

    def __init__(self, fbid=None, parent_id=None, api=None):
        self._isMessengerDestinationPageWelcomeMessage = True
        super(MessengerDestinationPageWelcomeMessage, self).__init__(fbid, parent_id, api)

    class Field(AbstractObject.Field):
        id = 'id'
        page_welcome_message_body = 'page_welcome_message_body'
        page_welcome_message_type = 'page_welcome_message_type'
        template_name = 'template_name'
        time_created = 'time_created'
        time_last_used = 'time_last_used'

    def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
        from facebook_business.utils import api_utils
        if batch is None and (success is not None or failure is not None):
            api_utils.warning('`success` and `failure` callback only work for batch call.')
        param_types = {
        }
        enums = {
        }
        request = FacebookRequest(
            node_id=self['id'],
            method='GET',
            endpoint='/',
            api=self._api,
            param_checker=TypeChecker(param_types, enums),
            target_class=MessengerDestinationPageWelcomeMessage,
            api_type='NODE',
            response_parser=ObjectParser(reuse_object=self),
        )
        request.add_params(params)
        request.add_fields(fields)

        if batch is not None:
            request.add_to_batch(batch, success=success, failure=failure)
            return request
        elif pending:
            return request
        else:
            self.assure_call()
            return request.execute()

    _field_types = {
        'id': 'string',
        'page_welcome_message_body': 'string',
        'page_welcome_message_type': 'string',
        'template_name': 'string',
        'time_created': 'datetime',
        'time_last_used': 'datetime',
    }

    @classmethod
    def _get_field_enum_info(cls):
        field_enum_info = {}
        return field_enum_info
true
true
f70394ae6aab6d7f593c2084d4429207c49d3d56
1,245
py
Python
courses/src/task_app/serializers/task_file.py
yuramorozov01/courses_system
582532b2a2753d89642e1e8dbee0f369774638b1
[ "Apache-2.0" ]
null
null
null
courses/src/task_app/serializers/task_file.py
yuramorozov01/courses_system
582532b2a2753d89642e1e8dbee0f369774638b1
[ "Apache-2.0" ]
null
null
null
courses/src/task_app/serializers/task_file.py
yuramorozov01/courses_system
582532b2a2753d89642e1e8dbee0f369774638b1
[ "Apache-2.0" ]
null
null
null
from base_app.serializers import CustomUserSerializer
from rest_framework import serializers
from task_app.models import TaskFile


class TaskFileCreateSerializer(serializers.ModelSerializer):
    '''Serializer for creating task files'''
    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = TaskFile
        fields = '__all__'
        read_only_fields = ['author', 'task']


class TaskFileDetailsSerializer(serializers.ModelSerializer):
    '''Serializer for a specified task file

    This serializer provides detailed information about task file.'''

    file = serializers.FileField(read_only=True, allow_empty_file=True)
    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = TaskFile
        exclude = ['task']
        read_only_fields = ['file', 'author']


class TaskFileUpdateSerializer(serializers.ModelSerializer):
    '''Serializer for updating a specified task file.

    With this serializer task file can be updated only by a task file author.
    '''

    file = serializers.FileField(allow_empty_file=True)
    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = TaskFile
        fields = '__all__'
        read_only_fields = ['task', 'author']
29.642857
77
0.718072
from base_app.serializers import CustomUserSerializer
from rest_framework import serializers
from task_app.models import TaskFile


class TaskFileCreateSerializer(serializers.ModelSerializer):
    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = TaskFile
        fields = '__all__'
        read_only_fields = ['author', 'task']


class TaskFileDetailsSerializer(serializers.ModelSerializer):
    file = serializers.FileField(read_only=True, allow_empty_file=True)
    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = TaskFile
        exclude = ['task']
        read_only_fields = ['file', 'author']


class TaskFileUpdateSerializer(serializers.ModelSerializer):
    file = serializers.FileField(allow_empty_file=True)
    author = CustomUserSerializer(read_only=True)

    class Meta:
        model = TaskFile
        fields = '__all__'
        read_only_fields = ['task', 'author']
true
true
f70394c0c84001797c89a541d9f24d7cf0bd3eb7
11,469
py
Python
ckan/model/license.py
sabinem/ckan
dfad5d98a644a76939a57872073ef27dc7c68d86
[ "Apache-2.0" ]
1
2020-07-09T02:10:18.000Z
2020-07-09T02:10:18.000Z
ckan/model/license.py
sabinem/ckan
dfad5d98a644a76939a57872073ef27dc7c68d86
[ "Apache-2.0" ]
3
2020-03-24T17:56:04.000Z
2021-02-02T22:16:31.000Z
ckan/model/license.py
sabinem/ckan
dfad5d98a644a76939a57872073ef27dc7c68d86
[ "Apache-2.0" ]
null
null
null
# encoding: utf-8

import datetime
import re

import requests

from ckan.common import config
from ckan.common import asbool
from six import text_type, string_types

from ckan.common import _, json
import ckan.lib.maintain as maintain

log = __import__('logging').getLogger(__name__)


class License(object):
    """Domain object for a license."""

    def __init__(self, data):
        # convert old keys if necessary
        if 'is_okd_compliant' in data:
            data['od_conformance'] = 'approved' \
                if asbool(data['is_okd_compliant']) else ''
            del data['is_okd_compliant']
        if 'is_osi_compliant' in data:
            data['osd_conformance'] = 'approved' \
                if asbool(data['is_osi_compliant']) else ''
            del data['is_osi_compliant']

        self._data = data
        for (key, value) in self._data.items():
            if key == 'date_created':
                # Parse ISO formatted datetime.
                value = datetime.datetime(*map(int, re.split('[^\d]', value)))
                self._data[key] = value
            elif isinstance(value, str):
                # Convert str to unicode (keeps Pylons and SQLAlchemy happy).
                value = value.decode('utf8')
                self._data[key] = value

    def __getattr__(self, name):
        if name == 'is_okd_compliant':
            log.warn('license.is_okd_compliant is deprecated - use '
                     'od_conformance instead.')
            return self._data['od_conformance'] == 'approved'
        if name == 'is_osi_compliant':
            log.warn('license.is_osi_compliant is deprecated - use '
                     'osd_conformance instead.')
            return self._data['osd_conformance'] == 'approved'
        return self._data[name]

    @maintain.deprecated("License.__getitem__() is deprecated and will be "
                         "removed in a future version of CKAN. Instead, "
                         "please use attribute access.")
    def __getitem__(self, key):
        '''NB This method is deprecated and will be removed in a future
        version of CKAN. Instead, please use attribute access.
        '''
        return self.__getattr__(key)

    def isopen(self):
        if not hasattr(self, '_isopen'):
            self._isopen = self.od_conformance == 'approved' or \
                self.osd_conformance == 'approved'
        return self._isopen

    @maintain.deprecated("License.as_dict() is deprecated and will be "
                         "removed in a future version of CKAN. Instead, "
                         "please use attribute access.")
    def as_dict(self):
        '''NB This method is deprecated and will be removed in a future
        version of CKAN. Instead, please use attribute access.

        '''
        data = self._data.copy()
        if 'date_created' in data:
            value = data['date_created']
            value = value.isoformat()
            data['date_created'] = value
        # deprecated keys
        if 'od_conformance' in data:
            data['is_okd_compliant'] = data['od_conformance'] == 'approved'
        if 'osd_conformance' in data:
            data['is_osi_compliant'] = data['osd_conformance'] == 'approved'
        return data


class LicenseRegister(object):
    """Dictionary-like interface to a group of licenses."""

    def __init__(self):
        group_url = config.get('licenses_group_url', None)
        if group_url:
            self.load_licenses(group_url)
        else:
            default_license_list = [
                LicenseNotSpecified(),
                LicenseOpenDataCommonsPDDL(),
                LicenseOpenDataCommonsOpenDatabase(),
                LicenseOpenDataAttribution(),
                LicenseCreativeCommonsZero(),
                LicenseCreativeCommonsAttribution(),
                LicenseCreativeCommonsAttributionShareAlike(),
                LicenseGNUFreeDocument(),
                LicenseOtherOpen(),
                LicenseOtherPublicDomain(),
                LicenseOtherAttribution(),
                LicenseOpenGovernment(),
                LicenseCreativeCommonsNonCommercial(),
                LicenseOtherNonCommercial(),
                LicenseOtherClosed(),
            ]
            self._create_license_list(default_license_list)

    def load_licenses(self, license_url):
        try:
            if license_url.startswith('file://'):
                with open(license_url.replace('file://', ''), 'r') as f:
                    license_data = json.load(f)
            else:
                response = requests.get(license_url)
                license_data = response.json()
        except requests.RequestException as e:
            msg = "Couldn't get the licenses file {}: {}".format(license_url, e)
            raise Exception(msg)
        except ValueError as e:
            msg = "Couldn't parse the licenses file {}: {}".format(license_url, e)
            raise Exception(msg)
        for license in license_data:
            if isinstance(license, string_types):
                license = license_data[license]
            if license.get('title'):
                license['title'] = _(license['title'])
        self._create_license_list(license_data, license_url)

    def _create_license_list(self, license_data, license_url=''):
        if isinstance(license_data, dict):
            self.licenses = [License(entity) for entity in license_data.values()]
        elif isinstance(license_data, list):
            self.licenses = [License(entity) for entity in license_data]
        else:
            msg = "Licenses at %s must be dictionary or list" % license_url
            raise ValueError(msg)

    def __getitem__(self, key, default=Exception):
        for license in self.licenses:
            if key == license.id:
                return license
        if default != Exception:
            return default
        else:
            raise KeyError("License not found: %s" % key)

    def get(self, key, default=None):
        return self.__getitem__(key, default=default)

    def keys(self):
        return [license.id for license in self.licenses]

    def values(self):
        return self.licenses

    def items(self):
        return [(license.id, license) for license in self.licenses]

    def __iter__(self):
        return iter(self.keys())

    def __len__(self):
        return len(self.licenses)


class DefaultLicense(dict):
    ''' The license was a dict but this did not allow translation of the
    title. This is a slightly changed dict that allows us to have the title
    as a property and so translated.

    '''

    domain_content = False
    domain_data = False
    domain_software = False
    family = ''
    is_generic = False
    od_conformance = 'not reviewed'
    osd_conformance = 'not reviewed'
    maintainer = ''
    status = 'active'
    url = ''
    title = ''
    id = ''

    keys = ['domain_content',
            'id',
            'domain_data',
            'domain_software',
            'family',
            'is_generic',
            'od_conformance',
            'osd_conformance',
            'maintainer',
            'status',
            'url',
            'title']

    def __getitem__(self, key):
        ''' behave like a dict but get from attributes '''
        if key in self.keys:
            value = getattr(self, key)
            if isinstance(value, str):
                return text_type(value)
            else:
                return value
        else:
            raise KeyError()

    def copy(self):
        ''' create a dict of the license used by the licenses api '''
        out = {}
        for key in self.keys:
            out[key] = text_type(getattr(self, key))
        return out


class LicenseNotSpecified(DefaultLicense):
    id = "notspecified"
    is_generic = True

    @property
    def title(self):
        return _("License not specified")


class LicenseOpenDataCommonsPDDL(DefaultLicense):
    domain_data = True
    id = "odc-pddl"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/odc-pddl"

    @property
    def title(self):
        return _("Open Data Commons Public Domain Dedication and License (PDDL)")


class LicenseOpenDataCommonsOpenDatabase(DefaultLicense):
    domain_data = True
    id = "odc-odbl"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/odc-odbl"

    @property
    def title(self):
        return _("Open Data Commons Open Database License (ODbL)")


class LicenseOpenDataAttribution(DefaultLicense):
    domain_data = True
    id = "odc-by"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/odc-by"

    @property
    def title(self):
        return _("Open Data Commons Attribution License")


class LicenseCreativeCommonsZero(DefaultLicense):
    domain_content = True
    domain_data = True
    id = "cc-zero"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/cc-zero"

    @property
    def title(self):
        return _("Creative Commons CCZero")


class LicenseCreativeCommonsAttribution(DefaultLicense):
    id = "cc-by"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/cc-by"

    @property
    def title(self):
        return _("Creative Commons Attribution")


class LicenseCreativeCommonsAttributionShareAlike(DefaultLicense):
    domain_content = True
    id = "cc-by-sa"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/cc-by-sa"

    @property
    def title(self):
        return _("Creative Commons Attribution Share-Alike")


class LicenseGNUFreeDocument(DefaultLicense):
    domain_content = True
    id = "gfdl"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/gfdl"

    @property
    def title(self):
        return _("GNU Free Documentation License")


class LicenseOtherOpen(DefaultLicense):
    domain_content = True
    id = "other-open"
    is_generic = True
    od_conformance = 'approved'

    @property
    def title(self):
        return _("Other (Open)")


class LicenseOtherPublicDomain(DefaultLicense):
    domain_content = True
    id = "other-pd"
    is_generic = True
    od_conformance = 'approved'

    @property
    def title(self):
        return _("Other (Public Domain)")


class LicenseOtherAttribution(DefaultLicense):
    domain_content = True
    id = "other-at"
    is_generic = True
    od_conformance = 'approved'

    @property
    def title(self):
        return _("Other (Attribution)")


class LicenseOpenGovernment(DefaultLicense):
    domain_content = True
    id = "uk-ogl"
    od_conformance = 'approved'
    # CS: bad_spelling ignore
    url = "http://reference.data.gov.uk/id/open-government-licence"

    @property
    def title(self):
        # CS: bad_spelling ignore
        return _("UK Open Government Licence (OGL)")


class LicenseCreativeCommonsNonCommercial(DefaultLicense):
    id = "cc-nc"
    url = "http://creativecommons.org/licenses/by-nc/2.0/"

    @property
    def title(self):
        return _("Creative Commons Non-Commercial (Any)")


class LicenseOtherNonCommercial(DefaultLicense):
    id = "other-nc"
    is_generic = True

    @property
    def title(self):
        return _("Other (Non-Commercial)")


class LicenseOtherClosed(DefaultLicense):
    id = "other-closed"
    is_generic = True

    @property
    def title(self):
        return _("Other (Not Open)")
31.250681
82
0.608684
import datetime
import re

import requests

from ckan.common import config
from ckan.common import asbool
from six import text_type, string_types

from ckan.common import _, json
import ckan.lib.maintain as maintain

log = __import__('logging').getLogger(__name__)


class License(object):

    def __init__(self, data):
        if 'is_okd_compliant' in data:
            data['od_conformance'] = 'approved' \
                if asbool(data['is_okd_compliant']) else ''
            del data['is_okd_compliant']
        if 'is_osi_compliant' in data:
            data['osd_conformance'] = 'approved' \
                if asbool(data['is_osi_compliant']) else ''
            del data['is_osi_compliant']

        self._data = data
        for (key, value) in self._data.items():
            if key == 'date_created':
                value = datetime.datetime(*map(int, re.split('[^\d]', value)))
                self._data[key] = value
            elif isinstance(value, str):
                value = value.decode('utf8')
                self._data[key] = value

    def __getattr__(self, name):
        if name == 'is_okd_compliant':
            log.warn('license.is_okd_compliant is deprecated - use '
                     'od_conformance instead.')
            return self._data['od_conformance'] == 'approved'
        if name == 'is_osi_compliant':
            log.warn('license.is_osi_compliant is deprecated - use '
                     'osd_conformance instead.')
            return self._data['osd_conformance'] == 'approved'
        return self._data[name]

    @maintain.deprecated("License.__getitem__() is deprecated and will be "
                         "removed in a future version of CKAN. Instead, "
                         "please use attribute access.")
    def __getitem__(self, key):
        return self.__getattr__(key)

    def isopen(self):
        if not hasattr(self, '_isopen'):
            self._isopen = self.od_conformance == 'approved' or \
                self.osd_conformance == 'approved'
        return self._isopen

    @maintain.deprecated("License.as_dict() is deprecated and will be "
                         "removed in a future version of CKAN. Instead, "
                         "please use attribute access.")
    def as_dict(self):
        data = self._data.copy()
        if 'date_created' in data:
            value = data['date_created']
            value = value.isoformat()
            data['date_created'] = value
        if 'od_conformance' in data:
            data['is_okd_compliant'] = data['od_conformance'] == 'approved'
        if 'osd_conformance' in data:
            data['is_osi_compliant'] = data['osd_conformance'] == 'approved'
        return data


class LicenseRegister(object):

    def __init__(self):
        group_url = config.get('licenses_group_url', None)
        if group_url:
            self.load_licenses(group_url)
        else:
            default_license_list = [
                LicenseNotSpecified(),
                LicenseOpenDataCommonsPDDL(),
                LicenseOpenDataCommonsOpenDatabase(),
                LicenseOpenDataAttribution(),
                LicenseCreativeCommonsZero(),
                LicenseCreativeCommonsAttribution(),
                LicenseCreativeCommonsAttributionShareAlike(),
                LicenseGNUFreeDocument(),
                LicenseOtherOpen(),
                LicenseOtherPublicDomain(),
                LicenseOtherAttribution(),
                LicenseOpenGovernment(),
                LicenseCreativeCommonsNonCommercial(),
                LicenseOtherNonCommercial(),
                LicenseOtherClosed(),
            ]
            self._create_license_list(default_license_list)

    def load_licenses(self, license_url):
        try:
            if license_url.startswith('file://'):
                with open(license_url.replace('file://', ''), 'r') as f:
                    license_data = json.load(f)
            else:
                response = requests.get(license_url)
                license_data = response.json()
        except requests.RequestException as e:
            msg = "Couldn't get the licenses file {}: {}".format(license_url, e)
            raise Exception(msg)
        except ValueError as e:
            msg = "Couldn't parse the licenses file {}: {}".format(license_url, e)
            raise Exception(msg)
        for license in license_data:
            if isinstance(license, string_types):
                license = license_data[license]
            if license.get('title'):
                license['title'] = _(license['title'])
        self._create_license_list(license_data, license_url)

    def _create_license_list(self, license_data, license_url=''):
        if isinstance(license_data, dict):
            self.licenses = [License(entity) for entity in license_data.values()]
        elif isinstance(license_data, list):
            self.licenses = [License(entity) for entity in license_data]
        else:
            msg = "Licenses at %s must be dictionary or list" % license_url
            raise ValueError(msg)

    def __getitem__(self, key, default=Exception):
        for license in self.licenses:
            if key == license.id:
                return license
        if default != Exception:
            return default
        else:
            raise KeyError("License not found: %s" % key)

    def get(self, key, default=None):
        return self.__getitem__(key, default=default)

    def keys(self):
        return [license.id for license in self.licenses]

    def values(self):
        return self.licenses

    def items(self):
        return [(license.id, license) for license in self.licenses]

    def __iter__(self):
        return iter(self.keys())

    def __len__(self):
        return len(self.licenses)


class DefaultLicense(dict):

    domain_content = False
    domain_data = False
    domain_software = False
    family = ''
    is_generic = False
    od_conformance = 'not reviewed'
    osd_conformance = 'not reviewed'
    maintainer = ''
    status = 'active'
    url = ''
    title = ''
    id = ''

    keys = ['domain_content',
            'id',
            'domain_data',
            'domain_software',
            'family',
            'is_generic',
            'od_conformance',
            'osd_conformance',
            'maintainer',
            'status',
            'url',
            'title']

    def __getitem__(self, key):
        if key in self.keys:
            value = getattr(self, key)
            if isinstance(value, str):
                return text_type(value)
            else:
                return value
        else:
            raise KeyError()

    def copy(self):
        out = {}
        for key in self.keys:
            out[key] = text_type(getattr(self, key))
        return out


class LicenseNotSpecified(DefaultLicense):
    id = "notspecified"
    is_generic = True

    @property
    def title(self):
        return _("License not specified")


class LicenseOpenDataCommonsPDDL(DefaultLicense):
    domain_data = True
    id = "odc-pddl"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/odc-pddl"

    @property
    def title(self):
        return _("Open Data Commons Public Domain Dedication and License (PDDL)")


class LicenseOpenDataCommonsOpenDatabase(DefaultLicense):
    domain_data = True
    id = "odc-odbl"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/odc-odbl"

    @property
    def title(self):
        return _("Open Data Commons Open Database License (ODbL)")


class LicenseOpenDataAttribution(DefaultLicense):
    domain_data = True
    id = "odc-by"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/odc-by"

    @property
    def title(self):
        return _("Open Data Commons Attribution License")


class LicenseCreativeCommonsZero(DefaultLicense):
    domain_content = True
    domain_data = True
    id = "cc-zero"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/cc-zero"

    @property
    def title(self):
        return _("Creative Commons CCZero")


class LicenseCreativeCommonsAttribution(DefaultLicense):
    id = "cc-by"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/cc-by"

    @property
    def title(self):
        return _("Creative Commons Attribution")


class LicenseCreativeCommonsAttributionShareAlike(DefaultLicense):
    domain_content = True
    id = "cc-by-sa"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/cc-by-sa"

    @property
    def title(self):
        return _("Creative Commons Attribution Share-Alike")


class LicenseGNUFreeDocument(DefaultLicense):
    domain_content = True
    id = "gfdl"
    od_conformance = 'approved'
    url = "http://www.opendefinition.org/licenses/gfdl"

    @property
    def title(self):
        return _("GNU Free Documentation License")


class LicenseOtherOpen(DefaultLicense):
    domain_content = True
    id = "other-open"
    is_generic = True
    od_conformance = 'approved'

    @property
    def title(self):
        return _("Other (Open)")


class LicenseOtherPublicDomain(DefaultLicense):
    domain_content = True
    id = "other-pd"
    is_generic = True
    od_conformance = 'approved'

    @property
    def title(self):
        return _("Other (Public Domain)")


class LicenseOtherAttribution(DefaultLicense):
    domain_content = True
    id = "other-at"
    is_generic = True
    od_conformance = 'approved'

    @property
    def title(self):
        return _("Other (Attribution)")


class LicenseOpenGovernment(DefaultLicense):
    domain_content = True
    id = "uk-ogl"
    od_conformance = 'approved'
    url = "http://reference.data.gov.uk/id/open-government-licence"

    @property
    def title(self):
        return _("UK Open Government Licence (OGL)")


class LicenseCreativeCommonsNonCommercial(DefaultLicense):
    id = "cc-nc"
    url = "http://creativecommons.org/licenses/by-nc/2.0/"

    @property
    def title(self):
        return _("Creative Commons Non-Commercial (Any)")


class LicenseOtherNonCommercial(DefaultLicense):
    id = "other-nc"
    is_generic = True

    @property
    def title(self):
        return _("Other (Non-Commercial)")


class LicenseOtherClosed(DefaultLicense):
    id = "other-closed"
    is_generic = True

    @property
    def title(self):
        return _("Other (Not Open)")
true
true
f70394c4b4c5b9d7822853ff5865b2756c9dd6bc
1,768
py
Python
Chapter 10/code/category_predictor.py
shivampotdar/Artificial-Intelligence-with-Python
00221c3b1a6d8003765d1ca48b5c95f86da375d9
[ "MIT" ]
387
2017-02-11T18:28:50.000Z
2022-03-27T01:16:05.000Z
Chapter 10/code/category_predictor.py
shivampotdar/Artificial-Intelligence-with-Python
00221c3b1a6d8003765d1ca48b5c95f86da375d9
[ "MIT" ]
18
2017-12-15T03:10:25.000Z
2021-04-20T14:32:43.000Z
Chapter 10/code/category_predictor.py
shivampotdar/Artificial-Intelligence-with-Python
00221c3b1a6d8003765d1ca48b5c95f86da375d9
[ "MIT" ]
407
2017-01-23T15:18:33.000Z
2022-03-16T05:39:02.000Z
from sklearn.datasets import fetch_20newsgroups
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer

# Define the category map
category_map = {'talk.politics.misc': 'Politics', 'rec.autos': 'Autos',
        'rec.sport.hockey': 'Hockey', 'sci.electronics': 'Electronics',
        'sci.med': 'Medicine'}

# Get the training dataset
training_data = fetch_20newsgroups(subset='train',
        categories=category_map.keys(), shuffle=True, random_state=5)

# Build a count vectorizer and extract term counts
count_vectorizer = CountVectorizer()
train_tc = count_vectorizer.fit_transform(training_data.data)
print("\nDimensions of training data:", train_tc.shape)

# Create the tf-idf transformer
tfidf = TfidfTransformer()
train_tfidf = tfidf.fit_transform(train_tc)

# Define test data
input_data = [
    'You need to be careful with cars when you are driving on slippery roads',
    'A lot of devices can be operated wirelessly',
    'Players need to be careful when they are close to goal posts',
    'Political debates help us understand the perspectives of both sides'
]

# Train a Multinomial Naive Bayes classifier
classifier = MultinomialNB().fit(train_tfidf, training_data.target)

# Transform input data using count vectorizer
input_tc = count_vectorizer.transform(input_data)

# Transform vectorized data using tfidf transformer
input_tfidf = tfidf.transform(input_tc)

# Predict the output categories
predictions = classifier.predict(input_tfidf)

# Print the outputs
for sent, category in zip(input_data, predictions):
    print('\nInput:', sent, '\nPredicted category:', \
        category_map[training_data.target_names[category]])
36.081633
79
0.769796
from sklearn.datasets import fetch_20newsgroups
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer

category_map = {'talk.politics.misc': 'Politics', 'rec.autos': 'Autos',
        'rec.sport.hockey': 'Hockey', 'sci.electronics': 'Electronics',
        'sci.med': 'Medicine'}

training_data = fetch_20newsgroups(subset='train',
        categories=category_map.keys(), shuffle=True, random_state=5)

count_vectorizer = CountVectorizer()
train_tc = count_vectorizer.fit_transform(training_data.data)
print("\nDimensions of training data:", train_tc.shape)

tfidf = TfidfTransformer()
train_tfidf = tfidf.fit_transform(train_tc)

input_data = [
    'You need to be careful with cars when you are driving on slippery roads',
    'A lot of devices can be operated wirelessly',
    'Players need to be careful when they are close to goal posts',
    'Political debates help us understand the perspectives of both sides'
]

classifier = MultinomialNB().fit(train_tfidf, training_data.target)

input_tc = count_vectorizer.transform(input_data)

input_tfidf = tfidf.transform(input_tc)

predictions = classifier.predict(input_tfidf)

for sent, category in zip(input_data, predictions):
    print('\nInput:', sent, '\nPredicted category:', \
        category_map[training_data.target_names[category]])
true
true
f7039603998c5edc3cabdaf934821dfecab387fe
1,434
py
Python
ada_friend_app/api/login.py
ratopythonista/ada-friend
0b40afb3b7b4b60980663e962ccd455d6436345d
[ "MIT" ]
null
null
null
ada_friend_app/api/login.py
ratopythonista/ada-friend
0b40afb3b7b4b60980663e962ccd455d6436345d
[ "MIT" ]
null
null
null
ada_friend_app/api/login.py
ratopythonista/ada-friend
0b40afb3b7b4b60980663e962ccd455d6436345d
[ "MIT" ]
null
null
null
from loguru import logger
from flask import request
from flasgger import swag_from
from flask_restful import Resource
from jwt.exceptions import ExpiredSignatureError

from ada_friend_app.modulo.cripto import Sha256
from ada_friend_app.modulo.jwt_auth import Token
from ada_friend_app.api.resposta_api import Resposta
from ada_friend_app.servico.mod_database import Database


class Login(Resource):
    @swag_from('../../docs/api/login_post.yml')
    def post(self):
        json = request.json
        if json.get('email', False) and json.get('senha', False):
            senha = Sha256(json['senha']).hash
            usuario = Database().get_document('usuarios', {'_id': json['email'], 'senha': senha})
            if usuario:
                usuario = usuario[0]
                logger.debug(f"{json['email']} - CONECTADO")
                try:
                    token = Token.gerar(usuario['senha'], usuario['_id'])
                    return Resposta.token_validado(token)
                except ExpiredSignatureError:
                    return Resposta.nao_aceito('Token expirado')
                except Exception as e:
                    return Resposta.error(str(e))
            else:
                logger.debug(f"{json['email']} - ERRO DE ACESSO")
                return Resposta.nao_aceito('Usuário ou senha inválido!')
        else:
            return Resposta.error('JSON Inválido!')
37.736842
97
0.608089
from loguru import logger
from flask import request
from flasgger import swag_from
from flask_restful import Resource
from jwt.exceptions import ExpiredSignatureError

from ada_friend_app.modulo.cripto import Sha256
from ada_friend_app.modulo.jwt_auth import Token
from ada_friend_app.api.resposta_api import Resposta
from ada_friend_app.servico.mod_database import Database


class Login(Resource):
    @swag_from('../../docs/api/login_post.yml')
    def post(self):
        json = request.json
        if json.get('email', False) and json.get('senha', False):
            senha = Sha256(json['senha']).hash
            usuario = Database().get_document('usuarios', {'_id': json['email'], 'senha': senha})
            if usuario:
                usuario = usuario[0]
                logger.debug(f"{json['email']} - CONECTADO")
                try:
                    token = Token.gerar(usuario['senha'], usuario['_id'])
                    return Resposta.token_validado(token)
                except ExpiredSignatureError:
                    return Resposta.nao_aceito('Token expirado')
                except Exception as e:
                    return Resposta.error(str(e))
            else:
                logger.debug(f"{json['email']} - ERRO DE ACESSO")
                return Resposta.nao_aceito('Usuário ou senha inválido!')
        else:
            return Resposta.error('JSON Inválido!')
true
true
f70396193135830ef2d1de8e357842ea1ef0eea2
747
py
Python
PaddleCV/PaddleDetection/ppdet/experimental/__init__.py
XiaoguangHu01/models
a95d49323ed504e5a9164586f171f408954fd43a
[ "Apache-2.0" ]
7,782
2019-10-25T09:39:37.000Z
2022-03-31T13:44:14.000Z
static/ppdet/experimental/__init__.py
siqi-yang/PaddleDetection
d7383ad99c69e03f984ead52cc645d17f4729837
[ "Apache-2.0" ]
3,499
2019-10-29T12:37:40.000Z
2022-03-31T14:51:56.000Z
static/ppdet/experimental/__init__.py
siqi-yang/PaddleDetection
d7383ad99c69e03f984ead52cc645d17f4729837
[ "Apache-2.0" ]
1,874
2019-10-28T04:21:58.000Z
2022-03-31T05:41:21.000Z
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

from .mixed_precision import *
from . import mixed_precision

__all__ = mixed_precision.__all__
35.571429
74
0.776439
from __future__ import absolute_import

from .mixed_precision import *
from . import mixed_precision

__all__ = mixed_precision.__all__
true
true
f70396c8be176b604dce67ff49b19ec44c86cdc8
1,328
py
Python
rasa/cli/arguments/data.py
deepmipt/rasa
f0cc0ff6e515df2249998ff4e788009cccaecc02
[ "Apache-2.0" ]
2
2021-02-19T07:19:27.000Z
2022-01-19T09:00:43.000Z
rasa/cli/arguments/data.py
RakibulAsheeque/rasa
7d3804cd081c73d78ab5e973f95a55845eed1e89
[ "Apache-2.0" ]
6
2020-01-28T22:55:28.000Z
2022-02-10T00:20:45.000Z
rasa/cli/arguments/data.py
RakibulAsheeque/rasa
7d3804cd081c73d78ab5e973f95a55845eed1e89
[ "Apache-2.0" ]
1
2022-01-19T09:00:45.000Z
2022-01-19T09:00:45.000Z
import argparse

from rasa.cli.arguments.default_arguments import (
    add_nlu_data_param,
    add_out_param,
    add_data_param,
    add_domain_param,
)


def set_convert_arguments(parser: argparse.ArgumentParser):
    add_data_param(parser, required=True, default=None, data_type="Rasa NLU ")

    add_out_param(
        parser,
        required=True,
        default=None,
        help_text="File where to save training data in Rasa format.",
    )

    parser.add_argument("-l", "--language", default="en", help="Language of data.")

    parser.add_argument(
        "-f",
        "--format",
        required=True,
        choices=["json", "md"],
        help="Output format the training data should be converted into.",
    )


def set_split_arguments(parser: argparse.ArgumentParser):
    add_nlu_data_param(parser, help_text="File or folder containing your NLU data.")

    parser.add_argument(
        "--training-fraction",
        type=float,
        default=0.8,
        help="Percentage of the data which should be in the training data.",
    )

    add_out_param(
        parser,
        default="train_test_split",
        help_text="Directory where the split files should be stored.",
    )


def set_validator_arguments(parser: argparse.ArgumentParser):
    add_domain_param(parser)
    add_data_param(parser)
25.538462
84
0.665663
import argparse

from rasa.cli.arguments.default_arguments import (
    add_nlu_data_param,
    add_out_param,
    add_data_param,
    add_domain_param,
)


def set_convert_arguments(parser: argparse.ArgumentParser):
    add_data_param(parser, required=True, default=None, data_type="Rasa NLU ")

    add_out_param(
        parser,
        required=True,
        default=None,
        help_text="File where to save training data in Rasa format.",
    )

    parser.add_argument("-l", "--language", default="en", help="Language of data.")

    parser.add_argument(
        "-f",
        "--format",
        required=True,
        choices=["json", "md"],
        help="Output format the training data should be converted into.",
    )


def set_split_arguments(parser: argparse.ArgumentParser):
    add_nlu_data_param(parser, help_text="File or folder containing your NLU data.")

    parser.add_argument(
        "--training-fraction",
        type=float,
        default=0.8,
        help="Percentage of the data which should be in the training data.",
    )

    add_out_param(
        parser,
        default="train_test_split",
        help_text="Directory where the split files should be stored.",
    )


def set_validator_arguments(parser: argparse.ArgumentParser):
    add_domain_param(parser)
    add_data_param(parser)
true
true
f703974e9d75f9f9f38c70017d90be9c33941f4b
3,275
py
Python
src/trading_simulation/simulation.py
andrzejmalota/StockPricePrediction
a6d7da353b706fb2d970f2883841db14d896268f
[ "MIT" ]
1
2020-02-28T15:37:35.000Z
2020-02-28T15:37:35.000Z
src/trading_simulation/simulation.py
andrzejmalota/StockPricePrediction
a6d7da353b706fb2d970f2883841db14d896268f
[ "MIT" ]
null
null
null
src/trading_simulation/simulation.py
andrzejmalota/StockPricePrediction
a6d7da353b706fb2d970f2883841db14d896268f
[ "MIT" ]
1
2020-07-09T02:41:15.000Z
2020-07-09T02:41:15.000Z
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd


class Simulation:
    def __init__(self, init_investment, stock_returns, strategy, predicted_movements=None):
        self.init_investment = init_investment
        self.predicted_movements = predicted_movements
        self.stock_returns = stock_returns
        self.strategy = strategy
        self.action_history = []
        self.account_history = [init_investment]
        self.__actual_investment = 0
        self.step = 0
        self.return_on_investment = 0
        self.profit_on_investment = 0

    def start(self):
        for self.step in range(len(self.stock_returns)):
            if self.predicted_movements is not None:
                action = self.strategy.decide(self.predicted_movements[self.step])
            else:
                action = self.strategy.decide(self.step)
            self.__make_transaction(action)

    def __make_transaction(self, action):
        self.action_history.append(action)
        if action == 'buy':
            self.__buy()
        elif action == 'hold':
            self.__hold()
        elif action == 'sell':
            self.__sell()
        elif action == 'wait':
            self.__wait()
        else:
            sys.exit('Action not implemented, exiting program!')

    def get_investment_performance(self):
        self.return_on_investment = (self.account_history[-1] - self.init_investment) / self.init_investment
        self.profit_on_investment = self.account_history[-1] - self.init_investment
        return {'return': self.return_on_investment,
                'profit': self.profit_on_investment}

    def plot_trading_history(self, stock_prices, date):
        date = date.iloc[-(len(stock_prices) - 1):]
        stock_prices = np.insert(stock_prices, 0, stock_prices[0])
        fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(40, 20))
        ax1.plot(stock_prices, color='black', label='Cena zamknięcia akcji')
        actions = pd.DataFrame(self.action_history)
        buy_idx = actions[actions[0] == 'buy'].index.to_list()
        sell_idx = actions[actions[0] == 'sell'].index.to_list()
        stock_prices = np.array(stock_prices)
        ax1.scatter(buy_idx, stock_prices[buy_idx], color='green', s=40, label='Kupno')
        ax1.scatter(sell_idx, stock_prices[sell_idx], color='red', s=40, label='Sprzedaż')
        ax1.legend()
        ax2.plot(self.account_history[:-1], label='Kapitał')
        plt.xlabel('Krok czasowy')
        ax1.set_ylabel('Cena akcji')
        ax2.set_ylabel('Kapitał')
        ax2.legend()
        plt.show()

    def __calculate_daily_profit(self):
        self.__actual_investment += self.__actual_investment * self.stock_returns[self.step]

    def __buy(self):
        self.__actual_investment = self.account_history[self.step]
        self.__calculate_daily_profit()
        self.account_history.append(self.__actual_investment)

    def __hold(self):
        self.__calculate_daily_profit()
        self.account_history.append(self.__actual_investment)

    def __sell(self):
        self.account_history.append(self.__actual_investment)
        self.__actual_investment = 0

    def __wait(self):
        self.account_history.append(self.account_history[self.step-1])
38.988095
108
0.654656
import sys

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd


class Simulation:
    def __init__(self, init_investment, stock_returns, strategy, predicted_movements=None):
        self.init_investment = init_investment
        self.predicted_movements = predicted_movements
        self.stock_returns = stock_returns
        self.strategy = strategy
        self.action_history = []
        self.account_history = [init_investment]
        self.__actual_investment = 0
        self.step = 0
        self.return_on_investment = 0
        self.profit_on_investment = 0

    def start(self):
        for self.step in range(len(self.stock_returns)):
            if self.predicted_movements is not None:
                action = self.strategy.decide(self.predicted_movements[self.step])
            else:
                action = self.strategy.decide(self.step)
            self.__make_transaction(action)

    def __make_transaction(self, action):
        self.action_history.append(action)
        if action == 'buy':
            self.__buy()
        elif action == 'hold':
            self.__hold()
        elif action == 'sell':
            self.__sell()
        elif action == 'wait':
            self.__wait()
        else:
            sys.exit('Action not implemented, exiting program!')

    def get_investment_performance(self):
        self.return_on_investment = (self.account_history[-1] - self.init_investment) / self.init_investment
        self.profit_on_investment = self.account_history[-1] - self.init_investment
        return {'return': self.return_on_investment,
                'profit': self.profit_on_investment}

    def plot_trading_history(self, stock_prices, date):
        date = date.iloc[-(len(stock_prices) - 1):]
        stock_prices = np.insert(stock_prices, 0, stock_prices[0])
        fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(40, 20))
        ax1.plot(stock_prices, color='black', label='Cena zamknięcia akcji')
        actions = pd.DataFrame(self.action_history)
        buy_idx = actions[actions[0] == 'buy'].index.to_list()
        sell_idx = actions[actions[0] == 'sell'].index.to_list()
        stock_prices = np.array(stock_prices)
        ax1.scatter(buy_idx, stock_prices[buy_idx], color='green', s=40, label='Kupno')
        ax1.scatter(sell_idx, stock_prices[sell_idx], color='red', s=40, label='Sprzedaż')
        ax1.legend()
        ax2.plot(self.account_history[:-1], label='Kapitał')
        plt.xlabel('Krok czasowy')
        ax1.set_ylabel('Cena akcji')
        ax2.set_ylabel('Kapitał')
        ax2.legend()
        plt.show()

    def __calculate_daily_profit(self):
        self.__actual_investment += self.__actual_investment * self.stock_returns[self.step]

    def __buy(self):
        self.__actual_investment = self.account_history[self.step]
        self.__calculate_daily_profit()
        self.account_history.append(self.__actual_investment)

    def __hold(self):
        self.__calculate_daily_profit()
        self.account_history.append(self.__actual_investment)

    def __sell(self):
        self.account_history.append(self.__actual_investment)
        self.__actual_investment = 0

    def __wait(self):
        self.account_history.append(self.account_history[self.step-1])
true
true
f70398596acf9ff285114deab4a95eb353a9e137
5,676
py
Python
src/interactive/azext_interactive/azclishell/configuration.py
mayank88mahajan/azure-cli-extensions
8bd389a1877bffd14052bec5519ce75dc6fc34cf
[ "MIT" ]
1
2019-05-10T19:58:09.000Z
2019-05-10T19:58:09.000Z
src/interactive/azext_interactive/azclishell/configuration.py
mayank88mahajan/azure-cli-extensions
8bd389a1877bffd14052bec5519ce75dc6fc34cf
[ "MIT" ]
null
null
null
src/interactive/azext_interactive/azclishell/configuration.py
mayank88mahajan/azure-cli-extensions
8bd389a1877bffd14052bec5519ce75dc6fc34cf
[ "MIT" ]
1
2021-07-28T14:50:54.000Z
2021-07-28T14:50:54.000Z
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from __future__ import print_function

import os

from six.moves import configparser

from prompt_toolkit import prompt  # pylint: disable=import-error

from azure.cli.core._help import PRIVACY_STATEMENT


SELECT_SYMBOL = {
    'outside': '#',
    'query': '??',
    'example': '::',
    'exit_code': '$',
    'scope': '%%',
    'unscope': '..'
}

GESTURE_INFO = {
    SELECT_SYMBOL['outside'] + "[cmd]": "use commands outside the application",
    # pylint: disable=line-too-long
    "[cmd] + [param] +" + "\"" + SELECT_SYMBOL['query'] + "[query]" + "\"": "Inject jmespath query from previous command",
    "\"" + SELECT_SYMBOL['query'] + "[query]" + "\"": "Jmespath query of the previous command",
    "[cmd] " + SELECT_SYMBOL['example'] + " [num]": "do a step by step tutorial of example",
    SELECT_SYMBOL['exit_code']: "get the exit code of the previous command",
    SELECT_SYMBOL['scope'] + '[cmd]': "set a scope, and scopes can be chained with spaces",
    SELECT_SYMBOL['scope'] + ' ' + SELECT_SYMBOL['unscope']: "go back a scope",
}

CONFIG_FILE_NAME = 'shell-config'
GESTURE_LENGTH = max(len(key) for key in GESTURE_INFO) + 1


def help_text(values):
    """ reformats the help text """
    result = ""
    for key in values:
        result += key + ' '.join('' for x in range(GESTURE_LENGTH - len(key))) +\
            ': ' + values[key] + '\n'
    return result


SHELL_HELP = help_text(GESTURE_INFO)


class Configuration(object):
    """ configuration for program """

    BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
                      '0': False, 'no': False, 'false': False, 'off': False,
                      'y': True, 'Y': True, 'n': False, 'N': False}

    """ Configuration information """
    def __init__(self, cli_config, style=None):
        self.config = configparser.ConfigParser({
            'firsttime': 'yes',
            'style': style if style else 'default'
        })
        self.cli_config = cli_config
        self.config.add_section('Help Files')
        self.config.add_section('Layout')
        self.config.set('Help Files', 'command', 'help_dump.json')
        self.config.set('Help Files', 'history', 'history.txt')
        self.config.set('Help Files', 'frequency', 'frequency.json')
        self.config.set('Layout', 'command_description', 'yes')
        self.config.set('Layout', 'param_description', 'yes')
        self.config.set('Layout', 'examples', 'yes')
        self.config_dir = os.getenv('AZURE_CONFIG_DIR') or os.path.expanduser(os.path.join('~', '.azure-shell'))

        if not os.path.exists(self.config_dir):
            os.makedirs(self.config_dir)
        if not os.path.exists(os.path.join(self.config_dir, CONFIG_FILE_NAME)):
            with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'w') as config_file:
                self.config.write(config_file)
        else:
            with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'r') as config_file:
                self.config.readfp(config_file)  # pylint: disable=deprecated-method
        self.update()

    def get_config_dir(self):
        return self.config_dir

    def get_history(self):
        """ returns the history """
        return self.config.get('Help Files', 'history')

    def get_help_files(self):
        """ returns where the command table is cached """
        return self.config.get('Help Files', 'command')

    def get_frequency(self):
        """ returns the name of the frequency file """
        return self.config.get('Help Files', 'frequency')

    def load(self, path):
        """ loads the configuration settings """
        self.config.read(path)

    def firsttime(self):
        """ sets it as already done"""
        self.config.set('DEFAULT', 'firsttime', 'no')
        if self.cli_config.getboolean('core', 'collect_telemetry', fallback=False):
            print(PRIVACY_STATEMENT)
        else:
            self.cli_config.set_value('core', 'collect_telemetry', ask_user_for_telemetry())
        self.update()

    def get_style(self):
        """ gets the last style they used """
        return self.config.get('DEFAULT', 'style')

    def has_feedback(self):
        """ returns whether user has given feedback """
        return self.cli_config.getboolean('core', 'given feedback', fallback='false')

    def set_feedback(self, value):
        """ sets the feedback in the config """
        self.cli_config.set_value('core', 'given feedback', value)

    def set_style(self, val):
        """ sets the style they used """
        self.set_val('DEFAULT', 'style', val)

    def set_val(self, direct, section, val):
        """ set the config values """
        if val is not None:
            self.config.set(direct, section, val)
            self.update()

    def update(self):
        """ updates the configuration settings """
        with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'w') as config_file:
            self.config.write(config_file)


def ask_user_for_telemetry():
    """ asks the user for if we can collect telemetry """
    answer = " "
    while answer.lower() != 'yes' and answer.lower() != 'no':
        answer = prompt(u'\nDo you agree to sending telemetry (yes/no)? Default answer is yes: ')
        if answer == '':
            answer = 'yes'
    return answer
37.84
122
0.592319
from __future__ import print_function

import os

from six.moves import configparser

from prompt_toolkit import prompt

from azure.cli.core._help import PRIVACY_STATEMENT


SELECT_SYMBOL = {
    'outside': '#',
    'query': '??',
    'example': '::',
    'exit_code': '$',
    'scope': '%%',
    'unscope': '..'
}

GESTURE_INFO = {
    SELECT_SYMBOL['outside'] + "[cmd]": "use commands outside the application",
    "[cmd] + [param] +" + "\"" + SELECT_SYMBOL['query'] + "[query]" + "\"": "Inject jmespath query from previous command",
    "\"" + SELECT_SYMBOL['query'] + "[query]" + "\"": "Jmespath query of the previous command",
    "[cmd] " + SELECT_SYMBOL['example'] + " [num]": "do a step by step tutorial of example",
    SELECT_SYMBOL['exit_code']: "get the exit code of the previous command",
    SELECT_SYMBOL['scope'] + '[cmd]': "set a scope, and scopes can be chained with spaces",
    SELECT_SYMBOL['scope'] + ' ' + SELECT_SYMBOL['unscope']: "go back a scope",
}

CONFIG_FILE_NAME = 'shell-config'
GESTURE_LENGTH = max(len(key) for key in GESTURE_INFO) + 1


def help_text(values):
    result = ""
    for key in values:
        result += key + ' '.join('' for x in range(GESTURE_LENGTH - len(key))) +\
            ': ' + values[key] + '\n'
    return result


SHELL_HELP = help_text(GESTURE_INFO)


class Configuration(object):

    BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
                      '0': False, 'no': False, 'false': False, 'off': False,
                      'y': True, 'Y': True, 'n': False, 'N': False}

    def __init__(self, cli_config, style=None):
        self.config = configparser.ConfigParser({
            'firsttime': 'yes',
            'style': style if style else 'default'
        })
        self.cli_config = cli_config
        self.config.add_section('Help Files')
        self.config.add_section('Layout')
        self.config.set('Help Files', 'command', 'help_dump.json')
        self.config.set('Help Files', 'history', 'history.txt')
        self.config.set('Help Files', 'frequency', 'frequency.json')
        self.config.set('Layout', 'command_description', 'yes')
        self.config.set('Layout', 'param_description', 'yes')
        self.config.set('Layout', 'examples', 'yes')
        self.config_dir = os.getenv('AZURE_CONFIG_DIR') or os.path.expanduser(os.path.join('~', '.azure-shell'))

        if not os.path.exists(self.config_dir):
            os.makedirs(self.config_dir)
        if not os.path.exists(os.path.join(self.config_dir, CONFIG_FILE_NAME)):
            with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'w') as config_file:
                self.config.write(config_file)
        else:
            with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'r') as config_file:
                self.config.readfp(config_file)
        self.update()

    def get_config_dir(self):
        return self.config_dir

    def get_history(self):
        return self.config.get('Help Files', 'history')

    def get_help_files(self):
        return self.config.get('Help Files', 'command')

    def get_frequency(self):
        return self.config.get('Help Files', 'frequency')

    def load(self, path):
        self.config.read(path)

    def firsttime(self):
        self.config.set('DEFAULT', 'firsttime', 'no')
        if self.cli_config.getboolean('core', 'collect_telemetry', fallback=False):
            print(PRIVACY_STATEMENT)
        else:
            self.cli_config.set_value('core', 'collect_telemetry', ask_user_for_telemetry())
        self.update()

    def get_style(self):
        return self.config.get('DEFAULT', 'style')

    def has_feedback(self):
        return self.cli_config.getboolean('core', 'given feedback', fallback='false')

    def set_feedback(self, value):
        self.cli_config.set_value('core', 'given feedback', value)

    def set_style(self, val):
        self.set_val('DEFAULT', 'style', val)

    def set_val(self, direct, section, val):
        if val is not None:
            self.config.set(direct, section, val)
            self.update()

    def update(self):
        with open(os.path.join(self.config_dir, CONFIG_FILE_NAME), 'w') as config_file:
            self.config.write(config_file)


def ask_user_for_telemetry():
    answer = " "
    while answer.lower() != 'yes' and answer.lower() != 'no':
        answer = prompt(u'\nDo you agree to sending telemetry (yes/no)? Default answer is yes: ')
        if answer == '':
            answer = 'yes'
    return answer
true
true
f70398e1f175d5b25119973036ab52f724bcd197
1,279
py
Python
setup.py
cunningr/yanccm
2d8f891d704672f4d3a15472c7a13edf7832d53d
[ "MIT" ]
null
null
null
setup.py
cunningr/yanccm
2d8f891d704672f4d3a15472c7a13edf7832d53d
[ "MIT" ]
null
null
null
setup.py
cunningr/yanccm
2d8f891d704672f4d3a15472c7a13edf7832d53d
[ "MIT" ]
null
null
null
from distutils.core import setup

setup(
    name='yanccm',
    packages=[
        'controller',
        'sot',
        'ncservice',
        'ncservice.configDb',
        'ncservice.ncDeviceOps',
        'ncservice.ncDeviceOps.threaded',
        'view'],
    version='0.0.2',
    license='MIT',
    description='''YANCCM (pronounced yank'em) - Yet Another Network Configuration and Change Managment tool,
    is multi-threaded configuration manger for network devices that leverages the NETCONF protocol''',
    author='Richard Cunningham',
    author_email='cunningr@gmail.com',
    url='https://github.com/cunningr/yanccm',
    download_url='https://github.com/cunningr/yanccm',
    keywords=['Netconf', 'Cisco', 'configuration management'],
    install_requires=[
        'ncclient',
        'lxml',
        'pyyaml',
        'pymongo',
        'tabulate',
        'requests',
        'jinja2'
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6'
    ],
    entry_points={
        'console_scripts': [
            'yanccm = controller.cli:main'
        ]
    }
)
29.068182
113
0.591087
from distutils.core import setup

setup(
    name='yanccm',
    packages=[
        'controller',
        'sot',
        'ncservice',
        'ncservice.configDb',
        'ncservice.ncDeviceOps',
        'ncservice.ncDeviceOps.threaded',
        'view'],
    version='0.0.2',
    license='MIT',
    description='''YANCCM (pronounced yank'em) - Yet Another Network Configuration and Change Managment tool,
    is multi-threaded configuration manger for network devices that leverages the NETCONF protocol''',
    author='Richard Cunningham',
    author_email='cunningr@gmail.com',
    url='https://github.com/cunningr/yanccm',
    download_url='https://github.com/cunningr/yanccm',
    keywords=['Netconf', 'Cisco', 'configuration management'],
    install_requires=[
        'ncclient',
        'lxml',
        'pyyaml',
        'pymongo',
        'tabulate',
        'requests',
        'jinja2'
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6'
    ],
    entry_points={
        'console_scripts': [
            'yanccm = controller.cli:main'
        ]
    }
)
true
true
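
The entry_points block in the yanccm record wires a `yanccm` console command to controller.cli:main; note that entry_points is honored only when setuptools supplies the setup() implementation (as it does under pip), since plain distutils ignores the keyword. The repository's real controller/cli.py is not part of this record, so the following is only a hypothetical sketch of the callable that such a console_scripts line requires:

import argparse

def main():
    # entry point referenced by 'yanccm = controller.cli:main' (hypothetical body)
    parser = argparse.ArgumentParser(prog='yanccm',
                                     description='NETCONF-based configuration manager')
    parser.add_argument('command', help='subcommand to run, e.g. deploy')
    args = parser.parse_args()
    print('would dispatch:', args.command)

if __name__ == '__main__':
    main()

After installation, setuptools generates a `yanccm` launcher script that imports controller.cli and calls main().
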
f7039945cbf7c33f6d0a0a69ac4129d20b76d598
194
py
Python
src/terregex/__init__.py
m1kit/terregex
b8a809b2db664f26f90f7da29481be97cf6959f7
[ "Apache-2.0" ]
null
null
null
src/terregex/__init__.py
m1kit/terregex
b8a809b2db664f26f90f7da29481be97cf6959f7
[ "Apache-2.0" ]
null
null
null
src/terregex/__init__.py
m1kit/terregex
b8a809b2db664f26f90f7da29481be97cf6959f7
[ "Apache-2.0" ]
null
null
null
from terregex.mlr import Node, NodeList, Literal, NotLiteral, \
    In, Negate, Range, Category, MinRepeat, MaxRepeat, \
    SubPattern, Branch, Any, parse
from terregex.transform import Transformer
48.5
63
0.778351
from terregex.mlr import Node, NodeList, Literal, NotLiteral, \
    In, Negate, Range, Category, MinRepeat, MaxRepeat, \
    SubPattern, Branch, Any, parse
from terregex.transform import Transformer
true
true
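
The terregex record re-exports a small AST for regular expressions (Literal, Branch, MinRepeat, ...) plus a parse() entry point and a Transformer for rewriting. As a concept sketch only — not terregex's actual API — libraries of this shape typically build on the parse tree produced by the standard library's sre_parse module:

import sre_parse  # renamed to re._parser in Python 3.11+, still importable as sre_parse

# SubPattern behaves as a sequence of (opcode, argument) nodes, the same
# shape the record's Literal/Branch/MinRepeat wrappers suggest
tree = sre_parse.parse(r"ab+(c|d)")
for op, arg in tree:
    print(op, arg)
# e.g. LITERAL 97, then MAX_REPEAT (1, MAXREPEAT, [(LITERAL, 98)]),
# then SUBPATTERN for the (c|d) branch
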
f70399bc2b81b97e423fbbbaf3c843a17a7dcaf9
1,452
py
Python
var/spack/repos/builtin/packages/prinseq-lite/package.py
player1537-forks/spack
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
11
2015-10-04T02:17:46.000Z
2018-02-07T18:23:00.000Z
var/spack/repos/builtin/packages/prinseq-lite/package.py
player1537-forks/spack
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
22
2017-08-01T22:45:10.000Z
2022-03-10T07:46:31.000Z
var/spack/repos/builtin/packages/prinseq-lite/package.py
player1537-forks/spack
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
4
2016-06-10T17:57:39.000Z
2018-09-11T04:59:38.000Z
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PrinseqLite(Package):
    """PRINSEQ will help you to preprocess your genomic or metagenomic
    sequence data in FASTA or FASTQ format."""

    homepage = "http://prinseq.sourceforge.net"
    url = "https://sourceforge.net/projects/prinseq/files/standalone/prinseq-lite-0.20.4.tar.gz"

    version('0.20.4', sha256='9b5e0dce3b7f02f09e1cc7e8a2dd77c0b133e5e35529d570ee901f53ebfeb56f')

    variant('nopca', default=True, description="Graphs version without PCA")

    depends_on('perl', type='run')
    depends_on('perl-cairo', type='run')
    depends_on('perl-digest-md5', type='run')
    depends_on('perl-json', type='run')

    def install(self, spec, prefix):
        mkdirp(prefix.bin)

        filter_file(r'#!/usr/bin/perl', '#!/usr/bin/env perl',
                    'prinseq-graphs-noPCA.pl')
        filter_file(r'#!/usr/bin/perl', '#!/usr/bin/env perl',
                    'prinseq-lite.pl')

        install('prinseq-graphs-noPCA.pl', prefix.bin)
        install('prinseq-lite.pl', prefix.bin)

        chmod = which('chmod')
        chmod('+x', join_path(self.prefix.bin, 'prinseq-graphs-noPCA.pl'))
        chmod('+x', join_path(self.prefix.bin, 'prinseq-lite.pl'))
34.571429
101
0.643251
from spack import *


class PrinseqLite(Package):

    homepage = "http://prinseq.sourceforge.net"
    url = "https://sourceforge.net/projects/prinseq/files/standalone/prinseq-lite-0.20.4.tar.gz"

    version('0.20.4', sha256='9b5e0dce3b7f02f09e1cc7e8a2dd77c0b133e5e35529d570ee901f53ebfeb56f')

    variant('nopca', default=True, description="Graphs version without PCA")

    depends_on('perl', type='run')
    depends_on('perl-cairo', type='run')
    depends_on('perl-digest-md5', type='run')
    depends_on('perl-json', type='run')

    def install(self, spec, prefix):
        mkdirp(prefix.bin)

        filter_file(r'#!/usr/bin/perl', '#!/usr/bin/env perl',
                    'prinseq-graphs-noPCA.pl')
        filter_file(r'#!/usr/bin/perl', '#!/usr/bin/env perl',
                    'prinseq-lite.pl')

        install('prinseq-graphs-noPCA.pl', prefix.bin)
        install('prinseq-lite.pl', prefix.bin)

        chmod = which('chmod')
        chmod('+x', join_path(self.prefix.bin, 'prinseq-graphs-noPCA.pl'))
        chmod('+x', join_path(self.prefix.bin, 'prinseq-lite.pl'))
true
true
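
In the prinseq-lite record, install() patches each Perl script's shebang with Spack's filter_file before copying it into prefix.bin. A rough standalone equivalent of that one step — an illustrative assumption, not Spack's actual implementation:

import re

def rewrite_shebang(path):
    # swap the hard-coded interpreter for an env lookup, as the package does
    with open(path) as f:
        text = f.read()
    text = re.sub(r'#!/usr/bin/perl', '#!/usr/bin/env perl', text)
    with open(path, 'w') as f:
        f.write(text)

# rewrite_shebang('prinseq-lite.pl')  # usage, assuming the script is present
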
f7039b4c486e283b178471dcf9b70206d4183022
81,410
py
Python
scipy/optimize/tests/test_linprog.py
pranavrajpal/scipy
7dcdeffed53483a60b3e054618520e0f28adeba4
[ "BSD-3-Clause" ]
1
2021-06-11T22:09:38.000Z
2021-06-11T22:09:38.000Z
scipy/optimize/tests/test_linprog.py
pranavrajpal/scipy
7dcdeffed53483a60b3e054618520e0f28adeba4
[ "BSD-3-Clause" ]
null
null
null
scipy/optimize/tests/test_linprog.py
pranavrajpal/scipy
7dcdeffed53483a60b3e054618520e0f28adeba4
[ "BSD-3-Clause" ]
1
2021-09-13T20:44:38.000Z
2021-09-13T20:44:38.000Z
""" Unit test for Linear Programming """ import sys import numpy as np from numpy.testing import (assert_, assert_allclose, assert_equal, assert_array_less, assert_warns, suppress_warnings) from pytest import raises as assert_raises from scipy.optimize import linprog, OptimizeWarning from scipy.sparse.linalg import MatrixRankWarning from scipy.linalg import LinAlgWarning import scipy.sparse import pytest has_umfpack = True try: from scikits.umfpack import UmfpackWarning except ImportError: has_umfpack = False has_cholmod = True try: import sksparse from sksparse.cholmod import cholesky as cholmod except ImportError: has_cholmod = False def _assert_iteration_limit_reached(res, maxiter): assert_(not res.success, "Incorrectly reported success") assert_(res.success < maxiter, "Incorrectly reported number of iterations") assert_equal(res.status, 1, "Failed to report iteration limit reached") def _assert_infeasible(res): # res: linprog result object assert_(not res.success, "incorrectly reported success") assert_equal(res.status, 2, "failed to report infeasible status") def _assert_unbounded(res): # res: linprog result object assert_(not res.success, "incorrectly reported success") assert_equal(res.status, 3, "failed to report unbounded status") def _assert_unable_to_find_basic_feasible_sol(res): # res: linprog result object # The status may be either 2 or 4 depending on why the feasible solution # could not be found. If the undelying problem is expected to not have a # feasible solution, _assert_infeasible should be used. assert_(not res.success, "incorrectly reported success") assert_(res.status in (2, 4), "failed to report optimization failure") def _assert_success(res, desired_fun=None, desired_x=None, rtol=1e-8, atol=1e-8): # res: linprog result object # desired_fun: desired objective function value or None # desired_x: desired solution or None if not res.success: msg = "linprog status {0}, message: {1}".format(res.status, res.message) raise AssertionError(msg) assert_equal(res.status, 0) if desired_fun is not None: assert_allclose(res.fun, desired_fun, err_msg="converged to an unexpected objective value", rtol=rtol, atol=atol) if desired_x is not None: assert_allclose(res.x, desired_x, err_msg="converged to an unexpected solution", rtol=rtol, atol=atol) def magic_square(n): """ Generates a linear program for which integer solutions represent an n x n magic square; binary decision variables represent the presence (or absence) of an integer 1 to n^2 in each position of the square. 
""" np.random.seed(0) M = n * (n**2 + 1) / 2 numbers = np.arange(n**4) // n**2 + 1 numbers = numbers.reshape(n**2, n, n) zeros = np.zeros((n**2, n, n)) A_list = [] b_list = [] # Rule 1: use every number exactly once for i in range(n**2): A_row = zeros.copy() A_row[i, :, :] = 1 A_list.append(A_row.flatten()) b_list.append(1) # Rule 2: Only one number per square for i in range(n): for j in range(n): A_row = zeros.copy() A_row[:, i, j] = 1 A_list.append(A_row.flatten()) b_list.append(1) # Rule 3: sum of rows is M for i in range(n): A_row = zeros.copy() A_row[:, i, :] = numbers[:, i, :] A_list.append(A_row.flatten()) b_list.append(M) # Rule 4: sum of columns is M for i in range(n): A_row = zeros.copy() A_row[:, :, i] = numbers[:, :, i] A_list.append(A_row.flatten()) b_list.append(M) # Rule 5: sum of diagonals is M A_row = zeros.copy() A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)] A_list.append(A_row.flatten()) b_list.append(M) A_row = zeros.copy() A_row[:, range(n), range(-1, -n - 1, -1)] = \ numbers[:, range(n), range(-1, -n - 1, -1)] A_list.append(A_row.flatten()) b_list.append(M) A = np.array(np.vstack(A_list), dtype=float) b = np.array(b_list, dtype=float) c = np.random.rand(A.shape[1]) return A, b, c, numbers def lpgen_2d(m, n): """ -> A b c LP test: m*n vars, m+n constraints row sums == n/m, col sums == 1 https://gist.github.com/denis-bz/8647461 """ np.random.seed(0) c = - np.random.exponential(size=(m, n)) Arow = np.zeros((m, m * n)) brow = np.zeros(m) for j in range(m): j1 = j + 1 Arow[j, j * n:j1 * n] = 1 brow[j] = n / m Acol = np.zeros((n, m * n)) bcol = np.zeros(n) for j in range(n): j1 = j + 1 Acol[j, j::n] = 1 bcol[j] = 1 A = np.vstack((Arow, Acol)) b = np.hstack((brow, bcol)) return A, b, c.ravel() def very_random_gen(seed=0): np.random.seed(seed) m_eq, m_ub, n = 10, 20, 50 c = np.random.rand(n)-0.5 A_ub = np.random.rand(m_ub, n)-0.5 b_ub = np.random.rand(m_ub)-0.5 A_eq = np.random.rand(m_eq, n)-0.5 b_eq = np.random.rand(m_eq)-0.5 lb = -np.random.rand(n) ub = np.random.rand(n) lb[lb < -np.random.rand()] = -np.inf ub[ub > np.random.rand()] = np.inf bounds = np.vstack((lb, ub)).T return c, A_ub, b_ub, A_eq, b_eq, bounds def nontrivial_problem(): c = [-1, 8, 4, -6] A_ub = [[-7, -7, 6, 9], [1, -1, -3, 0], [10, -10, -7, 7], [6, -1, 3, 4]] b_ub = [-3, 6, -6, 6] A_eq = [[-10, 1, 1, -8]] b_eq = [-4] x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391] f_star = 7083 / 1391 return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star def l1_regression_prob(seed=0, m=8, d=9, n=100): ''' Training data is {(x0, y0), (x1, y2), ..., (xn-1, yn-1)} x in R^d y in R n: number of training samples d: dimension of x, i.e. 
x in R^d phi: feature map R^d -> R^m m: dimension of feature space ''' np.random.seed(seed) phi = np.random.normal(0, 1, size=(m, d)) # random feature mapping w_true = np.random.randn(m) x = np.random.normal(0, 1, size=(d, n)) # features y = w_true @ (phi @ x) + np.random.normal(0, 1e-5, size=n) # measurements # construct the problem c = np.ones(m+n) c[:m] = 0 A_ub = scipy.sparse.lil_matrix((2*n, n+m)) idx = 0 for ii in range(n): A_ub[idx, :m] = phi @ x[:, ii] A_ub[idx, m+ii] = -1 A_ub[idx+1, :m] = -1*phi @ x[:, ii] A_ub[idx+1, m+ii] = -1 idx += 2 A_ub = A_ub.tocsc() b_ub = np.zeros(2*n) b_ub[0::2] = y b_ub[1::2] = -y bnds = [(None, None)]*m + [(0, None)]*n return c, A_ub, b_ub, bnds def generic_callback_test(self): # Check that callback is as advertised last_cb = {} def cb(res): message = res.pop('message') complete = res.pop('complete') assert_(res.pop('phase') in (1, 2)) assert_(res.pop('status') in range(4)) assert_(isinstance(res.pop('nit'), int)) assert_(isinstance(complete, bool)) assert_(isinstance(message, str)) last_cb['x'] = res['x'] last_cb['fun'] = res['fun'] last_cb['slack'] = res['slack'] last_cb['con'] = res['con'] c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) assert_allclose(last_cb['fun'], res['fun']) assert_allclose(last_cb['x'], res['x']) assert_allclose(last_cb['con'], res['con']) assert_allclose(last_cb['slack'], res['slack']) def test_unknown_solvers_and_options(): c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki') assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki') assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub, options={"rr_method": 'ekki-ekki-ekki'}) def test_choose_solver(): # 'highs' chooses 'dual' c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] res = linprog(c, A_ub, b_ub, method='highs') _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) A_ub = None b_ub = None A_eq = None b_eq = None bounds = None ################ # Common Tests # ################ class LinprogCommonTests: """ Base class for `linprog` tests. Generally, each test will be performed once for every derived class of LinprogCommonTests, each of which will typically change self.options and/or self.method. Effectively, these tests are run for many combination of method (simplex, revised simplex, and interior point) and options (such as pivoting rule or sparse treatment). """ ################## # Targeted Tests # ################## def test_callback(self): generic_callback_test(self) def test_disp(self): # test that display option does not break anything. A, b, c = lpgen_2d(20, 20) res = linprog(c, A_ub=A, b_ub=b, method=self.method, options={"disp": True}) _assert_success(res, desired_fun=-64.049494229) def test_docstring_example(self): # Example from linprog docstring. 
c = [-1, 4] A = [[-3, 1], [1, 2]] b = [6, 4] x0_bounds = (None, None) x1_bounds = (-3, None) res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), options=self.options, method=self.method) _assert_success(res, desired_fun=-22) def test_type_error(self): # (presumably) checks that linprog recognizes type errors # This is tested more carefully in test__linprog_clean_inputs.py c = [1] A_eq = [[1]] b_eq = "hello" assert_raises(TypeError, linprog, c, A_eq=A_eq, b_eq=b_eq, method=self.method, options=self.options) def test_aliasing_b_ub(self): # (presumably) checks that linprog does not modify b_ub # This is tested more carefully in test__linprog_clean_inputs.py c = np.array([1.0]) A_ub = np.array([[1.0]]) b_ub_orig = np.array([3.0]) b_ub = b_ub_orig.copy() bounds = (-4.0, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-4, desired_x=[-4]) assert_allclose(b_ub_orig, b_ub) def test_aliasing_b_eq(self): # (presumably) checks that linprog does not modify b_eq # This is tested more carefully in test__linprog_clean_inputs.py c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq_orig = np.array([3.0]) b_eq = b_eq_orig.copy() bounds = (-4.0, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3, desired_x=[3]) assert_allclose(b_eq_orig, b_eq) def test_non_ndarray_args(self): # (presumably) checks that linprog accepts list in place of arrays # This is tested more carefully in test__linprog_clean_inputs.py c = [1.0] A_ub = [[1.0]] b_ub = [3.0] A_eq = [[1.0]] b_eq = [2.0] bounds = (-1.0, 10.0) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=2, desired_x=[2]) def test_unknown_options(self): c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, options={}): linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=options) o = {key: self.options[key] for key in self.options} o['spam'] = 42 assert_warns(OptimizeWarning, f, c, A_ub=A_ub, b_ub=b_ub, options=o) def test_invalid_inputs(self): def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) # Test ill-formatted bounds assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)]) assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)]) assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)]) # Test other invalid inputs assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2]) assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1]) assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2]) assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1]) assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1) # this last check doesn't make sense for sparse presolve if ("_sparse_presolve" in self.options and self.options["_sparse_presolve"]): return # there aren't 3-D sparse matrices assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1) def test_sparse_constraints(self): # gh-13559: improve error message for sparse inputs when unsupported def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) np.random.seed(0) m = 100 n = 150 A_eq = scipy.sparse.rand(m, n, 0.5) x_valid = 
np.random.randn((n)) c = np.random.randn((n)) ub = x_valid + np.random.rand((n)) lb = x_valid - np.random.rand((n)) bounds = np.column_stack((lb, ub)) b_eq = A_eq * x_valid if self.method in {'simplex', 'revised simplex'}: # simplex and revised simplex should raise error with assert_raises(ValueError, match=f"Method '{self.method}' " "does not support sparse constraint matrices."): linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) else: # other methods should succeed options = {**self.options} if self.method in {'interior-point'}: options['sparse'] = True res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=options) assert res.success def test_maxiter(self): # test iteration limit w/ Enzo example c = [4, 8, 3, 0, 0, 0] A = [ [2, 5, 3, -1, 0, 0], [3, 2.5, 8, 0, -1, 0], [8, 10, 4, 0, 0, -1]] b = [185, 155, 600] np.random.seed(0) maxiter = 3 res = linprog(c, A_eq=A, b_eq=b, method=self.method, options={"maxiter": maxiter}) _assert_iteration_limit_reached(res, maxiter) assert_equal(res.nit, maxiter) def test_bounds_fixed(self): # Test fixed bounds (upper equal to lower) # If presolve option True, test if solution found in presolve (i.e. # number of iterations is 0). do_presolve = self.options.get('presolve', True) res = linprog([1], bounds=(1, 1), method=self.method, options=self.options) _assert_success(res, 1, 1) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)], method=self.method, options=self.options) _assert_success(res, 12, [5, -1, 3]) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 1], bounds=[(1, 1), (1, 3)], method=self.method, options=self.options) _assert_success(res, 2, [1, 1]) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7], bounds=[(-5, 5), (0, 10), (3.5, 3.5)], method=self.method, options=self.options) _assert_success(res, 15, [1, 7, 3.5]) if do_presolve: assert_equal(res.nit, 0) def test_bounds_infeasible(self): # Test ill-valued bounds (upper less than lower) # If presolve option True, test if solution found in presolve (i.e. # number of iterations is 0). do_presolve = self.options.get('presolve', True) res = linprog([1], bounds=(1, -2), method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)], method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) def test_bounds_infeasible_2(self): # Test ill-valued bounds (lower inf, upper -inf) # If presolve option True, test if solution found in presolve (i.e. # number of iterations is 0). # For the simplex method, the cases do not result in an # infeasible status, but in a RuntimeWarning. This is a # consequence of having _presolve() take care of feasibility # checks. See issue gh-11618. 
do_presolve = self.options.get('presolve', True) simplex_without_presolve = not do_presolve and self.method == 'simplex' c = [1, 2, 3] bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)] bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)] if simplex_without_presolve: def g(c, bounds): res = linprog(c, bounds=bounds, method=self.method, options=self.options) return res with pytest.warns(RuntimeWarning): with pytest.raises(IndexError): g(c, bounds=bounds_1) with pytest.warns(RuntimeWarning): with pytest.raises(IndexError): g(c, bounds=bounds_2) else: res = linprog(c=c, bounds=bounds_1, method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) res = linprog(c=c, bounds=bounds_2, method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) def test_empty_constraint_1(self): c = [-1, -2] res = linprog(c, method=self.method, options=self.options) _assert_unbounded(res) def test_empty_constraint_2(self): c = [-1, 1, -1, 1] bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] res = linprog(c, bounds=bounds, method=self.method, options=self.options) _assert_unbounded(res) # Unboundedness detected in presolve requires no iterations if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_empty_constraint_3(self): c = [1, -1, 1, -1] bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] res = linprog(c, bounds=bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2) def test_inequality_constraints(self): # Minimize linear function subject to linear inequality constraints. # http://www.dam.brown.edu/people/huiwang/classes/am121/Archive/simplex_121_c.pdf c = np.array([3, 2]) * -1 # maximize A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-18, desired_x=[2, 6]) def test_inequality_constraints2(self): # Minimize linear function subject to linear inequality constraints. 
# http://www.statslab.cam.ac.uk/~ff271/teaching/opt/notes/notes8.pdf # (dead link) c = [6, 3] A_ub = [[0, 3], [-1, -1], [-2, 1]] b_ub = [2, -1, -1] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3]) def test_bounds_simple(self): c = [1, 2] bounds = (1, 2) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[1, 1]) bounds = [(1, 2), (1, 2)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[1, 1]) def test_bounded_below_only_1(self): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq = np.array([3.0]) bounds = (1.0, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3, desired_x=[3]) def test_bounded_below_only_2(self): c = np.ones(3) A_eq = np.eye(3) b_eq = np.array([1, 2, 3]) bounds = (0.5, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) def test_bounded_above_only_1(self): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq = np.array([3.0]) bounds = (None, 10.0) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3, desired_x=[3]) def test_bounded_above_only_2(self): c = np.ones(3) A_eq = np.eye(3) b_eq = np.array([1, 2, 3]) bounds = (-np.inf, 4) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) def test_bounds_infinity(self): c = np.ones(3) A_eq = np.eye(3) b_eq = np.array([1, 2, 3]) bounds = (-np.inf, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) def test_bounds_mixed(self): # Problem has one unbounded variable and # another with a negative lower bound. c = np.array([-1, 4]) * -1 # maximize A_ub = np.array([[-3, 1], [1, 2]], dtype=np.float64) b_ub = [6, 4] x0_bounds = (-np.inf, np.inf) x1_bounds = (-3, np.inf) bounds = (x0_bounds, x1_bounds) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7]) def test_bounds_equal_but_infeasible(self): c = [-4, 1] A_ub = [[7, -2], [0, 1], [2, -2]] b_ub = [14, 0, 3] bounds = [(2, 2), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_bounds_equal_but_infeasible2(self): c = [-4, 1] A_eq = [[7, -2], [0, 1], [2, -2]] b_eq = [14, 0, 3] bounds = [(2, 2), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_bounds_equal_no_presolve(self): # There was a bug when a lower and upper bound were equal but # presolve was not on to eliminate the variable. The bound # was being converted to an equality constraint, but the bound # was not eliminated, leading to issues in postprocessing. 
c = [1, 2] A_ub = [[1, 2], [1.1, 2.2]] b_ub = [4, 8] bounds = [(1, 2), (2, 2)] o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_infeasible(res) def test_zero_column_1(self): m, n = 3, 4 np.random.seed(0) c = np.random.rand(n) c[1] = 1 A_eq = np.random.rand(m, n) A_eq[:, 1] = 0 b_eq = np.random.rand(m) A_ub = [[1, 0, 1, 1]] b_ub = 3 bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-9.7087836730413404) def test_zero_column_2(self): np.random.seed(0) m, n = 2, 4 c = np.random.rand(n) c[1] = -1 A_eq = np.random.rand(m, n) A_eq[:, 1] = 0 b_eq = np.random.rand(m) A_ub = np.random.rand(m, n) A_ub[:, 1] = 0 b_ub = np.random.rand(m) bounds = (None, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) # Unboundedness detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_zero_row_1(self): c = [1, 2, 3] A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] b_eq = [0, 3, 0] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3) def test_zero_row_2(self): A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] b_ub = [0, 3, 0] c = [1, 2, 3] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0) def test_zero_row_3(self): m, n = 2, 4 c = np.random.rand(n) A_eq = np.random.rand(m, n) A_eq[0, :] = 0 b_eq = np.random.rand(m) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_zero_row_4(self): m, n = 2, 4 c = np.random.rand(n) A_ub = np.random.rand(m, n) A_ub[0, :] = 0 b_ub = -np.random.rand(m) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_singleton_row_eq_1(self): c = [1, 1, 1, 2] A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] b_eq = [1, 2, 2, 4] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_singleton_row_eq_2(self): c = [1, 1, 1, 2] A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] b_eq = [1, 2, 1, 4] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=4) def test_singleton_row_ub_1(self): c = [1, 1, 1, 2] A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] b_ub = [1, 2, -2, 4] bounds = [(None, None), (0, None), (0, None), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_singleton_row_ub_2(self): c = [1, 1, 1, 2] A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] b_ub = [1, 2, -0.5, 4] bounds = [(None, None), (0, None), (0, None), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, 
options=self.options) _assert_success(res, desired_fun=0.5) def test_infeasible(self): # Test linprog response to an infeasible problem c = [-1, -1] A_ub = [[1, 0], [0, 1], [-1, -1]] b_ub = [2, 2, -5] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_infeasible_inequality_bounds(self): c = [1] A_ub = [[2]] b_ub = 4 bounds = (5, 6) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) # Infeasibility detected in presolve if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_unbounded(self): # Test linprog response to an unbounded problem c = np.array([1, 1]) * -1 # maximize A_ub = [[-1, 1], [-1, -1]] b_ub = [-1, -2] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) def test_unbounded_below_no_presolve_corrected(self): c = [1] bounds = [(None, 1)] o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c=c, bounds=bounds, method=self.method, options=o) if self.method == "revised simplex": # Revised simplex has a special pathway for no constraints. assert_equal(res.status, 5) else: _assert_unbounded(res) def test_unbounded_no_nontrivial_constraints_1(self): """ Test whether presolve pathway for detecting unboundedness after constraint elimination is working. """ c = np.array([0, 0, 0, 1, -1, -1]) A_ub = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1]]) b_ub = np.array([2, -2, 0]) bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) if not self.method.lower().startswith("highs"): assert_equal(res.x[-1], np.inf) assert_equal(res.message[:36], "The problem is (trivially) unbounded") def test_unbounded_no_nontrivial_constraints_2(self): """ Test whether presolve pathway for detecting unboundedness after constraint elimination is working. 
""" c = np.array([0, 0, 0, 1, -1, 1]) A_ub = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]) b_ub = np.array([2, -2, 0]) bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1), (None, 0)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) if not self.method.lower().startswith("highs"): assert_equal(res.x[-1], -np.inf) assert_equal(res.message[:36], "The problem is (trivially) unbounded") def test_cyclic_recovery(self): # Test linprogs recovery from cycling using the Klee-Minty problem # Klee-Minty https://www.math.ubc.ca/~israel/m340/kleemin3.pdf c = np.array([100, 10, 1]) * -1 # maximize A_ub = [[1, 0, 0], [20, 1, 0], [200, 20, 1]] b_ub = [1, 100, 10000] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7) def test_cyclic_bland(self): # Test the effect of Bland's rule on a cycling problem c = np.array([-10, 57, 9, 24.]) A_ub = np.array([[0.5, -5.5, -2.5, 9], [0.5, -1.5, -0.5, 1], [1, 0, 0, 0]]) b_ub = [0, 0, 1] # copy the existing options dictionary but change maxiter maxiter = 100 o = {key: val for key, val in self.options.items()} o['maxiter'] = maxiter res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) if self.method == 'simplex' and not self.options.get('bland'): # simplex cycles without Bland's rule _assert_iteration_limit_reached(res, o['maxiter']) else: # other methods, including simplex with Bland's rule, succeed _assert_success(res, desired_x=[1, 0, 1, 0]) # note that revised simplex skips this test because it may or may not # cycle depending on the initial basis def test_remove_redundancy_infeasibility(self): # mostly a test of redundancy removal, which is carefully tested in # test__remove_redundancy.py m, n = 10, 10 c = np.random.rand(n) A_eq = np.random.rand(m, n) b_eq = np.random.rand(m) A_eq[-1, :] = 2 * A_eq[-2, :] b_eq[-1] *= -1 with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) ################# # General Tests # ################# def test_nontrivial_problem(self): # Problem involves all constraint types, # negative resource limits, and rounding issues. c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=f_star, desired_x=x_star) def test_lpgen_problem(self): # Test linprog with a rather large problem (400 variables, # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 A_ub, b_ub, c = lpgen_2d(20, 20) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-64.049494229) def test_network_flow(self): # A network flow problem with supply and demand at nodes # and with costs along directed edges. 
# https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18] n, p = -1, 1 A_eq = [ [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0], [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0], [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0], [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p], [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]] b_eq = [0, 19, -16, 33, 0, 0, -36] with suppress_warnings() as sup: sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7) def test_network_flow_limited_capacity(self): # A network flow problem with supply and demand at nodes # and with costs and capacities along directed edges. # http://blog.sommer-forst.de/2013/04/10/ c = [2, 2, 1, 3, 1] bounds = [ [0, 4], [0, 2], [0, 2], [0, 3], [0, 5]] n, p = -1, 1 A_eq = [ [n, n, 0, 0, 0], [p, 0, n, n, 0], [0, p, p, 0, n], [0, 0, 0, p, p]] b_eq = [-4, 0, 0, 4] with suppress_warnings() as sup: # this is an UmfpackWarning but I had trouble importing it if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(OptimizeWarning, "Solving system with option...") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=14) def test_simplex_algorithm_wikipedia_example(self): # https://en.wikipedia.org/wiki/Simplex_algorithm#Example c = [-2, -3, -4] A_ub = [ [3, 2, 1], [2, 5, 3]] b_ub = [10, 15] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-20) def test_enzo_example(self): # https://github.com/scipy/scipy/issues/1779 lp2.py # # Translated from Octave code at: # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm # and placed under MIT licence by Enzo Michelangeli # with permission explicitly granted by the original author, # Prof. 
Kazunobu Yoshida c = [4, 8, 3, 0, 0, 0] A_eq = [ [2, 5, 3, -1, 0, 0], [3, 2.5, 8, 0, -1, 0], [8, 10, 4, 0, 0, -1]] b_eq = [185, 155, 600] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=317.5, desired_x=[66.25, 0, 17.5, 0, 183.75, 0], atol=6e-6, rtol=1e-7) def test_enzo_example_b(self): # rescued from https://github.com/scipy/scipy/pull/218 c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8] A_eq = [[-1, -1, -1, 0, 0, 0], [0, 0, 0, 1, 1, 1], [1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1]] b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3] with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-1.77, desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3]) def test_enzo_example_c_with_degeneracy(self): # rescued from https://github.com/scipy/scipy/pull/218 m = 20 c = -np.ones(m) tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1) A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) b_eq = [0, 0] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0, desired_x=np.zeros(m)) def test_enzo_example_c_with_unboundedness(self): # rescued from https://github.com/scipy/scipy/pull/218 m = 50 c = -np.ones(m) tmp = 2 * np.pi * np.arange(m) / (m + 1) A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) b_eq = [0, 0] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) def test_enzo_example_c_with_infeasibility(self): # rescued from https://github.com/scipy/scipy/pull/218 m = 50 c = -np.ones(m) tmp = 2 * np.pi * np.arange(m) / (m + 1) A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) b_eq = [1, 1] o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_infeasible(res) def test_basic_artificial_vars(self): # Problem is chosen to test two phase simplex methods when at the end # of phase 1 some artificial variables remain in the basis. # Also, for `method='simplex'`, the row in the tableau corresponding # with the artificial variables is not all zero. 
c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004]) A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0], [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0], [1.0, 1.0, 0, 0, 0, 0]]) b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0]) A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]]) b_eq = np.array([0, 0]) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c), atol=2e-6) def test_optimize_result(self): # check all fields in OptimizeResult c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0) res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) assert_(res.success) assert_(res.nit) assert_(not res.status) assert_(res.message == "Optimization terminated successfully.") assert_allclose(c @ res.x, res.fun) assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11) assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11) ################# # Bug Fix Tests # ################# def test_bug_5400(self): # https://github.com/scipy/scipy/issues/5400 bounds = [ (0, None), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)] f = 1 / 9 g = -1e4 h = -3.1 A_ub = np.array([ [1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0], [1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0], [1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], [0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0], [0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0], [0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0], [0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0], [0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0], [0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]]) b_ub = np.array([ 0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900, 900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, 
desired_fun=-106.63507541835018) def test_bug_6139(self): # linprog(method='simplex') fails to find a basic feasible solution # if phase 1 pseudo-objective function is outside the provided tol. # https://github.com/scipy/scipy/issues/6139 # Note: This is not strictly a bug as the default tolerance determines # if a result is "close enough" to zero and should not be expected # to work for all cases. c = np.array([1, 1, 1]) A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]]) b_eq = np.array([5.00000000e+00, -1.00000000e+04]) A_ub = -np.array([[0., 1000000., 1010000.]]) b_ub = -np.array([10000000.]) bounds = (None, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=14.95, desired_x=np.array([5, 4.95, 5])) def test_bug_6690(self): # linprog simplex used to violate bound constraint despite reporting # success. # https://github.com/scipy/scipy/issues/6690 A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]]) b_eq = np.array([0.9626]) A_ub = np.array([ [0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0], [0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37], [0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0] ]) b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022]) bounds = np.array([ [-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73], [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15] ]).T c = np.array([ -1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28 ]) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(OptimizeWarning, "Solving system with option 'cholesky'") sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) desired_fun = -1.19099999999 desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800, 0.5000, 0.4700, 0.0900, 0.3200, -0.7300]) _assert_success(res, desired_fun=desired_fun, desired_x=desired_x) # Add small tol value to ensure arrays are less than or equal. atol = 1e-6 assert_array_less(bounds[:, 0] - atol, res.x) assert_array_less(res.x, bounds[:, 1] + atol) def test_bug_7044(self): # linprog simplex failed to "identify correct constraints" (?) # leading to a non-optimal solution if A is rank-deficient. # https://github.com/scipy/scipy/issues/7044 A_eq, b_eq, c, N = magic_square(3) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) desired_fun = 1.730550597 _assert_success(res, desired_fun=desired_fun) assert_allclose(A_eq.dot(res.x), b_eq) assert_array_less(np.zeros(res.x.size) - 1e-5, res.x) def test_bug_7237(self): # https://github.com/scipy/scipy/issues/7237 # linprog simplex "explodes" when the pivot value is very # close to zero. 
c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0]) A_ub = np.array([ [1., -724., 911., -551., -555., -896., 478., -80., -293.], [1., 566., 42., 937., 233., 883., 392., -909., 57.], [1., -208., -894., 539., 321., 532., -924., 942., 55.], [1., 857., -859., 83., 462., -265., -971., 826., 482.], [1., 314., -424., 245., -424., 194., -443., -104., -429.], [1., 540., 679., 361., 149., -827., 876., 633., 302.], [0., -1., -0., -0., -0., -0., -0., -0., -0.], [0., -0., -1., -0., -0., -0., -0., -0., -0.], [0., -0., -0., -1., -0., -0., -0., -0., -0.], [0., -0., -0., -0., -1., -0., -0., -0., -0.], [0., -0., -0., -0., -0., -1., -0., -0., -0.], [0., -0., -0., -0., -0., -0., -1., -0., -0.], [0., -0., -0., -0., -0., -0., -0., -1., -0.], [0., -0., -0., -0., -0., -0., -0., -0., -1.], [0., 1., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 1., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 1., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 1., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 1., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 1., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 1., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 1.] ]) b_ub = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.]) A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]]) b_eq = np.array([[1.]]) bounds = [(None, None)] * 9 res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=108.568535, atol=1e-6) def test_bug_8174(self): # https://github.com/scipy/scipy/issues/8174 # The simplex method sometimes "explodes" if the pivot value is very # close to zero. A_ub = np.array([ [22714, 1008, 13380, -2713.5, -1116], [-4986, -1092, -31220, 17386.5, 684], [-4986, 0, 0, -2713.5, 0], [22714, 0, 0, 17386.5, 0]]) b_ub = np.zeros(A_ub.shape[0]) c = -np.ones(A_ub.shape[1]) bounds = [(0, 1)] * A_ub.shape[1] with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex': _assert_unable_to_find_basic_feasible_sol(res) else: _assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6) def test_bug_8174_2(self): # Test supplementary example from issue 8174. 
# https://github.com/scipy/scipy/issues/8174 # https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution c = np.array([1, 0, 0, 0, 0, 0, 0]) A_ub = -np.identity(7) b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]]) A_eq = np.array([ [1, 1, 1, 1, 1, 1, 0], [0.3, 1.3, 0.9, 0, 0, 0, -1], [0.3, 0, 0, 0, 0, 0, -2/3], [0, 0.65, 0, 0, 0, 0, -1/15], [0, 0, 0.3, 0, 0, 0, -1/15] ]) b_eq = np.array([[100], [0], [0], [0], [0]]) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=43.3333333331385) def test_bug_8561(self): # Test that pivot row is chosen correctly when using Bland's rule # This was originally written for the simplex method with # Bland's rule only, but it doesn't hurt to test all methods/options # https://github.com/scipy/scipy/issues/8561 c = np.array([7, 0, -4, 1.5, 1.5]) A_ub = np.array([ [4, 5.5, 1.5, 1.0, -3.5], [1, -2.5, -2, 2.5, 0.5], [3, -0.5, 4, -12.5, -7], [-1, 4.5, 2, -3.5, -2], [5.5, 2, -4.5, -1, 9.5]]) b_ub = np.array([0, 0, 0, 0, 1]) res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options, method=self.method) _assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3]) def test_bug_8662(self): # linprog simplex used to report incorrect optimal results # https://github.com/scipy/scipy/issues/8662 c = [-10, 10, 6, 3] A_ub = [[8, -8, -4, 6], [-8, 8, 4, -6], [-4, 4, 8, -4], [3, -3, -3, -10]] b_ub = [9, -9, -9, -4] bounds = [(0, None), (0, None), (0, None), (0, None)] desired_fun = 36.0000000000 with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) # Set boundary condition as a constraint A_ub.append([0, 0, -1, 0]) b_ub.append(0) bounds[2] = (None, None) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) rtol = 1e-5 _assert_success(res1, desired_fun=desired_fun, rtol=rtol) _assert_success(res2, desired_fun=desired_fun, rtol=rtol) def test_bug_8663(self): # exposed a bug in presolve # https://github.com/scipy/scipy/issues/8663 c = [1, 5] A_eq = [[0, -7]] b_eq = [-6] bounds = [(0, None), (None, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7) def test_bug_8664(self): # interior-point has trouble with this when presolve is off # tested for interior-point with presolve off in TestLinprogIPSpecific # https://github.com/scipy/scipy/issues/8664 c = [4] A_ub = [[2], [5]] b_ub = [4, 4] A_eq = [[0], [-8], [9]] b_eq = [3, 2, 10] with suppress_warnings() as sup: sup.filter(RuntimeWarning) sup.filter(OptimizeWarning, "Solving system with option...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_bug_8973(self): """ Test whether bug described at: https://github.com/scipy/scipy/issues/8973 was fixed. 
""" c = np.array([0, 0, 0, 1, -1]) A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]]) b_ub = np.array([2, -2]) bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) # solution vector x is not unique _assert_success(res, desired_fun=-2) # HiGHS IPM had an issue where the following wasn't true! assert_equal(c @ res.x, res.fun) def test_bug_8973_2(self): """ Additional test for: https://github.com/scipy/scipy/issues/8973 suggested in https://github.com/scipy/scipy/pull/8985 review by @antonior92 """ c = np.zeros(1) A_ub = np.array([[1]]) b_ub = np.array([-2]) bounds = (None, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[-2], desired_fun=0) def test_bug_10124(self): """ Test for linprog docstring problem 'disp'=True caused revised simplex failure """ c = np.zeros(1) A_ub = np.array([[1]]) b_ub = np.array([-2]) bounds = (None, None) c = [-1, 4] A_ub = [[-3, 1], [1, 2]] b_ub = [6, 4] bounds = [(None, None), (-3, None)] o = {"disp": True} o.update(self.options) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_success(res, desired_x=[10, -3], desired_fun=-22) def test_bug_10349(self): """ Test for redundancy removal tolerance issue https://github.com/scipy/scipy/issues/10349 """ A_eq = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0], [0, 1, 0, 0, 0, 1]]) b_eq = np.array([221, 210, 10, 141, 198, 102]) c = np.concatenate((0, 1, np.zeros(4)), axis=None) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92) def test_bug_10466(self): """ Test that autoscale fixes poorly-scaled problem """ c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.] 
A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.], [1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.], [1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], [1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.], [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.], [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]] b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08, 1.00663296e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09] o = {} # HiGHS methods don't use autoscale option if not self.method.startswith("highs"): o = {"autoscale": True} o.update(self.options) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "Solving system with option...") if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") sup.filter(RuntimeWarning, "divide by zero encountered...") sup.filter(RuntimeWarning, "overflow encountered...") sup.filter(RuntimeWarning, "invalid value encountered...") sup.filter(LinAlgWarning, "Ill-conditioned matrix...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) assert_allclose(res.fun, -8589934560) ######################### # Method-specific Tests # ######################### class LinprogSimplexTests(LinprogCommonTests): method = "simplex" class LinprogIPTests(LinprogCommonTests): method = "interior-point" class LinprogRSTests(LinprogCommonTests): method = "revised simplex" # Revised simplex does not reliably solve these problems. # Failure is intermittent due to the random choice of elements to complete # the basis after phase 1 terminates. In any case, linprog exists # gracefully, reporting numerical difficulties. I do not think this should # prevent revised simplex from being merged, as it solves the problems # most of the time and solves a broader range of problems than the existing # simplex implementation. # I believe that the root cause is the same for all three and that this # same issue prevents revised simplex from solving many other problems # reliably. Somehow the pivoting rule allows the algorithm to pivot into # a singular basis. I haven't been able to find a reference that # acknowledges this possibility, suggesting that there is a bug. On the # other hand, the pivoting rule is quite simple, and I can't find a # mistake, which suggests that this is a possibility with the pivoting # rule. Hopefully, a better pivoting rule will fix the issue. 
def test_bug_5400(self): pytest.skip("Intermittent failure acceptable.") def test_bug_8662(self): pytest.skip("Intermittent failure acceptable.") def test_network_flow(self): pytest.skip("Intermittent failure acceptable.") class LinprogHiGHSTests(LinprogCommonTests): def test_callback(self): # this is the problem from test_callback cb = lambda res: None c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method) _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) @pytest.mark.parametrize("options", [{"maxiter": -1}, {"disp": -1}, {"presolve": -1}, {"time_limit": -1}, {"dual_feasibility_tolerance": -1}, {"primal_feasibility_tolerance": -1}, {"ipm_optimality_tolerance": -1}, {"simplex_dual_edge_weight_strategy": "ekki"}, ]) def test_invalid_option_values(self, options): def f(options): linprog(1, method=self.method, options=options) options.update(self.options) assert_warns(OptimizeWarning, f, options=options) def test_crossover(self): c = np.array([1, 1]) * -1 # maximize A_ub = np.array([[1, 1]]) b_ub = [1] res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) # there should be nonzero crossover iterations for IPM (only) assert_equal(res.crossover_nit == 0, self.method != "highs-ipm") ################################ # Simplex Option-Specific Tests# ################################ class TestLinprogSimplexDefault(LinprogSimplexTests): def setup_method(self): self.options = {} def test_bug_5400(self): pytest.skip("Simplex fails on this problem.") def test_bug_7237_low_tol(self): # Fails if the tolerance is too strict. Here, we test that # even if the solution is wrong, the appropriate error is raised. pytest.skip("Simplex fails on this problem.") def test_bug_8174_low_tol(self): # Fails if the tolerance is too strict. Here, we test that # even if the solution is wrong, the appropriate warning is issued. self.options.update({'tol': 1e-12}) with pytest.warns(OptimizeWarning): super(TestLinprogSimplexDefault, self).test_bug_8174() class TestLinprogSimplexBland(LinprogSimplexTests): def setup_method(self): self.options = {'bland': True} def test_bug_5400(self): pytest.skip("Simplex fails on this problem.") def test_bug_8174_low_tol(self): # Fails if the tolerance is too strict. Here, we test that # even if the solution is wrong, the appropriate error is raised. self.options.update({'tol': 1e-12}) with pytest.raises(AssertionError): with pytest.warns(OptimizeWarning): super(TestLinprogSimplexBland, self).test_bug_8174() class TestLinprogSimplexNoPresolve(LinprogSimplexTests): def setup_method(self): self.options = {'presolve': False} is_32_bit = np.intp(0).itemsize < 8 is_linux = sys.platform.startswith('linux') @pytest.mark.xfail( condition=is_32_bit and is_linux, reason='Fails with warning on 32-bit linux') def test_bug_5400(self): super(TestLinprogSimplexNoPresolve, self).test_bug_5400() def test_bug_6139_low_tol(self): # Linprog(method='simplex') fails to find a basic feasible solution # if phase 1 pseudo-objective function is outside the provided tol. # https://github.com/scipy/scipy/issues/6139 # Without ``presolve`` eliminating such rows the result is incorrect. 
self.options.update({'tol': 1e-12}) with pytest.raises(AssertionError, match='linprog status 4'): return super(TestLinprogSimplexNoPresolve, self).test_bug_6139() def test_bug_7237_low_tol(self): pytest.skip("Simplex fails on this problem.") def test_bug_8174_low_tol(self): # Fails if the tolerance is too strict. Here, we test that # even if the solution is wrong, the appropriate warning is issued. self.options.update({'tol': 1e-12}) with pytest.warns(OptimizeWarning): super(TestLinprogSimplexNoPresolve, self).test_bug_8174() def test_unbounded_no_nontrivial_constraints_1(self): pytest.skip("Tests behavior specific to presolve") def test_unbounded_no_nontrivial_constraints_2(self): pytest.skip("Tests behavior specific to presolve") ####################################### # Interior-Point Option-Specific Tests# ####################################### class TestLinprogIPDense(LinprogIPTests): options = {"sparse": False} if has_cholmod: class TestLinprogIPSparseCholmod(LinprogIPTests): options = {"sparse": True, "cholesky": True} if has_umfpack: class TestLinprogIPSparseUmfpack(LinprogIPTests): options = {"sparse": True, "cholesky": False} def test_bug_10466(self): pytest.skip("Autoscale doesn't fix everything, and that's OK.") class TestLinprogIPSparse(LinprogIPTests): options = {"sparse": True, "cholesky": False, "sym_pos": False} @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " "perturbations in linear system solution in " "_linprog_ip._sym_solve.") def test_bug_6139(self): super(TestLinprogIPSparse, self).test_bug_6139() @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') def test_bug_6690(self): # Test defined in base class, but can't mark as xfail there super(TestLinprogIPSparse, self).test_bug_6690() def test_magic_square_sparse_no_presolve(self): # test linprog with a problem with a rank-deficient A_eq matrix A_eq, b_eq, c, N = magic_square(3) bounds = (0, 1) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(MatrixRankWarning, "Matrix is exactly singular") sup.filter(OptimizeWarning, "Solving system with option...") o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_success(res, desired_fun=1.730550597) def test_sparse_solve_options(self): # checking that problem is solved with all column permutation options A_eq, b_eq, c, N = magic_square(3) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(OptimizeWarning, "Invalid permc_spec option") o = {key: self.options[key] for key in self.options} permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD', 'ekki-ekki-ekki') # 'ekki-ekki-ekki' raises warning about invalid permc_spec option # and uses default for permc_spec in permc_specs: o["permc_spec"] = permc_spec res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_success(res, desired_fun=1.730550597) class TestLinprogIPSparsePresolve(LinprogIPTests): options = {"sparse": True, "_sparse_presolve": True} @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " "perturbations in linear system solution in " "_linprog_ip._sym_solve.") def test_bug_6139(self): super(TestLinprogIPSparsePresolve, self).test_bug_6139() def test_enzo_example_c_with_infeasibility(self): pytest.skip('_sparse_presolve=True incompatible with presolve=False') @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') def 
test_bug_6690(self): # Test defined in base class, but can't mark as xfail there super(TestLinprogIPSparsePresolve, self).test_bug_6690() class TestLinprogIPSpecific: method = "interior-point" # the following tests don't need to be performed separately for # sparse presolve, sparse after presolve, and dense def test_solver_select(self): # check that default solver is selected as expected if has_cholmod: options = {'sparse': True, 'cholesky': True} elif has_umfpack: options = {'sparse': True, 'cholesky': False} else: options = {'sparse': True, 'cholesky': False, 'sym_pos': False} A, b, c = lpgen_2d(20, 20) res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options) res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver assert_allclose(res1.fun, res2.fun, err_msg="linprog default solver unexpected result", rtol=1e-15, atol=1e-15) def test_unbounded_below_no_presolve_original(self): # formerly caused segfault in TravisCI w/ "cholesky":True c = [-1] bounds = [(None, 1)] res = linprog(c=c, bounds=bounds, method=self.method, options={"presolve": False, "cholesky": True}) _assert_success(res, desired_fun=-1) def test_cholesky(self): # use cholesky factorization and triangular solves A, b, c = lpgen_2d(20, 20) res = linprog(c, A_ub=A, b_ub=b, method=self.method, options={"cholesky": True}) # only for dense _assert_success(res, desired_fun=-64.049494229) def test_alternate_initial_point(self): # use "improved" initial point A, b, c = lpgen_2d(20, 20) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") sup.filter(OptimizeWarning, "Solving system with option...") sup.filter(LinAlgWarning, "Ill-conditioned matrix...") res = linprog(c, A_ub=A, b_ub=b, method=self.method, options={"ip": True, "disp": True}) # ip code is independent of sparse/dense _assert_success(res, desired_fun=-64.049494229) def test_bug_8664(self): # interior-point has trouble with this when presolve is off c = [4] A_ub = [[2], [5]] b_ub = [4, 4] A_eq = [[0], [-8], [9]] b_eq = [3, 2, 10] with suppress_warnings() as sup: sup.filter(RuntimeWarning) sup.filter(OptimizeWarning, "Solving system with option...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options={"presolve": False}) assert_(not res.success, "Incorrectly reported success") ######################################## # Revised Simplex Option-Specific Tests# ######################################## class TestLinprogRSCommon(LinprogRSTests): options = {} def test_cyclic_bland(self): pytest.skip("Intermittent failure acceptable.") def test_nontrivial_problem_with_guess(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_unbounded_variables(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() bounds = [(None, None), (None, None), (0, None), (None, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_bounded_variables(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() bounds = [(None, 1), (1, None), (0, None), (.4, .6)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, 
desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_negative_unbounded_variable(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() b_eq = [4] x_star = np.array([-219/385, 582/385, 0, 4/10]) f_star = 3951/385 bounds = [(None, None), (1, None), (0, None), (.4, .6)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_bad_guess(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() bad_guess = [1, 2, 3, .5] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=bad_guess) assert_equal(res.status, 6) def test_redundant_constraints_with_guess(self): A, b, c, N = magic_square(3) p = np.random.rand(*c.shape) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_eq=A, b_eq=b, method=self.method) res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x) res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x) _assert_success(res2, desired_fun=1.730550597) assert_equal(res2.nit, 0) _assert_success(res3) assert_(res3.nit < res.nit) # hot start reduces iterations class TestLinprogRSBland(LinprogRSTests): options = {"pivot": "bland"} ############################################ # HiGHS-Simplex-Dual Option-Specific Tests # ############################################ class TestLinprogHiGHSSimplexDual(LinprogHiGHSTests): method = "highs-ds" options = {} def test_lad_regression(self): '''The scaled model should be optimal but unscaled model infeasible.''' c, A_ub, b_ub, bnds = l1_regression_prob() res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, method=self.method, options=self.options) assert_equal(res.status, 4) assert_('An optimal solution to the scaled ' 'model was found but' in res.message) assert_(res.x is not None) assert_(np.all(res.slack > -1e-6)) assert_(np.all(res.x <= [np.inf if u is None else u for l, u in bnds])) assert_(np.all(res.x >= [-np.inf if l is None else l for l, u in bnds])) ################################### # HiGHS-IPM Option-Specific Tests # ################################### class TestLinprogHiGHSIPM(LinprogHiGHSTests): method = "highs-ipm" options = {} ########################### # Autoscale-Specific Tests# ########################### class AutoscaleTests: options = {"autoscale": True} test_bug_6139 = LinprogCommonTests.test_bug_6139 test_bug_6690 = LinprogCommonTests.test_bug_6690 test_bug_7237 = LinprogCommonTests.test_bug_7237 class TestAutoscaleIP(AutoscaleTests): method = "interior-point" def test_bug_6139(self): self.options['tol'] = 1e-10 return AutoscaleTests.test_bug_6139(self) class TestAutoscaleSimplex(AutoscaleTests): method = "simplex" class TestAutoscaleRS(AutoscaleTests): method = "revised simplex" def test_nontrivial_problem_with_guess(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_bad_guess(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() bad_guess = [1, 2, 3, .5] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, 
x0=bad_guess) assert_equal(res.status, 6) ########################### # Redundancy Removal Tests# ########################### class RRTests: method = "interior-point" LCT = LinprogCommonTests # these are a few of the existing tests that have redundancy test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility test_bug_10349 = LCT.test_bug_10349 test_bug_7044 = LCT.test_bug_7044 test_NFLC = LCT.test_network_flow_limited_capacity test_enzo_example_b = LCT.test_enzo_example_b class TestRRSVD(RRTests): options = {"rr_method": "SVD"} class TestRRPivot(RRTests): options = {"rr_method": "pivot"} class TestRRID(RRTests): options = {"rr_method": "ID"}
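# Editor's footnote, not part of the original file: the three TestRR*
# classes above simply re-run a few redundancy-heavy problems once per
# supported redundancy-removal backend. A minimal standalone sketch of the
# same `rr_method` option on the public API, reusing the `magic_square`
# helper defined in this file (variable names here are illustrative):
if __name__ == "__main__":
    A_eq_rr, b_eq_rr, c_rr, _ = magic_square(3)  # rank-deficient A_eq
    for rr in ("SVD", "pivot", "ID"):
        r = linprog(c_rr, A_eq=A_eq_rr, b_eq=b_eq_rr,
                    method="interior-point", options={"rr_method": rr})
        print(rr, r.status, round(r.fun, 9))  # all backends should agree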
39.008146
107
0.542132
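# Editor's sketch, an assumption rather than the dataset's actual pipeline:
# the three bare values above (avg_line_length, max_line_length,
# alphanum_fraction) are consistent with being derived from the content
# field roughly as follows; the function name is hypothetical.
def row_stats(content: str):
    lines = content.splitlines()
    avg_line_length = len(content) / max(len(lines), 1)  # ~39.008146 here
    max_line_length = max((len(ln) for ln in lines), default=0)  # 107 here
    alphanum_fraction = (sum(ch.isalnum() for ch in content)
                         / max(len(content), 1))  # ~0.542132 here
    return avg_line_length, max_line_length, alphanum_fraction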
import sys import numpy as np from numpy.testing import (assert_, assert_allclose, assert_equal, assert_array_less, assert_warns, suppress_warnings) from pytest import raises as assert_raises from scipy.optimize import linprog, OptimizeWarning from scipy.sparse.linalg import MatrixRankWarning from scipy.linalg import LinAlgWarning import scipy.sparse import pytest has_umfpack = True try: from scikits.umfpack import UmfpackWarning except ImportError: has_umfpack = False has_cholmod = True try: import sksparse from sksparse.cholmod import cholesky as cholmod except ImportError: has_cholmod = False def _assert_iteration_limit_reached(res, maxiter): assert_(not res.success, "Incorrectly reported success") assert_(res.nit <= maxiter, "Incorrectly reported number of iterations") assert_equal(res.status, 1, "Failed to report iteration limit reached") def _assert_infeasible(res): assert_(not res.success, "incorrectly reported success") assert_equal(res.status, 2, "failed to report infeasible status") def _assert_unbounded(res): assert_(not res.success, "incorrectly reported success") assert_equal(res.status, 3, "failed to report unbounded status") def _assert_unable_to_find_basic_feasible_sol(res): assert_(not res.success, "incorrectly reported success") assert_(res.status in (2, 4), "failed to report optimization failure") def _assert_success(res, desired_fun=None, desired_x=None, rtol=1e-8, atol=1e-8): if not res.success: msg = "linprog status {0}, message: {1}".format(res.status, res.message) raise AssertionError(msg) assert_equal(res.status, 0) if desired_fun is not None: assert_allclose(res.fun, desired_fun, err_msg="converged to an unexpected objective value", rtol=rtol, atol=atol) if desired_x is not None: assert_allclose(res.x, desired_x, err_msg="converged to an unexpected solution", rtol=rtol, atol=atol) def magic_square(n): np.random.seed(0) M = n * (n**2 + 1) / 2 numbers = np.arange(n**4) // n**2 + 1 numbers = numbers.reshape(n**2, n, n) zeros = np.zeros((n**2, n, n)) A_list = [] b_list = [] for i in range(n**2): A_row = zeros.copy() A_row[i, :, :] = 1 A_list.append(A_row.flatten()) b_list.append(1) for i in range(n): for j in range(n): A_row = zeros.copy() A_row[:, i, j] = 1 A_list.append(A_row.flatten()) b_list.append(1) for i in range(n): A_row = zeros.copy() A_row[:, i, :] = numbers[:, i, :] A_list.append(A_row.flatten()) b_list.append(M) for i in range(n): A_row = zeros.copy() A_row[:, :, i] = numbers[:, :, i] A_list.append(A_row.flatten()) b_list.append(M) A_row = zeros.copy() A_row[:, range(n), range(n)] = numbers[:, range(n), range(n)] A_list.append(A_row.flatten()) b_list.append(M) A_row = zeros.copy() A_row[:, range(n), range(-1, -n - 1, -1)] = \ numbers[:, range(n), range(-1, -n - 1, -1)] A_list.append(A_row.flatten()) b_list.append(M) A = np.array(np.vstack(A_list), dtype=float) b = np.array(b_list, dtype=float) c = np.random.rand(A.shape[1]) return A, b, c, numbers def lpgen_2d(m, n): np.random.seed(0) c = - np.random.exponential(size=(m, n)) Arow = np.zeros((m, m * n)) brow = np.zeros(m) for j in range(m): j1 = j + 1 Arow[j, j * n:j1 * n] = 1 brow[j] = n / m Acol = np.zeros((n, m * n)) bcol = np.zeros(n) for j in range(n): j1 = j + 1 Acol[j, j::n] = 1 bcol[j] = 1 A = np.vstack((Arow, Acol)) b = np.hstack((brow, bcol)) return A, b, c.ravel() def very_random_gen(seed=0): np.random.seed(seed) m_eq, m_ub, n = 10, 20, 50 c = np.random.rand(n)-0.5 A_ub = np.random.rand(m_ub, n)-0.5 b_ub = np.random.rand(m_ub)-0.5 A_eq = np.random.rand(m_eq, n)-0.5 b_eq =
np.random.rand(m_eq)-0.5 lb = -np.random.rand(n) ub = np.random.rand(n) lb[lb < -np.random.rand()] = -np.inf ub[ub > np.random.rand()] = np.inf bounds = np.vstack((lb, ub)).T return c, A_ub, b_ub, A_eq, b_eq, bounds def nontrivial_problem(): c = [-1, 8, 4, -6] A_ub = [[-7, -7, 6, 9], [1, -1, -3, 0], [10, -10, -7, 7], [6, -1, 3, 4]] b_ub = [-3, 6, -6, 6] A_eq = [[-10, 1, 1, -8]] b_eq = [-4] x_star = [101 / 1391, 1462 / 1391, 0, 752 / 1391] f_star = 7083 / 1391 return c, A_ub, b_ub, A_eq, b_eq, x_star, f_star def l1_regression_prob(seed=0, m=8, d=9, n=100): np.random.seed(seed) phi = np.random.normal(0, 1, size=(m, d)) w_true = np.random.randn(m) x = np.random.normal(0, 1, size=(d, n)) y = w_true @ (phi @ x) + np.random.normal(0, 1e-5, size=n) c = np.ones(m+n) c[:m] = 0 A_ub = scipy.sparse.lil_matrix((2*n, n+m)) idx = 0 for ii in range(n): A_ub[idx, :m] = phi @ x[:, ii] A_ub[idx, m+ii] = -1 A_ub[idx+1, :m] = -1*phi @ x[:, ii] A_ub[idx+1, m+ii] = -1 idx += 2 A_ub = A_ub.tocsc() b_ub = np.zeros(2*n) b_ub[0::2] = y b_ub[1::2] = -y bnds = [(None, None)]*m + [(0, None)]*n return c, A_ub, b_ub, bnds def generic_callback_test(self): last_cb = {} def cb(res): message = res.pop('message') complete = res.pop('complete') assert_(res.pop('phase') in (1, 2)) assert_(res.pop('status') in range(4)) assert_(isinstance(res.pop('nit'), int)) assert_(isinstance(complete, bool)) assert_(isinstance(message, str)) last_cb['x'] = res['x'] last_cb['fun'] = res['fun'] last_cb['slack'] = res['slack'] last_cb['con'] = res['con'] c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] res = linprog(c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) assert_allclose(last_cb['fun'], res['fun']) assert_allclose(last_cb['x'], res['x']) assert_allclose(last_cb['con'], res['con']) assert_allclose(last_cb['slack'], res['slack']) def test_unknown_solvers_and_options(): c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub, method='ekki-ekki-ekki') assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub, method='highs-ekki') assert_raises(ValueError, linprog, c, A_ub=A_ub, b_ub=b_ub, options={"rr_method": 'ekki-ekki-ekki'}) def test_choose_solver(): c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] res = linprog(c, A_ub, b_ub, method='highs') _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) A_ub = None b_ub = None A_eq = None b_eq = None bounds = None class LinprogCommonTests: def test_callback(self): generic_callback_test(self) def test_disp(self): A, b, c = lpgen_2d(20, 20) res = linprog(c, A_ub=A, b_ub=b, method=self.method, options={"disp": True}) _assert_success(res, desired_fun=-64.049494229) def test_docstring_example(self): c = [-1, 4] A = [[-3, 1], [1, 2]] b = [6, 4] x0_bounds = (None, None) x1_bounds = (-3, None) res = linprog(c, A_ub=A, b_ub=b, bounds=(x0_bounds, x1_bounds), options=self.options, method=self.method) _assert_success(res, desired_fun=-22) def test_type_error(self): c = [1] A_eq = [[1]] b_eq = "hello" assert_raises(TypeError, linprog, c, A_eq=A_eq, b_eq=b_eq, method=self.method, options=self.options) def test_aliasing_b_ub(self): c = np.array([1.0]) A_ub = np.array([[1.0]]) b_ub_orig = np.array([3.0]) b_ub = b_ub_orig.copy() bounds = (-4.0, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-4, desired_x=[-4]) 
assert_allclose(b_ub_orig, b_ub) def test_aliasing_b_eq(self): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq_orig = np.array([3.0]) b_eq = b_eq_orig.copy() bounds = (-4.0, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3, desired_x=[3]) assert_allclose(b_eq_orig, b_eq) def test_non_ndarray_args(self): c = [1.0] A_ub = [[1.0]] b_ub = [3.0] A_eq = [[1.0]] b_eq = [2.0] bounds = (-1.0, 10.0) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=2, desired_x=[2]) def test_unknown_options(self): c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, options={}): linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=options) o = {key: self.options[key] for key in self.options} o['spam'] = 42 assert_warns(OptimizeWarning, f, c, A_ub=A_ub, b_ub=b_ub, options=o) def test_invalid_inputs(self): def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4)]) assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, 2), (3, 4), (3, 4, 5)]) assert_raises(ValueError, f, [1, 2, 3], bounds=[(1, -2), (1, 2)]) assert_raises(ValueError, f, [1, 2], A_ub=[[1, 2]], b_ub=[1, 2]) assert_raises(ValueError, f, [1, 2], A_ub=[[1]], b_ub=[1]) assert_raises(ValueError, f, [1, 2], A_eq=[[1, 2]], b_eq=[1, 2]) assert_raises(ValueError, f, [1, 2], A_eq=[[1]], b_eq=[1]) assert_raises(ValueError, f, [1, 2], A_eq=[1], b_eq=1) if ("_sparse_presolve" in self.options and self.options["_sparse_presolve"]): return # there aren't 3-D sparse matrices assert_raises(ValueError, f, [1, 2], A_ub=np.zeros((1, 1, 3)), b_eq=1) def test_sparse_constraints(self): def f(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None): linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) np.random.seed(0) m = 100 n = 150 A_eq = scipy.sparse.rand(m, n, 0.5) x_valid = np.random.randn((n)) c = np.random.randn((n)) ub = x_valid + np.random.rand((n)) lb = x_valid - np.random.rand((n)) bounds = np.column_stack((lb, ub)) b_eq = A_eq * x_valid if self.method in {'simplex', 'revised simplex'}: with assert_raises(ValueError, match=f"Method '{self.method}' " "does not support sparse constraint matrices."): linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) else: options = {**self.options} if self.method in {'interior-point'}: options['sparse'] = True res = linprog(c=c, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=options) assert res.success def test_maxiter(self): c = [4, 8, 3, 0, 0, 0] A = [ [2, 5, 3, -1, 0, 0], [3, 2.5, 8, 0, -1, 0], [8, 10, 4, 0, 0, -1]] b = [185, 155, 600] np.random.seed(0) maxiter = 3 res = linprog(c, A_eq=A, b_eq=b, method=self.method, options={"maxiter": maxiter}) _assert_iteration_limit_reached(res, maxiter) assert_equal(res.nit, maxiter) def test_bounds_fixed(self): do_presolve = self.options.get('presolve', True) res = linprog([1], bounds=(1, 1), method=self.method, options=self.options) _assert_success(res, 1, 1) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 2, 3], bounds=[(5, 5), (-1, -1), (3, 3)], method=self.method, options=self.options) _assert_success(res, 12, [5, -1, 3]) if do_presolve: assert_equal(res.nit, 0) res = 
linprog([1, 1], bounds=[(1, 1), (1, 3)], method=self.method, options=self.options) _assert_success(res, 2, [1, 1]) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 1, 2], A_eq=[[1, 0, 0], [0, 1, 0]], b_eq=[1, 7], bounds=[(-5, 5), (0, 10), (3.5, 3.5)], method=self.method, options=self.options) _assert_success(res, 15, [1, 7, 3.5]) if do_presolve: assert_equal(res.nit, 0) def test_bounds_infeasible(self): do_presolve = self.options.get('presolve', True) res = linprog([1], bounds=(1, -2), method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) res = linprog([1], bounds=[(1, -2)], method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) res = linprog([1, 2, 3], bounds=[(5, 0), (1, 2), (3, 4)], method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) def test_bounds_infeasible_2(self): do_presolve = self.options.get('presolve', True) simplex_without_presolve = not do_presolve and self.method == 'simplex' c = [1, 2, 3] bounds_1 = [(1, 2), (np.inf, np.inf), (3, 4)] bounds_2 = [(1, 2), (-np.inf, -np.inf), (3, 4)] if simplex_without_presolve: def g(c, bounds): res = linprog(c, bounds=bounds, method=self.method, options=self.options) return res with pytest.warns(RuntimeWarning): with pytest.raises(IndexError): g(c, bounds=bounds_1) with pytest.warns(RuntimeWarning): with pytest.raises(IndexError): g(c, bounds=bounds_2) else: res = linprog(c=c, bounds=bounds_1, method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) res = linprog(c=c, bounds=bounds_2, method=self.method, options=self.options) _assert_infeasible(res) if do_presolve: assert_equal(res.nit, 0) def test_empty_constraint_1(self): c = [-1, -2] res = linprog(c, method=self.method, options=self.options) _assert_unbounded(res) def test_empty_constraint_2(self): c = [-1, 1, -1, 1] bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] res = linprog(c, bounds=bounds, method=self.method, options=self.options) _assert_unbounded(res) if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_empty_constraint_3(self): c = [1, -1, 1, -1] bounds = [(0, np.inf), (-np.inf, 0), (-1, 1), (-1, 1)] res = linprog(c, bounds=bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[0, 0, -1, 1], desired_fun=-2) def test_inequality_constraints(self): c = np.array([3, 2]) * -1 A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-18, desired_x=[2, 6]) def test_inequality_constraints2(self): c = [6, 3] A_ub = [[0, 3], [-1, -1], [-2, 1]] b_ub = [2, -1, -1] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=5, desired_x=[2 / 3, 1 / 3]) def test_bounds_simple(self): c = [1, 2] bounds = (1, 2) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[1, 1]) bounds = [(1, 2), (1, 2)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[1, 1]) def test_bounded_below_only_1(self): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq = np.array([3.0]) bounds = (1.0, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3, 
desired_x=[3]) def test_bounded_below_only_2(self): c = np.ones(3) A_eq = np.eye(3) b_eq = np.array([1, 2, 3]) bounds = (0.5, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) def test_bounded_above_only_1(self): c = np.array([1.0]) A_eq = np.array([[1.0]]) b_eq = np.array([3.0]) bounds = (None, 10.0) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3, desired_x=[3]) def test_bounded_above_only_2(self): c = np.ones(3) A_eq = np.eye(3) b_eq = np.array([1, 2, 3]) bounds = (-np.inf, 4) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) def test_bounds_infinity(self): c = np.ones(3) A_eq = np.eye(3) b_eq = np.array([1, 2, 3]) bounds = (-np.inf, np.inf) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=b_eq, desired_fun=np.sum(b_eq)) def test_bounds_mixed(self): c = np.array([-1, 4]) * -1 A_ub = np.array([[-3, 1], [1, 2]], dtype=np.float64) b_ub = [6, 4] x0_bounds = (-np.inf, np.inf) x1_bounds = (-3, np.inf) bounds = (x0_bounds, x1_bounds) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-80 / 7, desired_x=[-8 / 7, 18 / 7]) def test_bounds_equal_but_infeasible(self): c = [-4, 1] A_ub = [[7, -2], [0, 1], [2, -2]] b_ub = [14, 0, 3] bounds = [(2, 2), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_bounds_equal_but_infeasible2(self): c = [-4, 1] A_eq = [[7, -2], [0, 1], [2, -2]] b_eq = [14, 0, 3] bounds = [(2, 2), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_bounds_equal_no_presolve(self): c = [1, 2] A_ub = [[1, 2], [1.1, 2.2]] b_ub = [4, 8] bounds = [(1, 2), (2, 2)] o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_infeasible(res) def test_zero_column_1(self): m, n = 3, 4 np.random.seed(0) c = np.random.rand(n) c[1] = 1 A_eq = np.random.rand(m, n) A_eq[:, 1] = 0 b_eq = np.random.rand(m) A_ub = [[1, 0, 1, 1]] b_ub = 3 bounds = [(-10, 10), (-10, 10), (-10, None), (None, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-9.7087836730413404) def test_zero_column_2(self): np.random.seed(0) m, n = 2, 4 c = np.random.rand(n) c[1] = -1 A_eq = np.random.rand(m, n) A_eq[:, 1] = 0 b_eq = np.random.rand(m) A_ub = np.random.rand(m, n) A_ub[:, 1] = 0 b_ub = np.random.rand(m) bounds = (None, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_zero_row_1(self): c = [1, 2, 3] A_eq = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] b_eq = [0, 3, 0] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=3) def test_zero_row_2(self): A_ub = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] b_ub = [0, 3, 0] c = [1, 2, 3] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0) def 
test_zero_row_3(self): m, n = 2, 4 c = np.random.rand(n) A_eq = np.random.rand(m, n) A_eq[0, :] = 0 b_eq = np.random.rand(m) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_zero_row_4(self): m, n = 2, 4 c = np.random.rand(n) A_ub = np.random.rand(m, n) A_ub[0, :] = 0 b_ub = -np.random.rand(m) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_singleton_row_eq_1(self): c = [1, 1, 1, 2] A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] b_eq = [1, 2, 2, 4] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_singleton_row_eq_2(self): c = [1, 1, 1, 2] A_eq = [[1, 0, 0, 0], [0, 2, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]] b_eq = [1, 2, 1, 4] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=4) def test_singleton_row_ub_1(self): c = [1, 1, 1, 2] A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] b_ub = [1, 2, -2, 4] bounds = [(None, None), (0, None), (0, None), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_singleton_row_ub_2(self): c = [1, 1, 1, 2] A_ub = [[1, 0, 0, 0], [0, 2, 0, 0], [-1, 0, 0, 0], [1, 1, 1, 1]] b_ub = [1, 2, -0.5, 4] bounds = [(None, None), (0, None), (0, None), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0.5) def test_infeasible(self): c = [-1, -1] A_ub = [[1, 0], [0, 1], [-1, -1]] b_ub = [2, 2, -5] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_infeasible_inequality_bounds(self): c = [1] A_ub = [[2]] b_ub = 4 bounds = (5, 6) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) if self.options.get('presolve', True): assert_equal(res.nit, 0) def test_unbounded(self): c = np.array([1, 1]) * -1 A_ub = [[-1, 1], [-1, -1]] b_ub = [-1, -2] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) def test_unbounded_below_no_presolve_corrected(self): c = [1] bounds = [(None, 1)] o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c=c, bounds=bounds, method=self.method, options=o) if self.method == "revised simplex": assert_equal(res.status, 5) else: _assert_unbounded(res) def test_unbounded_no_nontrivial_constraints_1(self): c = np.array([0, 0, 0, 1, -1, -1]) A_ub = np.array([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1]]) b_ub = np.array([2, -2, 0]) bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1), (0, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) if not self.method.lower().startswith("highs"): assert_equal(res.x[-1], np.inf) assert_equal(res.message[:36], "The problem is (trivially) unbounded") def test_unbounded_no_nontrivial_constraints_2(self): c = np.array([0, 0, 0, 1, -1, 1]) A_ub = np.array([[1, 0, 0, 0, 
0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]) b_ub = np.array([2, -2, 0]) bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1), (None, 0)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) if not self.method.lower().startswith("highs"): assert_equal(res.x[-1], -np.inf) assert_equal(res.message[:36], "The problem is (trivially) unbounded") def test_cyclic_recovery(self): c = np.array([100, 10, 1]) * -1 A_ub = [[1, 0, 0], [20, 1, 0], [200, 20, 1]] b_ub = [1, 100, 10000] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[0, 0, 10000], atol=5e-6, rtol=1e-7) def test_cyclic_bland(self): c = np.array([-10, 57, 9, 24.]) A_ub = np.array([[0.5, -5.5, -2.5, 9], [0.5, -1.5, -0.5, 1], [1, 0, 0, 0]]) b_ub = [0, 0, 1] # copy the existing options dictionary but change maxiter maxiter = 100 o = {key: val for key, val in self.options.items()} o['maxiter'] = maxiter res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) if self.method == 'simplex' and not self.options.get('bland'): # simplex cycles without Bland's rule _assert_iteration_limit_reached(res, o['maxiter']) else: _assert_success(res, desired_x=[1, 0, 1, 0]) # note that revised simplex skips this test because it may or may not # cycle depending on the initial basis def test_remove_redundancy_infeasibility(self): # mostly a test of redundancy removal, which is carefully tested in # test__remove_redundancy.py m, n = 10, 10 c = np.random.rand(n) A_eq = np.random.rand(m, n) b_eq = np.random.rand(m) A_eq[-1, :] = 2 * A_eq[-2, :] b_eq[-1] *= -1 with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) ################# # General Tests # ################# def test_nontrivial_problem(self): # Problem involves all constraint types, # negative resource limits, and rounding issues. c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=f_star, desired_x=x_star) def test_lpgen_problem(self): # Test linprog with a rather large problem (400 variables, # 40 constraints) generated by https://gist.github.com/denis-bz/8647461 A_ub, b_ub, c = lpgen_2d(20, 20) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-64.049494229) def test_network_flow(self): # A network flow problem with supply and demand at nodes # and with costs along directed edges. 
# https://www.princeton.edu/~rvdb/542/lectures/lec10.pdf c = [2, 4, 9, 11, 4, 3, 8, 7, 0, 15, 16, 18] n, p = -1, 1 A_eq = [ [n, n, p, 0, p, 0, 0, 0, 0, p, 0, 0], [p, 0, 0, p, 0, p, 0, 0, 0, 0, 0, 0], [0, 0, n, n, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, p, p, 0, 0, p, 0], [0, 0, 0, 0, n, n, n, 0, p, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, n, n, 0, 0, p], [0, 0, 0, 0, 0, 0, 0, 0, 0, n, n, n]] b_eq = [0, 19, -16, 33, 0, 0, -36] with suppress_warnings() as sup: sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=755, atol=1e-6, rtol=1e-7) def test_network_flow_limited_capacity(self): # A network flow problem with supply and demand at nodes # and with costs and capacities along directed edges. # http://blog.sommer-forst.de/2013/04/10/ c = [2, 2, 1, 3, 1] bounds = [ [0, 4], [0, 2], [0, 2], [0, 3], [0, 5]] n, p = -1, 1 A_eq = [ [n, n, 0, 0, 0], [p, 0, n, n, 0], [0, p, p, 0, n], [0, 0, 0, p, p]] b_eq = [-4, 0, 0, 4] with suppress_warnings() as sup: # this is an UmfpackWarning but I had trouble importing it if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(OptimizeWarning, "Solving system with option...") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=14) def test_simplex_algorithm_wikipedia_example(self): # https://en.wikipedia.org/wiki/Simplex_algorithm#Example c = [-2, -3, -4] A_ub = [ [3, 2, 1], [2, 5, 3]] b_ub = [10, 15] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-20) def test_enzo_example(self): # https://github.com/scipy/scipy/issues/1779 lp2.py # # Translated from Octave code at: # http://www.ecs.shimane-u.ac.jp/~kyoshida/lpeng.htm # and placed under MIT licence by Enzo Michelangeli # with permission explicitly granted by the original author, # Prof. 
Kazunobu Yoshida c = [4, 8, 3, 0, 0, 0] A_eq = [ [2, 5, 3, -1, 0, 0], [3, 2.5, 8, 0, -1, 0], [8, 10, 4, 0, 0, -1]] b_eq = [185, 155, 600] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=317.5, desired_x=[66.25, 0, 17.5, 0, 183.75, 0], atol=6e-6, rtol=1e-7) def test_enzo_example_b(self): # rescued from https://github.com/scipy/scipy/pull/218 c = [2.8, 6.3, 10.8, -2.8, -6.3, -10.8] A_eq = [[-1, -1, -1, 0, 0, 0], [0, 0, 0, 1, 1, 1], [1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1]] b_eq = [-0.5, 0.4, 0.3, 0.3, 0.3] with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-1.77, desired_x=[0.3, 0.2, 0.0, 0.0, 0.1, 0.3]) def test_enzo_example_c_with_degeneracy(self): # rescued from https://github.com/scipy/scipy/pull/218 m = 20 c = -np.ones(m) tmp = 2 * np.pi * np.arange(1, m + 1) / (m + 1) A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) b_eq = [0, 0] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0, desired_x=np.zeros(m)) def test_enzo_example_c_with_unboundedness(self): # rescued from https://github.com/scipy/scipy/pull/218 m = 50 c = -np.ones(m) tmp = 2 * np.pi * np.arange(m) / (m + 1) A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) b_eq = [0, 0] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_unbounded(res) def test_enzo_example_c_with_infeasibility(self): # rescued from https://github.com/scipy/scipy/pull/218 m = 50 c = -np.ones(m) tmp = 2 * np.pi * np.arange(m) / (m + 1) A_eq = np.vstack((np.cos(tmp) - 1, np.sin(tmp))) b_eq = [1, 1] o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_infeasible(res) def test_basic_artificial_vars(self): # Problem is chosen to test two phase simplex methods when at the end # of phase 1 some artificial variables remain in the basis. # Also, for `method='simplex'`, the row in the tableau corresponding # with the artificial variables is not all zero. 
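        # (Editor's gloss: in two-phase simplex, phase 1 minimizes the sum of
        # the artificial variables; it can terminate at value ~0 while some
        # artificials are still basic at level zero, and phase 2 must then
        # pivot them out or ignore their rows. This problem probes that path.)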
c = np.array([-0.1, -0.07, 0.004, 0.004, 0.004, 0.004]) A_ub = np.array([[1.0, 0, 0, 0, 0, 0], [-1.0, 0, 0, 0, 0, 0], [0, -1.0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0], [1.0, 1.0, 0, 0, 0, 0]]) b_ub = np.array([3.0, 3.0, 3.0, 3.0, 20.0]) A_eq = np.array([[1.0, 0, -1, 1, -1, 1], [0, -1.0, -1, 1, -1, 1]]) b_eq = np.array([0, 0]) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=0, desired_x=np.zeros_like(c), atol=2e-6) def test_optimize_result(self): # check all fields in OptimizeResult c, A_ub, b_ub, A_eq, b_eq, bounds = very_random_gen(0) res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) assert_(res.success) assert_(res.nit) assert_(not res.status) assert_(res.message == "Optimization terminated successfully.") assert_allclose(c @ res.x, res.fun) assert_allclose(b_eq - A_eq @ res.x, res.con, atol=1e-11) assert_allclose(b_ub - A_ub @ res.x, res.slack, atol=1e-11) ################# # Bug Fix Tests # ################# def test_bug_5400(self): # https://github.com/scipy/scipy/issues/5400 bounds = [ (0, None), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 100), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, 900), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)] f = 1 / 9 g = -1e4 h = -3.1 A_ub = np.array([ [1, -2.99, 0, 0, -3, 0, 0, 0, -1, -1, 0, -1, -1, 1, 1, 0, 0, 0, 0], [1, 0, -2.9, h, 0, -3, 0, -1, 0, 0, -1, 0, -1, 0, 0, 1, 1, 0, 0], [1, 0, 0, h, 0, 0, -3, -1, -1, 0, -1, -1, 0, 0, 0, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], [0, 1.99, -1, -1, 0, 0, 0, -1, f, f, 0, 0, 0, g, 0, 0, 0, 0, 0], [0, 0, 0, 0, 2, -1, -1, 0, 0, 0, -1, f, f, 0, g, 0, 0, 0, 0], [0, -1, 1.9, 2.1, 0, 0, 0, f, -1, -1, 0, 0, 0, 0, 0, g, 0, 0, 0], [0, 0, 0, 0, -1, 2, -1, 0, 0, 0, f, -1, f, 0, 0, 0, g, 0, 0], [0, -1, -1, 2.1, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, 0, 0, g, 0], [0, 0, 0, 0, -1, -1, 2, 0, 0, 0, f, f, -1, 0, 0, 0, 0, 0, g]]) b_ub = np.array([ 0.0, 0, 0, 100, 100, 100, 100, 100, 100, 900, 900, 900, 900, 900, 900, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) c = np.array([-1.0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, 
desired_fun=-106.63507541835018) def test_bug_6139(self): # linprog(method='simplex') fails to find a basic feasible solution # if phase 1 pseudo-objective function is outside the provided tol. # https://github.com/scipy/scipy/issues/6139 # Note: This is not strictly a bug as the default tolerance determines # if a result is "close enough" to zero and should not be expected # to work for all cases. c = np.array([1, 1, 1]) A_eq = np.array([[1., 0., 0.], [-1000., 0., - 1000.]]) b_eq = np.array([5.00000000e+00, -1.00000000e+04]) A_ub = -np.array([[0., 1000000., 1010000.]]) b_ub = -np.array([10000000.]) bounds = (None, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=14.95, desired_x=np.array([5, 4.95, 5])) def test_bug_6690(self): # linprog simplex used to violate bound constraint despite reporting # success. # https://github.com/scipy/scipy/issues/6690 A_eq = np.array([[0, 0, 0, 0.93, 0, 0.65, 0, 0, 0.83, 0]]) b_eq = np.array([0.9626]) A_ub = np.array([ [0, 0, 0, 1.18, 0, 0, 0, -0.2, 0, -0.22], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0.43, 0, 0, 0, 0, 0, 0], [0, -1.22, -0.25, 0, 0, 0, -2.06, 0, 0, 1.37], [0, 0, 0, 0, 0, 0, 0, -0.25, 0, 0] ]) b_ub = np.array([0.615, 0, 0.172, -0.869, -0.022]) bounds = np.array([ [-0.84, -0.97, 0.34, 0.4, -0.33, -0.74, 0.47, 0.09, -1.45, -0.73], [0.37, 0.02, 2.86, 0.86, 1.18, 0.5, 1.76, 0.17, 0.32, -0.15] ]).T c = np.array([ -1.64, 0.7, 1.8, -1.06, -1.16, 0.26, 2.13, 1.53, 0.66, 0.28 ]) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(OptimizeWarning, "Solving system with option 'cholesky'") sup.filter(OptimizeWarning, "Solving system with option 'sym_pos'") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) desired_fun = -1.19099999999 desired_x = np.array([0.3700, -0.9700, 0.3400, 0.4000, 1.1800, 0.5000, 0.4700, 0.0900, 0.3200, -0.7300]) _assert_success(res, desired_fun=desired_fun, desired_x=desired_x) # Add small tol value to ensure arrays are less than or equal. atol = 1e-6 assert_array_less(bounds[:, 0] - atol, res.x) assert_array_less(res.x, bounds[:, 1] + atol) def test_bug_7044(self): # linprog simplex failed to "identify correct constraints" (?) # leading to a non-optimal solution if A is rank-deficient. # https://github.com/scipy/scipy/issues/7044 A_eq, b_eq, c, N = magic_square(3) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) desired_fun = 1.730550597 _assert_success(res, desired_fun=desired_fun) assert_allclose(A_eq.dot(res.x), b_eq) assert_array_less(np.zeros(res.x.size) - 1e-5, res.x) def test_bug_7237(self): # https://github.com/scipy/scipy/issues/7237 # linprog simplex "explodes" when the pivot value is very # close to zero. 
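        # (Editor's gloss: every simplex pivot divides the pivot row by the
        # pivot element, so an element near zero multiplies rounding error
        # enormously; tolerance-based pivot selection is the usual guard,
        # hence the tol-sensitive variants of this test elsewhere in the file.)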
c = np.array([-1, 0, 0, 0, 0, 0, 0, 0, 0]) A_ub = np.array([ [1., -724., 911., -551., -555., -896., 478., -80., -293.], [1., 566., 42., 937., 233., 883., 392., -909., 57.], [1., -208., -894., 539., 321., 532., -924., 942., 55.], [1., 857., -859., 83., 462., -265., -971., 826., 482.], [1., 314., -424., 245., -424., 194., -443., -104., -429.], [1., 540., 679., 361., 149., -827., 876., 633., 302.], [0., -1., -0., -0., -0., -0., -0., -0., -0.], [0., -0., -1., -0., -0., -0., -0., -0., -0.], [0., -0., -0., -1., -0., -0., -0., -0., -0.], [0., -0., -0., -0., -1., -0., -0., -0., -0.], [0., -0., -0., -0., -0., -1., -0., -0., -0.], [0., -0., -0., -0., -0., -0., -1., -0., -0.], [0., -0., -0., -0., -0., -0., -0., -1., -0.], [0., -0., -0., -0., -0., -0., -0., -0., -1.], [0., 1., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 1., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 1., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 1., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 1., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 1., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 1., 0.], [0., 0., 0., 0., 0., 0., 0., 0., 1.] ]) b_ub = np.array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1.]) A_eq = np.array([[0., 1., 1., 1., 1., 1., 1., 1., 1.]]) b_eq = np.array([[1.]]) bounds = [(None, None)] * 9 res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=108.568535, atol=1e-6) def test_bug_8174(self): # https://github.com/scipy/scipy/issues/8174 # The simplex method sometimes "explodes" if the pivot value is very # close to zero. A_ub = np.array([ [22714, 1008, 13380, -2713.5, -1116], [-4986, -1092, -31220, 17386.5, 684], [-4986, 0, 0, -2713.5, 0], [22714, 0, 0, 17386.5, 0]]) b_ub = np.zeros(A_ub.shape[0]) c = -np.ones(A_ub.shape[1]) bounds = [(0, 1)] * A_ub.shape[1] with suppress_warnings() as sup: sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) if self.options.get('tol', 1e-9) < 1e-10 and self.method == 'simplex': _assert_unable_to_find_basic_feasible_sol(res) else: _assert_success(res, desired_fun=-2.0080717488789235, atol=1e-6) def test_bug_8174_2(self): # Test supplementary example from issue 8174. 
# https://github.com/scipy/scipy/issues/8174 # https://stackoverflow.com/questions/47717012/linprog-in-scipy-optimize-checking-solution c = np.array([1, 0, 0, 0, 0, 0, 0]) A_ub = -np.identity(7) b_ub = np.array([[-2], [-2], [-2], [-2], [-2], [-2], [-2]]) A_eq = np.array([ [1, 1, 1, 1, 1, 1, 0], [0.3, 1.3, 0.9, 0, 0, 0, -1], [0.3, 0, 0, 0, 0, 0, -2/3], [0, 0.65, 0, 0, 0, 0, -1/15], [0, 0, 0.3, 0, 0, 0, -1/15] ]) b_eq = np.array([[100], [0], [0], [0], [0]]) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=43.3333333331385) def test_bug_8561(self): # Test that pivot row is chosen correctly when using Bland's rule c = np.array([7, 0, -4, 1.5, 1.5]) A_ub = np.array([ [4, 5.5, 1.5, 1.0, -3.5], [1, -2.5, -2, 2.5, 0.5], [3, -0.5, 4, -12.5, -7], [-1, 4.5, 2, -3.5, -2], [5.5, 2, -4.5, -1, 9.5]]) b_ub = np.array([0, 0, 0, 0, 1]) res = linprog(c, A_ub=A_ub, b_ub=b_ub, options=self.options, method=self.method) _assert_success(res, desired_x=[0, 0, 19, 16/3, 29/3]) def test_bug_8662(self): c = [-10, 10, 6, 3] A_ub = [[8, -8, -4, 6], [-8, 8, 4, -6], [-4, 4, 8, -4], [3, -3, -3, -10]] b_ub = [9, -9, -9, -4] bounds = [(0, None), (0, None), (0, None), (0, None)] desired_fun = 36.0000000000 with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res1 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) A_ub.append([0, 0, -1, 0]) b_ub.append(0) bounds[2] = (None, None) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res2 = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) rtol = 1e-5 _assert_success(res1, desired_fun=desired_fun, rtol=rtol) _assert_success(res2, desired_fun=desired_fun, rtol=rtol) def test_bug_8663(self): c = [1, 5] A_eq = [[0, -7]] b_eq = [-6] bounds = [(0, None), (None, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[0, 6./7], desired_fun=5*6./7) def test_bug_8664(self): c = [4] A_ub = [[2], [5]] b_ub = [4, 4] A_eq = [[0], [-8], [9]] b_eq = [3, 2, 10] with suppress_warnings() as sup: sup.filter(RuntimeWarning) sup.filter(OptimizeWarning, "Solving system with option...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_infeasible(res) def test_bug_8973(self): c = np.array([0, 0, 0, 1, -1]) A_ub = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]]) b_ub = np.array([2, -2]) bounds = [(None, None), (None, None), (None, None), (-1, 1), (-1, 1)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_fun=-2) assert_equal(c @ res.x, res.fun) def test_bug_8973_2(self): c = np.zeros(1) A_ub = np.array([[1]]) b_ub = np.array([-2]) bounds = (None, None) res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[-2], desired_fun=0) def test_bug_10124(self): c = np.zeros(1) A_ub = np.array([[1]]) b_ub = np.array([-2]) bounds = (None, None) c = [-1, 4] A_ub = [[-3, 1], [1, 2]] b_ub = [6, 4] bounds = [(None, None), (-3, None)] o = {"disp": True} o.update(self.options) res = 
linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_success(res, desired_x=[10, -3], desired_fun=-22) def test_bug_10349(self): A_eq = np.array([[1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0], [0, 1, 0, 0, 0, 1]]) b_eq = np.array([221, 210, 10, 141, 198, 102]) c = np.concatenate((0, 1, np.zeros(4)), axis=None) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options) _assert_success(res, desired_x=[129, 92, 12, 198, 0, 10], desired_fun=92) def test_bug_10466(self): c = [-8., -0., -8., -0., -8., -0., -0., -0., -0., -0., -0., -0., -0.] A_eq = [[1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.], [1., 0., 1., 0., 1., 0., -1., 0., 0., 0., 0., 0., 0.], [1., 0., 1., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], [1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], [1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.], [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0.], [0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 1.]] b_eq = [3.14572800e+08, 4.19430400e+08, 5.24288000e+08, 1.00663296e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09, 1.07374182e+09] o = {} # HiGHS methods don't use autoscale option if not self.method.startswith("highs"): o = {"autoscale": True} o.update(self.options) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "Solving system with option...") if has_umfpack: sup.filter(UmfpackWarning) sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") sup.filter(RuntimeWarning, "divide by zero encountered...") sup.filter(RuntimeWarning, "overflow encountered...") sup.filter(RuntimeWarning, "invalid value encountered...") sup.filter(LinAlgWarning, "Ill-conditioned matrix...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) assert_allclose(res.fun, -8589934560) class LinprogSimplexTests(LinprogCommonTests): method = "simplex" class LinprogIPTests(LinprogCommonTests): method = "interior-point" class LinprogRSTests(LinprogCommonTests): method = "revised simplex"
def test_bug_5400(self): pytest.skip("Intermittent failure acceptable.") def test_bug_8662(self): pytest.skip("Intermittent failure acceptable.") def test_network_flow(self): pytest.skip("Intermittent failure acceptable.") class LinprogHiGHSTests(LinprogCommonTests): def test_callback(self): cb = lambda res: None c = np.array([-3, -2]) A_ub = [[2, 1], [1, 1], [1, 0]] b_ub = [10, 8, 4] assert_raises(NotImplementedError, linprog, c, A_ub=A_ub, b_ub=b_ub, callback=cb, method=self.method) res = linprog(c, A_ub=A_ub, b_ub=b_ub, method=self.method) _assert_success(res, desired_fun=-18.0, desired_x=[2, 6]) @pytest.mark.parametrize("options", [{"maxiter": -1}, {"disp": -1}, {"presolve": -1}, {"time_limit": -1}, {"dual_feasibility_tolerance": -1}, {"primal_feasibility_tolerance": -1}, {"ipm_optimality_tolerance": -1}, {"simplex_dual_edge_weight_strategy": "ekki"}, ]) def test_invalid_option_values(self, options): def f(options): linprog(1, method=self.method, options=options) options.update(self.options) assert_warns(OptimizeWarning, f, options=options) def test_crossover(self): c = np.array([1, 1]) * -1 A_ub = np.array([[1, 1]]) b_ub = [1] res = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds, method=self.method, options=self.options) assert_equal(res.crossover_nit == 0, self.method != "highs-ipm") class TestLinprogSimplexDefault(LinprogSimplexTests): def setup_method(self): self.options = {} def test_bug_5400(self): pytest.skip("Simplex fails on this problem.") def test_bug_7237_low_tol(self): pytest.skip("Simplex fails on this problem.") def test_bug_8174_low_tol(self): self.options.update({'tol': 1e-12}) with pytest.warns(OptimizeWarning): super(TestLinprogSimplexDefault, self).test_bug_8174() class TestLinprogSimplexBland(LinprogSimplexTests): def setup_method(self): self.options = {'bland': True} def test_bug_5400(self): pytest.skip("Simplex fails on this problem.") def test_bug_8174_low_tol(self): self.options.update({'tol': 1e-12}) with pytest.raises(AssertionError): with pytest.warns(OptimizeWarning): super(TestLinprogSimplexBland, self).test_bug_8174() class TestLinprogSimplexNoPresolve(LinprogSimplexTests): def setup_method(self): self.options = {'presolve': False} is_32_bit = np.intp(0).itemsize < 8 is_linux = sys.platform.startswith('linux') @pytest.mark.xfail( condition=is_32_bit and is_linux, reason='Fails with warning on 32-bit linux') def test_bug_5400(self): super(TestLinprogSimplexNoPresolve, self).test_bug_5400() def test_bug_6139_low_tol(self): self.options.update({'tol': 1e-12}) with pytest.raises(AssertionError, match='linprog status 4'): return super(TestLinprogSimplexNoPresolve, self).test_bug_6139() def test_bug_7237_low_tol(self): pytest.skip("Simplex fails on this problem.") def test_bug_8174_low_tol(self): self.options.update({'tol': 1e-12}) with pytest.warns(OptimizeWarning): super(TestLinprogSimplexNoPresolve, self).test_bug_8174() def test_unbounded_no_nontrivial_constraints_1(self): pytest.skip("Tests behavior specific to presolve") def test_unbounded_no_nontrivial_constraints_2(self): pytest.skip("Tests behavior specific to presolve") class TestLinprogIPDense(LinprogIPTests): options = {"sparse": False} if has_cholmod: class TestLinprogIPSparseCholmod(LinprogIPTests): options = {"sparse": True, "cholesky": True} if has_umfpack: class TestLinprogIPSparseUmfpack(LinprogIPTests): options = {"sparse": True, "cholesky": False} def test_bug_10466(self): pytest.skip("Autoscale
doesn't fix everything, and that's OK.") class TestLinprogIPSparse(LinprogIPTests): options = {"sparse": True, "cholesky": False, "sym_pos": False} @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " "perturbations in linear system solution in " "_linprog_ip._sym_solve.") def test_bug_6139(self): super(TestLinprogIPSparse, self).test_bug_6139() @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') def test_bug_6690(self): super(TestLinprogIPSparse, self).test_bug_6690() def test_magic_square_sparse_no_presolve(self): # test linprog with a problem with a rank-deficient A_eq matrix A_eq, b_eq, c, N = magic_square(3) bounds = (0, 1) with suppress_warnings() as sup: if has_umfpack: sup.filter(UmfpackWarning) sup.filter(MatrixRankWarning, "Matrix is exactly singular") sup.filter(OptimizeWarning, "Solving system with option...") o = {key: self.options[key] for key in self.options} o["presolve"] = False res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_success(res, desired_fun=1.730550597) def test_sparse_solve_options(self): # checking that problem is solved with all column permutation options A_eq, b_eq, c, N = magic_square(3) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(OptimizeWarning, "Invalid permc_spec option") o = {key: self.options[key] for key in self.options} permc_specs = ('NATURAL', 'MMD_ATA', 'MMD_AT_PLUS_A', 'COLAMD', 'ekki-ekki-ekki') # 'ekki-ekki-ekki' raises warning about invalid permc_spec option # and uses default for permc_spec in permc_specs: o["permc_spec"] = permc_spec res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=o) _assert_success(res, desired_fun=1.730550597) class TestLinprogIPSparsePresolve(LinprogIPTests): options = {"sparse": True, "_sparse_presolve": True} @pytest.mark.xfail_on_32bit("This test is sensitive to machine epsilon level " "perturbations in linear system solution in " "_linprog_ip._sym_solve.") def test_bug_6139(self): super(TestLinprogIPSparsePresolve, self).test_bug_6139() def test_enzo_example_c_with_infeasibility(self): pytest.skip('_sparse_presolve=True incompatible with presolve=False') @pytest.mark.xfail(reason='Fails with ATLAS, see gh-7877') def test_bug_6690(self): # Test defined in base class, but can't mark as xfail there super(TestLinprogIPSparsePresolve, self).test_bug_6690() class TestLinprogIPSpecific: method = "interior-point" # sparse presolve, sparse after presolve, and dense def test_solver_select(self): # check that default solver is selected as expected if has_cholmod: options = {'sparse': True, 'cholesky': True} elif has_umfpack: options = {'sparse': True, 'cholesky': False} else: options = {'sparse': True, 'cholesky': False, 'sym_pos': False} A, b, c = lpgen_2d(20, 20) res1 = linprog(c, A_ub=A, b_ub=b, method=self.method, options=options) res2 = linprog(c, A_ub=A, b_ub=b, method=self.method) # default solver assert_allclose(res1.fun, res2.fun, err_msg="linprog default solver unexpected result", rtol=1e-15, atol=1e-15) def test_unbounded_below_no_presolve_original(self): # formerly caused segfault in TravisCI w/ "cholesky":True c = [-1] bounds = [(None, 1)] res = linprog(c=c, bounds=bounds, method=self.method, options={"presolve": False, "cholesky": True}) _assert_success(res, desired_fun=-1) def test_cholesky(self): # use cholesky factorization and triangular solves A, b, c = lpgen_2d(20, 20) res = linprog(c, A_ub=A, b_ub=b, method=self.method, options={"cholesky": True}) # only 
for dense _assert_success(res, desired_fun=-64.049494229) def test_alternate_initial_point(self): # use "improved" initial point A, b, c = lpgen_2d(20, 20) with suppress_warnings() as sup: sup.filter(RuntimeWarning, "scipy.linalg.solve\nIll...") sup.filter(OptimizeWarning, "Solving system with option...") sup.filter(LinAlgWarning, "Ill-conditioned matrix...") res = linprog(c, A_ub=A, b_ub=b, method=self.method, options={"ip": True, "disp": True}) # ip code is independent of sparse/dense _assert_success(res, desired_fun=-64.049494229) def test_bug_8664(self): # interior-point has trouble with this when presolve is off c = [4] A_ub = [[2], [5]] b_ub = [4, 4] A_eq = [[0], [-8], [9]] b_eq = [3, 2, 10] with suppress_warnings() as sup: sup.filter(RuntimeWarning) sup.filter(OptimizeWarning, "Solving system with option...") res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options={"presolve": False}) assert_(not res.success, "Incorrectly reported success") ######################################## # Revised Simplex Option-Specific Tests# ######################################## class TestLinprogRSCommon(LinprogRSTests): options = {} def test_cyclic_bland(self): pytest.skip("Intermittent failure acceptable.") def test_nontrivial_problem_with_guess(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_unbounded_variables(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() bounds = [(None, None), (None, None), (0, None), (None, None)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_bounded_variables(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() bounds = [(None, 1), (1, None), (0, None), (.4, .6)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_negative_unbounded_variable(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() b_eq = [4] x_star = np.array([-219/385, 582/385, 0, 4/10]) f_star = 3951/385 bounds = [(None, None), (1, None), (0, None), (.4, .6)] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_bad_guess(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() bad_guess = [1, 2, 3, .5] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=bad_guess) assert_equal(res.status, 6) def test_redundant_constraints_with_guess(self): A, b, c, N = magic_square(3) p = np.random.rand(*c.shape) with suppress_warnings() as sup: sup.filter(OptimizeWarning, "A_eq does not appear...") sup.filter(RuntimeWarning, "invalid value encountered") sup.filter(LinAlgWarning) res = linprog(c, A_eq=A, b_eq=b, method=self.method) res2 = linprog(c, A_eq=A, b_eq=b, method=self.method, x0=res.x) res3 = linprog(c + p, A_eq=A, b_eq=b, method=self.method, x0=res.x) _assert_success(res2, desired_fun=1.730550597) assert_equal(res2.nit, 0) _assert_success(res3) 
assert_(res3.nit < res.nit) # hot start reduces iterations class TestLinprogRSBland(LinprogRSTests): options = {"pivot": "bland"} ############################################ # HiGHS-Simplex-Dual Option-Specific Tests # ############################################ class TestLinprogHiGHSSimplexDual(LinprogHiGHSTests): method = "highs-ds" options = {} def test_lad_regression(self): c, A_ub, b_ub, bnds = l1_regression_prob() res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds, method=self.method, options=self.options) assert_equal(res.status, 4) assert_('An optimal solution to the scaled ' 'model was found but' in res.message) assert_(res.x is not None) assert_(np.all(res.slack > -1e-6)) assert_(np.all(res.x <= [np.inf if u is None else u for l, u in bnds])) assert_(np.all(res.x >= [-np.inf if l is None else l for l, u in bnds])) ################################### # HiGHS-IPM Option-Specific Tests # ################################### class TestLinprogHiGHSIPM(LinprogHiGHSTests): method = "highs-ipm" options = {} ########################### # Autoscale-Specific Tests# ########################### class AutoscaleTests: options = {"autoscale": True} test_bug_6139 = LinprogCommonTests.test_bug_6139 test_bug_6690 = LinprogCommonTests.test_bug_6690 test_bug_7237 = LinprogCommonTests.test_bug_7237 class TestAutoscaleIP(AutoscaleTests): method = "interior-point" def test_bug_6139(self): self.options['tol'] = 1e-10 return AutoscaleTests.test_bug_6139(self) class TestAutoscaleSimplex(AutoscaleTests): method = "simplex" class TestAutoscaleRS(AutoscaleTests): method = "revised simplex" def test_nontrivial_problem_with_guess(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=x_star) _assert_success(res, desired_fun=f_star, desired_x=x_star) assert_equal(res.nit, 0) def test_nontrivial_problem_with_bad_guess(self): c, A_ub, b_ub, A_eq, b_eq, x_star, f_star = nontrivial_problem() bad_guess = [1, 2, 3, .5] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, method=self.method, options=self.options, x0=bad_guess) assert_equal(res.status, 6) ########################### # Redundancy Removal Tests# ########################### class RRTests: method = "interior-point" LCT = LinprogCommonTests # these are a few of the existing tests that have redundancy test_RR_infeasibility = LCT.test_remove_redundancy_infeasibility test_bug_10349 = LCT.test_bug_10349 test_bug_7044 = LCT.test_bug_7044 test_NFLC = LCT.test_network_flow_limited_capacity test_enzo_example_b = LCT.test_enzo_example_b class TestRRSVD(RRTests): options = {"rr_method": "SVD"} class TestRRPivot(RRTests): options = {"rr_method": "pivot"} class TestRRID(RRTests): options = {"rr_method": "ID"}
true
true
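The record above parameterizes scipy.optimize.linprog over solver backends by swapping a method string and an options dict. A minimal sketch of that calling pattern, assuming only a working SciPy install (the toy problem below is illustrative, not taken from the tests):

import numpy as np
from scipy.optimize import linprog

# Maximize x + y subject to x + y <= 1, x >= 0, y >= 0 (linprog minimizes, so negate c).
c = -np.ones(2)
A_ub = np.array([[1.0, 1.0]])
b_ub = [1.0]

# The test classes above drive exactly this call, varying method/options per backend.
res = linprog(c, A_ub=A_ub, b_ub=b_ub, method="highs-ds",
              options={"presolve": True})
print(res.status, res.fun, res.x)  # expect status 0 and fun == -1.0 on the constraint boundary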
f7039b99de5e7aefe25f080a32f03253b9c3ea2f
3,573
py
Python
tests/unit/agroapi10/test_polygon.py
ChuckVanHoff/pyowm
86735d8629ead2cfa0232b0f8ec0b88ab16eff11
[ "MIT" ]
1
2019-06-01T07:47:12.000Z
2019-06-01T07:47:12.000Z
tests/unit/agroapi10/test_polygon.py
cjsgh901/pyowm
cdd59eb72f32f7238624ceef9b2e2329a5ebd472
[ "MIT" ]
null
null
null
tests/unit/agroapi10/test_polygon.py
cjsgh901/pyowm
cdd59eb72f32f7238624ceef9b2e2329a5ebd472
[ "MIT" ]
1
2020-01-20T22:54:02.000Z
2020-01-20T22:54:02.000Z
import unittest from pyowm.agroapi10.polygon import Polygon, GeoPoint, GeoPolygon class TestPolygon(unittest.TestCase): geopoint= GeoPoint(34, -56.3) geopolygon = GeoPolygon([ [[2.3, 57.32], [23.19, -20.2], [-120.4, 19.15], [2.3, 57.32]] ]) def test_polygon_fails_with_wrong_parameters(self): self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, 123.4, 'user') self.assertRaises(AssertionError, Polygon, 'id', 'polygon', 'wrong', self.geopoint, 123.4, 'user') self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, 'wrong', 123.4, 'user') self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, None, 'user') self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, -77, 'user') def test_area_kilometers_property(self): area_hs = 456.78 expected = area_hs * 0.01 instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, area_hs, 'user') self.assertEqual(expected, instance.area_km) instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, None, 'user') self.assertIsNone(instance.area_km) def test_from_dict(self): _id = "5abb9fb82c8897000bde3e87" name = "Polygon Sample" coords = [121.1867, 37.6739] geopolygon = GeoPolygon([[ [-121.1958, 37.6683], [-121.1779, 37.6687], [-121.1773, 37.6792], [-121.1958, 37.6792], [-121.1958, 37.6683]]]) center = GeoPoint(coords[0], coords[1]) area = 190.6343 user_id = "557066d0ff7a7e3897531d94" the_dict = { "id": _id, "geo_json": { "type": "Feature", "properties": { }, "geometry": { "type": "Polygon", "coordinates": [ [ [-121.1958, 37.6683], [-121.1779, 37.6687], [-121.1773, 37.6792], [-121.1958, 37.6792], [-121.1958, 37.6683] ] ] } }, "name": name, "center": coords, "area": area, "user_id": user_id } expected = Polygon(_id, name, geopolygon, center, area, user_id) result = Polygon.from_dict(the_dict) self.assertEqual(expected.id, result.id) self.assertEqual(expected.name, result.name) self.assertEqual(expected.area, result.area) self.assertEqual(expected.user_id, result.user_id) self.assertEqual(expected.center.lat, result.center.lat) self.assertEqual(expected.center.lon, result.center.lon) self.assertEqual(expected.geopolygon.geojson(), result.geopolygon.geojson()) # now testing with dirty data self.assertRaises(AssertionError, Polygon.from_dict, None) the_dict['center'] = ['no_lon', 'no_lat'] self.assertRaises(ValueError, Polygon.from_dict, the_dict) the_dict['center'] = coords del the_dict['id'] self.assertRaises(AssertionError, Polygon.from_dict, the_dict) def test_repr(self): instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, 1.2, 'user') repr(instance) instance = Polygon('id') repr(instance)
38.836957
114
0.563672
import unittest from pyowm.agroapi10.polygon import Polygon, GeoPoint, GeoPolygon class TestPolygon(unittest.TestCase): geopoint= GeoPoint(34, -56.3) geopolygon = GeoPolygon([ [[2.3, 57.32], [23.19, -20.2], [-120.4, 19.15], [2.3, 57.32]] ]) def test_polygon_fails_with_wrong_parameters(self): self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, 123.4, 'user') self.assertRaises(AssertionError, Polygon, 'id', 'polygon', 'wrong', self.geopoint, 123.4, 'user') self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, 'wrong', 123.4, 'user') self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, None, 'user') self.assertRaises(AssertionError, Polygon, None, 'polygon', self.geopolygon, self.geopoint, -77, 'user') def test_area_kilometers_property(self): area_hs = 456.78 expected = area_hs * 0.01 instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, area_hs, 'user') self.assertEqual(expected, instance.area_km) instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, None, 'user') self.assertIsNone(instance.area_km) def test_from_dict(self): _id = "5abb9fb82c8897000bde3e87" name = "Polygon Sample" coords = [121.1867, 37.6739] geopolygon = GeoPolygon([[ [-121.1958, 37.6683], [-121.1779, 37.6687], [-121.1773, 37.6792], [-121.1958, 37.6792], [-121.1958, 37.6683]]]) center = GeoPoint(coords[0], coords[1]) area = 190.6343 user_id = "557066d0ff7a7e3897531d94" the_dict = { "id": _id, "geo_json": { "type": "Feature", "properties": { }, "geometry": { "type": "Polygon", "coordinates": [ [ [-121.1958, 37.6683], [-121.1779, 37.6687], [-121.1773, 37.6792], [-121.1958, 37.6792], [-121.1958, 37.6683] ] ] } }, "name": name, "center": coords, "area": area, "user_id": user_id } expected = Polygon(_id, name, geopolygon, center, area, user_id) result = Polygon.from_dict(the_dict) self.assertEqual(expected.id, result.id) self.assertEqual(expected.name, result.name) self.assertEqual(expected.area, result.area) self.assertEqual(expected.user_id, result.user_id) self.assertEqual(expected.center.lat, result.center.lat) self.assertEqual(expected.center.lon, result.center.lon) self.assertEqual(expected.geopolygon.geojson(), result.geopolygon.geojson()) self.assertRaises(AssertionError, Polygon.from_dict, None) the_dict['center'] = ['no_lon', 'no_lat'] self.assertRaises(ValueError, Polygon.from_dict, the_dict) the_dict['center'] = coords del the_dict['id'] self.assertRaises(AssertionError, Polygon.from_dict, the_dict) def test_repr(self): instance = Polygon('id', 'polygon', self.geopolygon, self.geopoint, 1.2, 'user') repr(instance) instance = Polygon('id') repr(instance)
true
true
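The pyowm Polygon API those tests exercise can be driven directly. A short sketch grounded only in the constructor and properties used above (pyowm installed; the values are illustrative):

from pyowm.agroapi10.polygon import Polygon, GeoPoint, GeoPolygon

# One closed linear ring (first vertex repeated last), as in the test fixture.
gp = GeoPolygon([[[2.3, 57.32], [23.19, -20.2], [-120.4, 19.15], [2.3, 57.32]]])
center = GeoPoint(34, -56.3)

# Signature per the tests: Polygon(id, name, geopolygon, center, area_in_hectares, user_id)
poly = Polygon('id', 'polygon', gp, center, 456.78, 'user')
print(poly.area_km)  # hectares * 0.01 -> 4.5678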
f7039c4f0f970d872e14754af77e13a6dcf2b429
207
py
Python
module10-writing.api.with.flask/utility/rest.py
deepcloudlabs/dcl162-2020-sep-02
abd21c59d89985e9f5922df65fd1a5ccab019de4
[ "MIT" ]
null
null
null
module10-writing.api.with.flask/utility/rest.py
deepcloudlabs/dcl162-2020-sep-02
abd21c59d89985e9f5922df65fd1a5ccab019de4
[ "MIT" ]
null
null
null
module10-writing.api.with.flask/utility/rest.py
deepcloudlabs/dcl162-2020-sep-02
abd21c59d89985e9f5922df65fd1a5ccab019de4
[ "MIT" ]
null
null
null
def convert_request_to_dictionary(request, fields): emp = {} for field in fields: if field in request.json: emp[field] = request.json[field] emp.pop("identity", None) return emp
29.571429
51
0.63285
def convert_request_to_dictionary(request, fields): emp = {} for field in fields: if field in request.json: emp[field] = request.json[field] emp.pop("identity", None) return emp
true
true
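As originally written, del emp["identity"] raised KeyError whenever "identity" was absent from the request body; the pop form used above keeps the helper total. A self-contained check, with a hypothetical stub standing in for flask's request object:

def convert_request_to_dictionary(request, fields):
    emp = {}
    for field in fields:
        if field in request.json:
            emp[field] = request.json[field]
    emp.pop("identity", None)  # drop the id if present; no KeyError otherwise
    return emp

class FakeRequest:  # hypothetical stand-in for flask.Request
    json = {"identity": 7, "fullname": "Jane"}

print(convert_request_to_dictionary(FakeRequest(), ["identity", "fullname", "iban"]))
# {'fullname': 'Jane'} -- absent fields are skipped and 'identity' is stripped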
f7039caff8ee44c13e6b74d8f1920cff661c5ead
481
py
Python
manuscript/tools/play.py
anterokangas/ManuscriptManagerOld
194bc6c7b899bb4ab61966af3ba1e619fc74c20c
[ "MIT" ]
null
null
null
manuscript/tools/play.py
anterokangas/ManuscriptManagerOld
194bc6c7b899bb4ab61966af3ba1e619fc74c20c
[ "MIT" ]
null
null
null
manuscript/tools/play.py
anterokangas/ManuscriptManagerOld
194bc6c7b899bb4ab61966af3ba1e619fc74c20c
[ "MIT" ]
null
null
null
import os from playsound import playsound from manuscript.tools.counter import Counter def play_sound(sound, block=True): if sound is not None: prefix = "tmp" with Counter(prefix) as counter: tmp_file = os.path.join(".", prefix + f"_{counter:010d}.mp3") sound.export(tmp_file) playsound(tmp_file, block=block) #os.remove(tmp_file)
26.722222
73
0.673597
import os from playsound import playsound from manuscript.tools.counter import Counter def play_sound(sound, block=True): if sound is not None: prefix = "tmp" with Counter(prefix) as counter: tmp_file = os.path.join(".", prefix + f"_{counter:010d}.mp3") sound.export(tmp_file) playsound(tmp_file, block=block)
true
true
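Note the unused pydub.playback, simpleaudio, and winsound imports were dropped above (import winsound fails outside Windows), and the commented-out os.remove leaks one mp3 per call, presumably because deleting during non-blocking playback would fail. A hedged alternative for the blocking case only, using the standard tempfile module (sound is assumed to be a pydub AudioSegment, as the export call implies):

import os
import tempfile
from playsound import playsound

def play_sound_blocking(sound):
    # Sketch only: export to a throwaway file, play synchronously, then delete.
    if sound is None:
        return
    fd, tmp = tempfile.mkstemp(suffix=".mp3")
    os.close(fd)  # export reopens the path itself
    try:
        sound.export(tmp)  # pydub AudioSegment.export
        playsound(tmp)     # block=True by default; returns after playback ends
    finally:
        os.remove(tmp)     # safe here because playback has finished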
f7039ccc97e8de2fedf8a71747166960ca9d7503
429
py
Python
TelegramBot/utility.py
Otkuda/telebot
5bbd901c74340922e349f5ac372ce238eb682877
[ "CC0-1.0" ]
null
null
null
TelegramBot/utility.py
Otkuda/telebot
5bbd901c74340922e349f5ac372ce238eb682877
[ "CC0-1.0" ]
null
null
null
TelegramBot/utility.py
Otkuda/telebot
5bbd901c74340922e349f5ac372ce238eb682877
[ "CC0-1.0" ]
null
null
null
from telegram import ReplyKeyboardMarkup, KeyboardButton def get_keyboard(): contact_button = KeyboardButton('Отправить контакты', request_contact=True) location_button = KeyboardButton('Отправить локацию', request_location=True) my_keyboard = ReplyKeyboardMarkup([['Анекдот', 'Начать'], [contact_button, location_button]], resize_keyboard=True) return my_keyboard
47.666667
97
0.706294
from telegram import ReplyKeyboardMarkup, KeyboardButton def get_keyboard(): contact_button = KeyboardButton('Отправить контакты', request_contact=True) location_button = KeyboardButton('Отправить локацию', request_location=True) my_keyboard = ReplyKeyboardMarkup([['Анекдот', 'Начать'], [contact_button, location_button]], resize_keyboard=True) return my_keyboard
true
true
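With get_keyboard from the module above in scope, the markup is attached to an outgoing message via reply_markup. A minimal python-telegram-bot handler sketch in the v12/v13 callback style; dispatcher wiring and the bot token are omitted:

def start(update, context):
    # Sends a greeting with the custom two-row keyboard attached.
    update.message.reply_text('Choose an option:', reply_markup=get_keyboard())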
f7039eb952f4df789dbf533948f40ffcbf2abfb4
28,246
py
Python
python/torch_mlir/dialects/torch/importer/jit_ir/build_tools/torch_ods_gen.py
llvm/torch-mlir
2b1b0f6e1970c9db13caea2515070c61d4dee167
[ "Apache-2.0" ]
213
2021-09-24T03:26:53.000Z
2022-03-30T07:11:48.000Z
python/torch_mlir/dialects/torch/importer/jit_ir/build_tools/torch_ods_gen.py
llvm/torch-mlir
2b1b0f6e1970c9db13caea2515070c61d4dee167
[ "Apache-2.0" ]
247
2021-09-23T18:49:45.000Z
2022-03-31T17:19:02.000Z
python/torch_mlir/dialects/torch/importer/jit_ir/build_tools/torch_ods_gen.py
llvm/torch-mlir
2b1b0f6e1970c9db13caea2515070c61d4dee167
[ "Apache-2.0" ]
68
2021-09-23T18:23:20.000Z
2022-03-29T11:18:58.000Z
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # Also available under a BSD-style license. See LICENSE. """Queries the pytorch op registry and generates ODS and CC sources for the ops. """ from typing import List, Optional, TextIO import argparse import logging import os import sys from .utils import TextEmitter from .registry import Registry, JitOperator # Mapping from torch types to their corresponding ODS type predicates. # Use `get_ods_type` instead of using this directly. TORCH_TYPE_TO_ODS_TYPE = { "Tensor": "AnyTorchTensorType", "Tensor?": "AnyTorchOptionalTensorType", "Tensor?[]": "AnyTorchListOfOptionalTensorType", "Tensor[]": "AnyTorchListOfTensorType", "Scalar": "AnyTorchScalarType", "Scalar?": "AnyTorchOptionalScalarType", "int": "Torch_IntType", "int[]": "AnyTorchListOfTorchIntType", "int?": "AnyTorchOptionalIntType", "int[]?": "AnyTorchOptionalListOfTorchIntType", "bool": "Torch_BoolType", "bool[]": "AnyTorchListOfTorchBoolType", "bool?": "AnyTorchOptionalBoolType", "float": "Torch_FloatType", "float?": "AnyTorchOptionalFloatType", "t[]": "AnyTorchListType", "t": "AnyTorchType", "t1": "AnyTorchType", "t2": "AnyTorchType", "Any": "AnyTorchType", "Device": "Torch_DeviceType", "Device?": "AnyTorchOptionalDeviceType", "Generator": "Torch_GeneratorType", "Generator?": "AnyTorchOptionalGeneratorType", "str": "Torch_StringType", "str?": "AnyTorchOptionalStringType", "str[]": "AnyTorchListOfTorchStringType", "Dict": "Torch_DictType", "__torch__.torch.classes.quantized.LinearPackedParamsBase": "Torch_LinearParamsType", } def get_ods_type(type: str): # TODO: Increase precision on dict type modeling. if type.startswith("Dict("): type = "Dict" ods_type = TORCH_TYPE_TO_ODS_TYPE.get(type) if ods_type is None: raise Exception( f"{type!r} not in TORCH_TYPE_TO_ODS_TYPE mapping. Please add it!") return ods_type def _get_main_module_name() -> str: # pytype: disable=attribute-error return sys.modules["__main__"].__loader__.name # pytype: enable=attribute-error ODS_BANNER = f"""//===-------------------------------------------------------*- tablegen -*-===// // // This file is licensed under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // Also available under a BSD-style license. See LICENSE. // // Operation summaries and descriptions were systematically derived from public // API docstrings and are licensed accordingly: // https://github.com/pytorch/pytorch/blob/master/LICENSE //===----------------------------------------------------------------------===// // // This file is automatically generated. Please do not edit. // Generated via: // ``` // python -m {_get_main_module_name()} // ``` // //===----------------------------------------------------------------------===// """ def raw_emit_op(operator: JitOperator, emitter_td: TextEmitter, *, traits: List[str], has_folder: bool, has_canonicalizer: bool): """Emit the ODS for a JitOperator to a textual file. This is the lowest level of emission and is responsible for low-level textual emission details. This function should not have any "smarts" for deducing traits/etc. You probably don't want to call this directly. 
""" p_td = lambda *args: emitter_td.print(*args) op_name, cpp_class_name = operator.get_mlir_names() # Generate unique result names for ops with nameless results multiple_results = len(operator.returns) > 1 def generic_result_name(i): return "result" + (str(i) if multiple_results else "") p_td( f"def Torch_{cpp_class_name} : Torch_Op<{emitter_td.quote(op_name)}, [") with emitter_td.indent(): with emitter_td.indent(): p_td(",\n".join(traits)) p_td("]> {") with emitter_td.indent(): summary = f"Generated op for `{operator.unique_key}`" p_td(f"let summary = {emitter_td.quote(summary)};") p_td(f"let arguments = (ins") with emitter_td.indent(): if operator.is_vararg: p_td("Variadic<AnyTorchType>:$operands") else: p_td(",\n".join([ f"""{get_ods_type(arg["type"])}:${arg["name"]}""" for arg in operator.arguments ])) p_td(");") p_td(f"let results = (outs") with emitter_td.indent(): if operator.is_varret: p_td("Variadic<AnyTorchType>:$results") else: p_td(",\n".join([ f"""{get_ods_type(ret["type"])}:${ret["name"] or generic_result_name(e)}""" for e, ret in enumerate(operator.returns) ])) p_td(");") if operator.is_vararg or operator.is_varret: if operator.is_vararg: assembly_operands = "`(` $operands `)`" assembly_operand_types = "qualified(type($operands))" else: assembly_operands = " `,` ".join("$" + arg["name"] for arg in operator.arguments) assembly_operand_types = " `,` ".join( f"""qualified(type(${arg["name"]}))""" for arg in operator.arguments) if operator.is_varret: assembly_result_types = "qualified(type($results))" else: assembly_result_types = " `,` ".join( f"""qualified(type(${ret["name"] or generic_result_name(e)}))""" for e, ret in enumerate(operator.returns)) if assembly_operand_types and assembly_result_types: maybe_arrow = " `->` " else: maybe_arrow = "" assembly_format = f"{assembly_operands} attr-dict `:` {assembly_operand_types}{maybe_arrow}{assembly_result_types}" p_td(f"let assemblyFormat = {emitter_td.quote(assembly_format)};") else: p_td(f"let hasCustomAssemblyFormat = 1;") p_td(f"""let extraClassDefinition = [{{ ParseResult {cpp_class_name}::parse(OpAsmParser &parser, OperationState &result) {{ return parseDefaultTorchOp(parser, result, {len(operator.arguments)}, {len(operator.returns)}); }} void {cpp_class_name}::print(OpAsmPrinter &printer) {{ printDefaultTorchOp(printer, *this, {len(operator.arguments)}, {len(operator.returns)}); }} }}]; """) if has_folder: p_td("let hasFolder = 1;") if has_canonicalizer: p_td("let hasCanonicalizer = 1;") p_td("}") p_td("\n") def emit_op(operator: JitOperator, emitter_td: TextEmitter, *, traits: Optional[List[str]] = None, has_folder: bool = False, has_canonicalizer: bool = False): """Main entry point for op emission. Besides emitting the op, it deduces / adds traits based on the operator information. """ if traits is None: traits = [] # All Torch operators allow type refinement. 
traits += ["AllowsTypeRefinement"] if operator.has_value_semantics(): traits += ["HasValueSemantics"] if operator.is_readonly(): traits += ["ReadOnly"] raw_emit_op(operator, emitter_td, traits=traits, has_folder=has_folder, has_canonicalizer=has_canonicalizer) def emit_ops(emitter_td: TextEmitter, registry: Registry): def emit(key, **kwargs): emit_op(registry[key], emitter_td, **kwargs) def emit_with_mutating_variants(key, **kwargs): operator = registry[key] emit_op(operator, emitter_td, **kwargs) ns, unqual, overload = operator.triple emit_op(registry.get_by_triple((ns, unqual + "_", overload)), emitter_td, traits=["IsTrailingUnderscoreInplaceVariant"]) # ========================================================================== # `aten::` namespace. # ========================================================================== # Elementwise tensor compute ops for key in [ "aten::tanh : (Tensor) -> (Tensor)", "aten::hardtanh : (Tensor, Scalar, Scalar) -> (Tensor)", "aten::relu : (Tensor) -> (Tensor)", "aten::leaky_relu : (Tensor, Scalar) -> (Tensor)", "aten::log : (Tensor) -> (Tensor)", "aten::sigmoid : (Tensor) -> (Tensor)", "aten::hardsigmoid : (Tensor) -> (Tensor)", "aten::hardswish : (Tensor) -> (Tensor)", "aten::erf : (Tensor) -> (Tensor)", "aten::silu : (Tensor) -> (Tensor)", "aten::sin : (Tensor) -> (Tensor)", "aten::exp : (Tensor) -> (Tensor)", "aten::cos : (Tensor) -> (Tensor)", "aten::neg : (Tensor) -> (Tensor)", "aten::floor : (Tensor) -> (Tensor)", "aten::ceil : (Tensor) -> (Tensor)", "aten::bitwise_not : (Tensor) -> (Tensor)", "aten::add.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)", "aten::sub.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)", "aten::mul.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::div.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::lerp.Tensor : (Tensor, Tensor, Tensor) -> (Tensor)", "aten::eq.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::gt.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::lt.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::ne.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::add.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)", "aten::sub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)", "aten::mul.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::div.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::le.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::fmod.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::masked_fill.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)", "aten::clamp : (Tensor, Scalar?, Scalar?) -> (Tensor)", "aten::log2 : (Tensor) -> (Tensor)", "aten::rsqrt : (Tensor) -> (Tensor)", "aten::abs : (Tensor) -> (Tensor)", "aten::reciprocal : (Tensor) -> (Tensor)", "aten::bitwise_and.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::threshold : (Tensor, Scalar, Scalar) -> (Tensor)", "aten::square : (Tensor) -> (Tensor)", ]: emit_with_mutating_variants(key) # Elementwise tensor compute ops that don't have the standard mutating # variants. 
emit("aten::addcmul : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)") emit("aten::addcdiv : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)") emit("aten::maximum : (Tensor, Tensor) -> (Tensor)") emit("aten::minimum : (Tensor, Tensor) -> (Tensor)") emit("aten::rsub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)") emit("aten::gelu : (Tensor, str) -> (Tensor)") emit("aten::pow.Tensor_Scalar : (Tensor, Scalar) -> (Tensor)") emit("aten::threshold_backward : (Tensor, Tensor, Scalar) -> (Tensor)") # Ops without value semantics but the corresponding without trailing # underscore variant doesn't exist. emit("aten::fill_.Scalar : (Tensor, Scalar) -> (Tensor)") emit("aten::uniform_ : (Tensor, float, float, Generator?) -> (Tensor)") emit("aten::rand_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::bernoulli : (Tensor, Generator?) -> (Tensor)") emit("aten::bernoulli_.float : (Tensor, float, Generator?) -> (Tensor)") emit("aten::bernoulli_.Tensor : (Tensor, Tensor, Generator?) -> (Tensor)") emit_with_mutating_variants("aten::triu : (Tensor, int) -> (Tensor)") emit_with_mutating_variants( "aten::index_put : (Tensor, Tensor?[], Tensor, bool) -> (Tensor)") emit_with_mutating_variants( "aten::index_put.hacked_twin : (Tensor, Tensor[], Tensor, bool) -> (Tensor)") # Non-elementwise tensor compute ops emit("aten::linear : (Tensor, Tensor, Tensor?) -> (Tensor)") emit("aten::mm : (Tensor, Tensor) -> (Tensor)") emit("aten::addmm : (Tensor, Tensor, Tensor, Scalar, Scalar) -> (Tensor)") emit("aten::matmul : (Tensor, Tensor) -> (Tensor)") emit( "aten::conv2d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)" ) emit("aten::convolution : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)") emit("aten::convolution_overrideable : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)") emit("aten::flip : (Tensor, int[]) -> (Tensor)") emit( "aten::native_batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float) -> (Tensor, Tensor, Tensor)" ) emit( "aten::batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float, bool) -> (Tensor)" ) emit( "aten::layer_norm : (Tensor, int[], Tensor?, Tensor?, float, bool) -> (Tensor)" ) emit( "aten::native_layer_norm : (Tensor, int[], Tensor?, Tensor?, float) -> (Tensor, Tensor, Tensor)" ) emit( "aten::max_pool2d : (Tensor, int[], int[], int[], int[], bool) -> (Tensor)" ) emit( "aten::max_pool2d_with_indices : (Tensor, int[], int[], int[], int[], bool) -> (Tensor, Tensor)" ) emit( "aten::max_pool2d_with_indices_backward : (Tensor, Tensor, int[], int[], int[], int[], bool, Tensor) -> (Tensor)" ) emit( "aten::avg_pool2d : (Tensor, int[], int[], int[], bool, bool, int?) -> (Tensor)" ) emit( "aten::softmax.int : (Tensor, int, int?) -> (Tensor)" ) emit( "aten::log_softmax.int : (Tensor, int, int?) -> (Tensor)" ) emit( "aten::_log_softmax : (Tensor, int, bool) -> (Tensor)" ) emit("aten::adaptive_avg_pool2d : (Tensor, int[]) -> (Tensor)") emit("aten::topk : (Tensor, int, int, bool, bool) -> (Tensor, Tensor)") emit("aten::transpose.int : (Tensor, int, int) -> (Tensor)") emit("aten::permute : (Tensor, int[]) -> (Tensor)") emit("aten::bmm : (Tensor, Tensor) -> (Tensor)") emit("aten::cumsum : (Tensor, int, int?) -> (Tensor)") emit("aten::floor_divide.Scalar : (Tensor, Scalar) -> (Tensor)") emit("aten::logsumexp : (Tensor, int[], bool) -> (Tensor)") emit("aten::mean.dim : (Tensor, int[], bool, int?) 
-> (Tensor)") emit("aten::__and__.Tensor : (Tensor, Tensor) -> (Tensor)") emit("aten::sqrt : (Tensor) -> (Tensor)") emit("aten::_softmax : (Tensor, int, bool) -> (Tensor)") emit("aten::mean : (Tensor, int?) -> (Tensor)") emit("aten::std : (Tensor, bool) -> (Tensor)") emit("aten::var : (Tensor, bool) -> (Tensor)") emit("aten::nll_loss_forward : (Tensor, Tensor, Tensor?, int, int) -> (Tensor, Tensor)") emit("aten::nll_loss_backward : (Tensor, Tensor, Tensor, Tensor?, int, int, Tensor) -> (Tensor)") emit("aten::bincount : (Tensor, Tensor?, int) -> (Tensor)") # Misc tensor ops. emit("aten::constant_pad_nd : (Tensor, int[], Scalar) -> (Tensor)") emit("aten::pad : (Tensor, int[], str, float?) -> (Tensor)") emit("aten::squeeze.dim : (Tensor, int) -> (Tensor)", has_folder=True) emit("aten::unsqueeze : (Tensor, int) -> (Tensor)") emit("aten::squeeze : (Tensor) -> (Tensor)", has_folder=True) emit("aten::flatten.using_ints : (Tensor, int, int) -> (Tensor)") emit("aten::dim : (Tensor) -> (int)", has_folder=True) emit("aten::size : (Tensor) -> (int[])", has_canonicalizer=True) emit("aten::Bool.Tensor : (Tensor) -> (bool)") emit("aten::ones : (int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::new_ones : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::zeros : (int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::zero_ : (Tensor) -> (Tensor)") emit("aten::new_zeros : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::tensor : (t[], int?, Device?, bool) -> (Tensor)") emit("aten::tensor.bool : (bool, int?, Device?, bool) -> (Tensor)") emit("aten::tensor.int : (int, int?, Device?, bool) -> (Tensor)") emit("aten::_shape_as_tensor : (Tensor) -> (Tensor)") emit("aten::all : (Tensor) -> (Tensor)") emit("aten::any : (Tensor) -> (Tensor)") emit("aten::any.dim : (Tensor, int, bool) -> (Tensor)") emit("aten::arange : (Scalar, int?, int?, Device?, bool?) -> (Tensor)") emit("aten::arange.start : (Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)") emit("aten::arange.start_step : (Scalar, Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)") emit("aten::argmax : (Tensor, int?, bool) -> (Tensor)") emit("aten::bucketize.Tensor : (Tensor, Tensor, bool, bool) -> (Tensor)") emit("aten::clone : (Tensor, int?) -> (Tensor)") emit("aten::contiguous : (Tensor, int) -> (Tensor)") emit("aten::copy_ : (Tensor, Tensor, bool) -> (Tensor)") emit("aten::_to_copy : (Tensor, int?, int?, Device?, bool?, bool, int?) -> (Tensor)") emit("aten::detach : (Tensor) -> (Tensor)") emit("aten::embedding : (Tensor, Tensor, int, bool, bool) -> (Tensor)") emit("aten::empty_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::new_empty : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::zeros_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::ones_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::empty.memory_format : (int[], int?, int?, Device?, bool?, int?) 
-> (Tensor)") emit("aten::expand : (Tensor, int[], bool) -> (Tensor)") emit("aten::expand_as : (Tensor, Tensor) -> (Tensor)") emit("aten::broadcast_to : (Tensor, int[]) -> (Tensor)") emit("aten::index.Tensor : (Tensor, Tensor?[]) -> (Tensor)") emit("aten::index_select : (Tensor, int, Tensor) -> (Tensor)") emit("aten::_index_put_impl_ : (Tensor, Tensor?[], Tensor, bool, bool) -> (Tensor)") emit("aten::item : (Tensor) -> (Scalar)") emit("aten::masked_select : (Tensor, Tensor) -> (Tensor)") emit("aten::numel : (Tensor) -> (int)") emit("aten::repeat : (Tensor, int[]) -> (Tensor)") emit("aten::reshape : (Tensor, int[]) -> (Tensor)") emit("aten::_reshape_alias : (Tensor, int[], int[]) -> (Tensor)") emit("aten::resize_ : (Tensor, int[], int?) -> (Tensor)") emit("aten::select.int : (Tensor, int, int) -> (Tensor)") emit("aten::size.int : (Tensor, int) -> (int)", has_folder=True) emit("aten::stack : (Tensor[], int) -> (Tensor)") emit("aten::sum : (Tensor, int?) -> (Tensor)") emit("aten::sum.dim_IntList : (Tensor, int[], bool, int?) -> (Tensor)") emit("aten::max : (Tensor) -> (Tensor)") emit("aten::max.dim : (Tensor, int, bool) -> (Tensor, Tensor)") emit("aten::to.dtype : (Tensor, int, bool, bool, int?) -> (Tensor)", has_folder=True) emit("aten::to.dtype_layout : (Tensor, int?, int?, Device?, bool?, bool, bool, int?) -> (Tensor)", has_folder=True) emit("aten::to.other : (Tensor, Tensor, bool, bool, int?) -> (Tensor)") emit("aten::to.prim_Device : (Tensor, Device?, int?, bool, bool) -> (Tensor)") emit("aten::type_as : (Tensor, Tensor) -> (Tensor)") emit("aten::view : (Tensor, int[]) -> (Tensor)", has_folder=True) emit("aten::_unsafe_view : (Tensor, int[]) -> (Tensor)") emit("aten::where.self : (Tensor, Tensor, Tensor) -> (Tensor)") emit("aten::where.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)") emit("aten::where.ScalarOther : (Tensor, Tensor, Scalar) -> (Tensor)") emit("aten::where.ScalarSelf : (Tensor, Scalar, Tensor) -> (Tensor)") emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)") emit("aten::len.Tensor : (Tensor) -> (int)") emit("aten::cpu : (Tensor) -> (Tensor)") emit("aten::gather : (Tensor, int, Tensor, bool) -> (Tensor)") emit("aten::IntImplicit : (Tensor) -> (int)") emit("aten::tensor.float : (float, int?, Device?, bool) -> (Tensor)") emit("aten::Int.Tensor : (Tensor) -> (int)", has_folder=True) emit("aten::Float.Tensor : (Tensor) -> (float)", has_folder=True) emit_with_mutating_variants("aten::dropout : (Tensor, float, bool) -> (Tensor)") emit("aten::t : (Tensor) -> (Tensor)") emit("aten::full : (int[], Scalar, int?, int?, Device?, bool?) -> (Tensor)") emit("aten::full_like : (Tensor, Scalar, int?, int?, Device?, bool?, int?) -> (Tensor)") # Dict ops. emit("aten::__contains__.str : (Dict(str, t), str) -> (bool)", has_folder=True) emit("aten::__getitem__.Dict_str : (Dict(str, t), str) -> (t)", has_folder=True) emit("aten::_set_item.str : (Dict(str, t), str, t) -> ()") emit("aten::keys.str : (Dict(str, t)) -> (str[])") emit("aten::get.default_str : (Dict(str, t), str, t) -> (t)") emit("aten::Delete.Dict_str : (Dict(str, t), str) -> ()") # List ops. emit("aten::cat : (Tensor[], int) -> (Tensor)") emit("aten::append.t : (t[], t) -> (t[])") emit("aten::add.t : (t[], t[]) -> (t[])") emit("aten::eq.int_list : (int[], int[]) -> (bool)", has_folder=True) emit("aten::list.t : (t[]) -> (t[])") emit("aten::slice.t : (t[], int?, int?, int) -> (t[])") emit("aten::insert.t : (t[], int, t) -> ()") emit("aten::ne.int_list : (int[], int[]) -> (bool)") # Str ops. 
emit("aten::add.str : (str, str) -> (str)") emit("aten::eq.str : (str, str) -> (bool)", has_folder=True) emit("aten::str : (t) -> (str)") emit("aten::format : (...) -> (str)") emit("aten::join : (str, str[]) -> (str)") # Type conversion ops. emit("aten::Float.Scalar : (Scalar) -> (float)", has_folder=True) emit("aten::Float.str : (str) -> (float)") emit("aten::Int.float : (float) -> (int)") # Primitive ops emit("aten::__range_length : (int, int, int) -> (int)", has_folder=True) emit("aten::__derive_index : (int, int, int) -> (int)", has_folder=True) emit("aten::gt.int : (int, int) -> (bool)", has_folder=True) emit("aten::ge.int : (int, int) -> (bool)", has_folder=True) emit("aten::lt.int : (int, int) -> (bool)", has_folder=True) emit("aten::le.int : (int, int) -> (bool)", has_folder=True) emit("aten::ne.int : (int, int) -> (bool)", has_folder=True) emit("aten::eq.int : (int, int) -> (bool)", has_folder=True) emit("aten::floordiv.int : (int, int) -> (int)", has_folder=True) emit("aten::remainder.int : (int, int) -> (int)", has_folder=True) emit("aten::add.int : (int, int) -> (int)", has_folder=True) emit("aten::sub.int : (int, int) -> (int)", has_folder=True) emit("aten::mul.int : (int, int) -> (int)", has_folder=True) emit("aten::neg.int : (int) -> (int)", has_folder=True) emit("aten::log.int : (int) -> (float)") emit("aten::add.float_int : (float, int) -> (float)") emit("aten::sub.float : (float, float) -> (float)") emit("aten::mul.float : (float, float) -> (float)") emit("aten::div.float : (float, float) -> (float)", has_folder=True) emit("aten::neg.float : (float) -> (float)") emit("aten::eq.float : (float, float) -> (bool)", has_folder=True) emit("aten::gt.float : (float, float) -> (bool)", has_folder=True) emit("aten::ge.float : (float, float) -> (bool)", has_folder=True) emit("aten::lt.float : (float, float) -> (bool)", has_folder=True) emit("aten::lt.float_int : (float, int) -> (bool)") emit("aten::ge.float_int : (float, int) -> (bool)") emit("aten::ne.float_int : (float, int) -> (bool)") emit("aten::gt.float_int : (float, int) -> (bool)") emit("aten::__and__.bool : (bool, bool) -> (bool)") emit("aten::ne.bool : (bool, bool) -> (bool)", has_folder=True) emit("aten::__is__ : (t1, t2) -> (bool)", has_folder=True) emit("aten::__isnot__ : (t1, t2) -> (bool)", has_folder=True) emit("aten::__not__ : (bool) -> (bool)", has_folder=True) emit("aten::len.t : (t[]) -> (int)", has_folder=True, has_canonicalizer=True) emit("aten::__getitem__.t : (t[], int) -> (t)", has_canonicalizer=True) emit("aten::_set_item.t : (t[], int, t) -> (t[])") emit("aten::div : (Scalar, Scalar) -> (float)") emit("aten::add : (Scalar, Scalar) -> (Scalar)") emit("aten::eq.device : (Device, Device) -> (bool)") emit("aten::ceil.float : (float) -> (int)", has_folder=True) # backprop ops emit("aten::_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)") emit("aten::tanh_backward : (Tensor, Tensor) -> (Tensor)") emit("aten::gelu_backward : (Tensor, Tensor, str) -> (Tensor)") emit("aten::_log_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)") # ========================================================================== # `prim::` namespace. 
# ========================================================================== emit("prim::layout : (Tensor) -> (int)") emit("prim::TupleIndex : (Any, int) -> (Any)", has_canonicalizer=True) emit("prim::device : (Tensor) -> (Device)") emit("prim::dtype : (Tensor) -> (int)", has_folder=True) emit("prim::TupleUnpack : (Any) -> (...)", has_canonicalizer=True) emit("prim::NumToTensor.Scalar : (Scalar) -> (Tensor)") emit("prim::min.self_int : (int[]) -> (int)", has_folder=True) emit("prim::min.int : (int, int) -> (int)") emit("prim::max.self_int : (int[]) -> (int)") emit("prim::max.int : (int, int) -> (int)", has_folder=True) emit("prim::RaiseException : (str, str?) -> ()") emit("prim::Uninitialized : () -> (Any)", has_canonicalizer=True, traits=["NoSideEffect"]) emit("prim::unchecked_cast : (t) -> (t)", has_folder=True, traits=["DeclareOpInterfaceMethods<CastOpInterface>"]) emit("prim::Print : (...) -> ()") emit("prim::tolist : (...) -> (...)") emit("prim::abs.Scalar : (Scalar) -> (Scalar)") # ========================================================================== # `quantized::` namespace. # ========================================================================== emit( "quantized::linear : (Tensor, __torch__.torch.classes.quantized.LinearPackedParamsBase, float, int) -> (Tensor)", traits=["HasValueSemantics"]) def dump_registered_ops(outfile: TextIO, registry: Registry): for _, v in sorted(registry.by_unique_key.items()): outfile.write(repr(v)) def main(args: argparse.Namespace): registry = Registry.load() if args.debug_registry_dump: with open(args.debug_registry_dump, "w") as debug_registry_dump: dump_registered_ops(debug_registry_dump, registry) td_path = os.path.join(args.torch_ir_include_dir, "GeneratedTorchOps.td") with open(td_path, "w") as f_td: emitter_td = TextEmitter(f_td) emitter_td.print(ODS_BANNER) emit_ops(emitter_td, registry) def _create_argparse() -> argparse.ArgumentParser: parser = argparse.ArgumentParser(prog="generate_ods") parser.add_argument( "--torch_ir_include_dir", required=True, help="Directory in include/ containing the Torch dialect") parser.add_argument( "--debug_registry_dump", help="File to dump the the PyTorch JIT operator registry into") return parser if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) parser = _create_argparse() args = parser.parse_args() main(args)
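The entry point above is thin: load the registry, open GeneratedTorchOps.td, print the banner, emit. A sketch of driving one emission by hand, using only names defined in this file (import paths inferred from the repo layout, not an officially documented API):

import sys
from torch_mlir.dialects.torch.importer.jit_ir.build_tools.registry import Registry
from torch_mlir.dialects.torch.importer.jit_ir.build_tools.utils import TextEmitter
from torch_mlir.dialects.torch.importer.jit_ir.build_tools.torch_ods_gen import emit_op

registry = Registry.load()  # needs a working torch install; walks the JIT op registry
op = registry["aten::tanh : (Tensor) -> (Tensor)"]  # lookup by unique key, as emit_ops() does
emit_op(op, TextEmitter(sys.stdout))  # prints roughly `def Torch_AtenTanhOp : Torch_Op<...>`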
47.234114
128
0.568434
from typing import List, Optional, TextIO import argparse import logging import os import sys from .utils import TextEmitter from .registry import Registry, JitOperator TORCH_TYPE_TO_ODS_TYPE = { "Tensor": "AnyTorchTensorType", "Tensor?": "AnyTorchOptionalTensorType", "Tensor?[]": "AnyTorchListOfOptionalTensorType", "Tensor[]": "AnyTorchListOfTensorType", "Scalar": "AnyTorchScalarType", "Scalar?": "AnyTorchOptionalScalarType", "int": "Torch_IntType", "int[]": "AnyTorchListOfTorchIntType", "int?": "AnyTorchOptionalIntType", "int[]?": "AnyTorchOptionalListOfTorchIntType", "bool": "Torch_BoolType", "bool[]": "AnyTorchListOfTorchBoolType", "bool?": "AnyTorchOptionalBoolType", "float": "Torch_FloatType", "float?": "AnyTorchOptionalFloatType", "t[]": "AnyTorchListType", "t": "AnyTorchType", "t1": "AnyTorchType", "t2": "AnyTorchType", "Any": "AnyTorchType", "Device": "Torch_DeviceType", "Device?": "AnyTorchOptionalDeviceType", "Generator": "Torch_GeneratorType", "Generator?": "AnyTorchOptionalGeneratorType", "str": "Torch_StringType", "str?": "AnyTorchOptionalStringType", "str[]": "AnyTorchListOfTorchStringType", "Dict": "Torch_DictType", "__torch__.torch.classes.quantized.LinearPackedParamsBase": "Torch_LinearParamsType", } def get_ods_type(type: str): if type.startswith("Dict("): type = "Dict" ods_type = TORCH_TYPE_TO_ODS_TYPE.get(type) if ods_type is None: raise Exception( f"{type!r} not in TORCH_TYPE_TO_ODS_TYPE mapping. Please add it!") return ods_type def _get_main_module_name() -> str: return sys.modules["__main__"].__loader__.name ODS_BANNER = f"""//===-------------------------------------------------------*- tablegen -*-===// // // This file is licensed under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // Also available under a BSD-style license. See LICENSE. // // Operation summaries and descriptions were systematically derived from public // API docstrings and are licensed accordingly: // https://github.com/pytorch/pytorch/blob/master/LICENSE //===----------------------------------------------------------------------===// // // This file is automatically generated. Please do not edit. 
// Generated via: // ``` // python -m {_get_main_module_name()} // ``` // //===----------------------------------------------------------------------===// """ def raw_emit_op(operator: JitOperator, emitter_td: TextEmitter, *, traits: List[str], has_folder: bool, has_canonicalizer: bool): p_td = lambda *args: emitter_td.print(*args) op_name, cpp_class_name = operator.get_mlir_names() multiple_results = len(operator.returns) > 1 def generic_result_name(i): return "result" + (str(i) if multiple_results else "") p_td( f"def Torch_{cpp_class_name} : Torch_Op<{emitter_td.quote(op_name)}, [") with emitter_td.indent(): with emitter_td.indent(): p_td(",\n".join(traits)) p_td("]> {") with emitter_td.indent(): summary = f"Generated op for `{operator.unique_key}`" p_td(f"let summary = {emitter_td.quote(summary)};") p_td(f"let arguments = (ins") with emitter_td.indent(): if operator.is_vararg: p_td("Variadic<AnyTorchType>:$operands") else: p_td(",\n".join([ f"""{get_ods_type(arg["type"])}:${arg["name"]}""" for arg in operator.arguments ])) p_td(");") p_td(f"let results = (outs") with emitter_td.indent(): if operator.is_varret: p_td("Variadic<AnyTorchType>:$results") else: p_td(",\n".join([ f"""{get_ods_type(ret["type"])}:${ret["name"] or generic_result_name(e)}""" for e, ret in enumerate(operator.returns) ])) p_td(");") if operator.is_vararg or operator.is_varret: if operator.is_vararg: assembly_operands = "`(` $operands `)`" assembly_operand_types = "qualified(type($operands))" else: assembly_operands = " `,` ".join("$" + arg["name"] for arg in operator.arguments) assembly_operand_types = " `,` ".join( f"""qualified(type(${arg["name"]}))""" for arg in operator.arguments) if operator.is_varret: assembly_result_types = "qualified(type($results))" else: assembly_result_types = " `,` ".join( f"""qualified(type(${ret["name"] or generic_result_name(e)}))""" for e, ret in enumerate(operator.returns)) if assembly_operand_types and assembly_result_types: maybe_arrow = " `->` " else: maybe_arrow = "" assembly_format = f"{assembly_operands} attr-dict `:` {assembly_operand_types}{maybe_arrow}{assembly_result_types}" p_td(f"let assemblyFormat = {emitter_td.quote(assembly_format)};") else: p_td(f"let hasCustomAssemblyFormat = 1;") p_td(f"""let extraClassDefinition = [{{ ParseResult {cpp_class_name}::parse(OpAsmParser &parser, OperationState &result) {{ return parseDefaultTorchOp(parser, result, {len(operator.arguments)}, {len(operator.returns)}); }} void {cpp_class_name}::print(OpAsmPrinter &printer) {{ printDefaultTorchOp(printer, *this, {len(operator.arguments)}, {len(operator.returns)}); }} }}]; """) if has_folder: p_td("let hasFolder = 1;") if has_canonicalizer: p_td("let hasCanonicalizer = 1;") p_td("}") p_td("\n") def emit_op(operator: JitOperator, emitter_td: TextEmitter, *, traits: Optional[List[str]] = None, has_folder: bool = False, has_canonicalizer: bool = False): if traits is None: traits = [] traits += ["AllowsTypeRefinement"] if operator.has_value_semantics(): traits += ["HasValueSemantics"] if operator.is_readonly(): traits += ["ReadOnly"] raw_emit_op(operator, emitter_td, traits=traits, has_folder=has_folder, has_canonicalizer=has_canonicalizer) def emit_ops(emitter_td: TextEmitter, registry: Registry): def emit(key, **kwargs): emit_op(registry[key], emitter_td, **kwargs) def emit_with_mutating_variants(key, **kwargs): operator = registry[key] emit_op(operator, emitter_td, **kwargs) ns, unqual, overload = operator.triple emit_op(registry.get_by_triple((ns, unqual + "_", overload)), emitter_td, 
traits=["IsTrailingUnderscoreInplaceVariant"]) for key in [ "aten::tanh : (Tensor) -> (Tensor)", "aten::hardtanh : (Tensor, Scalar, Scalar) -> (Tensor)", "aten::relu : (Tensor) -> (Tensor)", "aten::leaky_relu : (Tensor, Scalar) -> (Tensor)", "aten::log : (Tensor) -> (Tensor)", "aten::sigmoid : (Tensor) -> (Tensor)", "aten::hardsigmoid : (Tensor) -> (Tensor)", "aten::hardswish : (Tensor) -> (Tensor)", "aten::erf : (Tensor) -> (Tensor)", "aten::silu : (Tensor) -> (Tensor)", "aten::sin : (Tensor) -> (Tensor)", "aten::exp : (Tensor) -> (Tensor)", "aten::cos : (Tensor) -> (Tensor)", "aten::neg : (Tensor) -> (Tensor)", "aten::floor : (Tensor) -> (Tensor)", "aten::ceil : (Tensor) -> (Tensor)", "aten::bitwise_not : (Tensor) -> (Tensor)", "aten::add.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)", "aten::sub.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)", "aten::mul.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::div.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::lerp.Tensor : (Tensor, Tensor, Tensor) -> (Tensor)", "aten::eq.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::gt.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::lt.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::ne.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::add.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)", "aten::sub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)", "aten::mul.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::div.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::le.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::fmod.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::masked_fill.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)", "aten::clamp : (Tensor, Scalar?, Scalar?) -> (Tensor)", "aten::log2 : (Tensor) -> (Tensor)", "aten::rsqrt : (Tensor) -> (Tensor)", "aten::abs : (Tensor) -> (Tensor)", "aten::reciprocal : (Tensor) -> (Tensor)", "aten::bitwise_and.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::threshold : (Tensor, Scalar, Scalar) -> (Tensor)", "aten::square : (Tensor) -> (Tensor)", ]: emit_with_mutating_variants(key) # variants. emit("aten::addcmul : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)") emit("aten::addcdiv : (Tensor, Tensor, Tensor, Scalar) -> (Tensor)") emit("aten::maximum : (Tensor, Tensor) -> (Tensor)") emit("aten::minimum : (Tensor, Tensor) -> (Tensor)") emit("aten::rsub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)") emit("aten::gelu : (Tensor, str) -> (Tensor)") emit("aten::pow.Tensor_Scalar : (Tensor, Scalar) -> (Tensor)") emit("aten::threshold_backward : (Tensor, Tensor, Scalar) -> (Tensor)") # Ops without value semantics but the corresponding without trailing # underscore variant doesn't exist. emit("aten::fill_.Scalar : (Tensor, Scalar) -> (Tensor)") emit("aten::uniform_ : (Tensor, float, float, Generator?) -> (Tensor)") emit("aten::rand_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::bernoulli : (Tensor, Generator?) -> (Tensor)") emit("aten::bernoulli_.float : (Tensor, float, Generator?) -> (Tensor)") emit("aten::bernoulli_.Tensor : (Tensor, Tensor, Generator?) 
-> (Tensor)") emit_with_mutating_variants("aten::triu : (Tensor, int) -> (Tensor)") emit_with_mutating_variants( "aten::index_put : (Tensor, Tensor?[], Tensor, bool) -> (Tensor)") emit_with_mutating_variants( "aten::index_put.hacked_twin : (Tensor, Tensor[], Tensor, bool) -> (Tensor)") emit("aten::linear : (Tensor, Tensor, Tensor?) -> (Tensor)") emit("aten::mm : (Tensor, Tensor) -> (Tensor)") emit("aten::addmm : (Tensor, Tensor, Tensor, Scalar, Scalar) -> (Tensor)") emit("aten::matmul : (Tensor, Tensor) -> (Tensor)") emit( "aten::conv2d : (Tensor, Tensor, Tensor?, int[], int[], int[], int) -> (Tensor)" ) emit("aten::convolution : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)") emit("aten::convolution_overrideable : (Tensor, Tensor, Tensor?, int[], int[], int[], bool, int[], int) -> (Tensor)") emit("aten::flip : (Tensor, int[]) -> (Tensor)") emit( "aten::native_batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float) -> (Tensor, Tensor, Tensor)" ) emit( "aten::batch_norm : (Tensor, Tensor?, Tensor?, Tensor?, Tensor?, bool, float, float, bool) -> (Tensor)" ) emit( "aten::layer_norm : (Tensor, int[], Tensor?, Tensor?, float, bool) -> (Tensor)" ) emit( "aten::native_layer_norm : (Tensor, int[], Tensor?, Tensor?, float) -> (Tensor, Tensor, Tensor)" ) emit( "aten::max_pool2d : (Tensor, int[], int[], int[], int[], bool) -> (Tensor)" ) emit( "aten::max_pool2d_with_indices : (Tensor, int[], int[], int[], int[], bool) -> (Tensor, Tensor)" ) emit( "aten::max_pool2d_with_indices_backward : (Tensor, Tensor, int[], int[], int[], int[], bool, Tensor) -> (Tensor)" ) emit( "aten::avg_pool2d : (Tensor, int[], int[], int[], bool, bool, int?) -> (Tensor)" ) emit( "aten::softmax.int : (Tensor, int, int?) -> (Tensor)" ) emit( "aten::log_softmax.int : (Tensor, int, int?) -> (Tensor)" ) emit( "aten::_log_softmax : (Tensor, int, bool) -> (Tensor)" ) emit("aten::adaptive_avg_pool2d : (Tensor, int[]) -> (Tensor)") emit("aten::topk : (Tensor, int, int, bool, bool) -> (Tensor, Tensor)") emit("aten::transpose.int : (Tensor, int, int) -> (Tensor)") emit("aten::permute : (Tensor, int[]) -> (Tensor)") emit("aten::bmm : (Tensor, Tensor) -> (Tensor)") emit("aten::cumsum : (Tensor, int, int?) -> (Tensor)") emit("aten::floor_divide.Scalar : (Tensor, Scalar) -> (Tensor)") emit("aten::logsumexp : (Tensor, int[], bool) -> (Tensor)") emit("aten::mean.dim : (Tensor, int[], bool, int?) -> (Tensor)") emit("aten::__and__.Tensor : (Tensor, Tensor) -> (Tensor)") emit("aten::sqrt : (Tensor) -> (Tensor)") emit("aten::_softmax : (Tensor, int, bool) -> (Tensor)") emit("aten::mean : (Tensor, int?) -> (Tensor)") emit("aten::std : (Tensor, bool) -> (Tensor)") emit("aten::var : (Tensor, bool) -> (Tensor)") emit("aten::nll_loss_forward : (Tensor, Tensor, Tensor?, int, int) -> (Tensor, Tensor)") emit("aten::nll_loss_backward : (Tensor, Tensor, Tensor, Tensor?, int, int, Tensor) -> (Tensor)") emit("aten::bincount : (Tensor, Tensor?, int) -> (Tensor)") emit("aten::constant_pad_nd : (Tensor, int[], Scalar) -> (Tensor)") emit("aten::pad : (Tensor, int[], str, float?) 
-> (Tensor)") emit("aten::squeeze.dim : (Tensor, int) -> (Tensor)", has_folder=True) emit("aten::unsqueeze : (Tensor, int) -> (Tensor)") emit("aten::squeeze : (Tensor) -> (Tensor)", has_folder=True) emit("aten::flatten.using_ints : (Tensor, int, int) -> (Tensor)") emit("aten::dim : (Tensor) -> (int)", has_folder=True) emit("aten::size : (Tensor) -> (int[])", has_canonicalizer=True) emit("aten::Bool.Tensor : (Tensor) -> (bool)") emit("aten::ones : (int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::new_ones : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::zeros : (int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::zero_ : (Tensor) -> (Tensor)") emit("aten::new_zeros : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::tensor : (t[], int?, Device?, bool) -> (Tensor)") emit("aten::tensor.bool : (bool, int?, Device?, bool) -> (Tensor)") emit("aten::tensor.int : (int, int?, Device?, bool) -> (Tensor)") emit("aten::_shape_as_tensor : (Tensor) -> (Tensor)") emit("aten::all : (Tensor) -> (Tensor)") emit("aten::any : (Tensor) -> (Tensor)") emit("aten::any.dim : (Tensor, int, bool) -> (Tensor)") emit("aten::arange : (Scalar, int?, int?, Device?, bool?) -> (Tensor)") emit("aten::arange.start : (Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)") emit("aten::arange.start_step : (Scalar, Scalar, Scalar, int?, int?, Device?, bool?) -> (Tensor)") emit("aten::argmax : (Tensor, int?, bool) -> (Tensor)") emit("aten::bucketize.Tensor : (Tensor, Tensor, bool, bool) -> (Tensor)") emit("aten::clone : (Tensor, int?) -> (Tensor)") emit("aten::contiguous : (Tensor, int) -> (Tensor)") emit("aten::copy_ : (Tensor, Tensor, bool) -> (Tensor)") emit("aten::_to_copy : (Tensor, int?, int?, Device?, bool?, bool, int?) -> (Tensor)") emit("aten::detach : (Tensor) -> (Tensor)") emit("aten::embedding : (Tensor, Tensor, int, bool, bool) -> (Tensor)") emit("aten::empty_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::new_empty : (Tensor, int[], int?, int?, Device?, bool?) -> (Tensor)") emit("aten::zeros_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::ones_like : (Tensor, int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::empty.memory_format : (int[], int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::expand : (Tensor, int[], bool) -> (Tensor)") emit("aten::expand_as : (Tensor, Tensor) -> (Tensor)") emit("aten::broadcast_to : (Tensor, int[]) -> (Tensor)") emit("aten::index.Tensor : (Tensor, Tensor?[]) -> (Tensor)") emit("aten::index_select : (Tensor, int, Tensor) -> (Tensor)") emit("aten::_index_put_impl_ : (Tensor, Tensor?[], Tensor, bool, bool) -> (Tensor)") emit("aten::item : (Tensor) -> (Scalar)") emit("aten::masked_select : (Tensor, Tensor) -> (Tensor)") emit("aten::numel : (Tensor) -> (int)") emit("aten::repeat : (Tensor, int[]) -> (Tensor)") emit("aten::reshape : (Tensor, int[]) -> (Tensor)") emit("aten::_reshape_alias : (Tensor, int[], int[]) -> (Tensor)") emit("aten::resize_ : (Tensor, int[], int?) -> (Tensor)") emit("aten::select.int : (Tensor, int, int) -> (Tensor)") emit("aten::size.int : (Tensor, int) -> (int)", has_folder=True) emit("aten::stack : (Tensor[], int) -> (Tensor)") emit("aten::sum : (Tensor, int?) -> (Tensor)") emit("aten::sum.dim_IntList : (Tensor, int[], bool, int?) -> (Tensor)") emit("aten::max : (Tensor) -> (Tensor)") emit("aten::max.dim : (Tensor, int, bool) -> (Tensor, Tensor)") emit("aten::to.dtype : (Tensor, int, bool, bool, int?) 
-> (Tensor)", has_folder=True) emit("aten::to.dtype_layout : (Tensor, int?, int?, Device?, bool?, bool, bool, int?) -> (Tensor)", has_folder=True) emit("aten::to.other : (Tensor, Tensor, bool, bool, int?) -> (Tensor)") emit("aten::to.prim_Device : (Tensor, Device?, int?, bool, bool) -> (Tensor)") emit("aten::type_as : (Tensor, Tensor) -> (Tensor)") emit("aten::view : (Tensor, int[]) -> (Tensor)", has_folder=True) emit("aten::_unsafe_view : (Tensor, int[]) -> (Tensor)") emit("aten::where.self : (Tensor, Tensor, Tensor) -> (Tensor)") emit("aten::where.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)") emit("aten::where.ScalarOther : (Tensor, Tensor, Scalar) -> (Tensor)") emit("aten::where.ScalarSelf : (Tensor, Scalar, Tensor) -> (Tensor)") emit("aten::slice.Tensor : (Tensor, int, int?, int?, int) -> (Tensor)") emit("aten::len.Tensor : (Tensor) -> (int)") emit("aten::cpu : (Tensor) -> (Tensor)") emit("aten::gather : (Tensor, int, Tensor, bool) -> (Tensor)") emit("aten::IntImplicit : (Tensor) -> (int)") emit("aten::tensor.float : (float, int?, Device?, bool) -> (Tensor)") emit("aten::Int.Tensor : (Tensor) -> (int)", has_folder=True) emit("aten::Float.Tensor : (Tensor) -> (float)", has_folder=True) emit_with_mutating_variants("aten::dropout : (Tensor, float, bool) -> (Tensor)") emit("aten::t : (Tensor) -> (Tensor)") emit("aten::full : (int[], Scalar, int?, int?, Device?, bool?) -> (Tensor)") emit("aten::full_like : (Tensor, Scalar, int?, int?, Device?, bool?, int?) -> (Tensor)") emit("aten::__contains__.str : (Dict(str, t), str) -> (bool)", has_folder=True) emit("aten::__getitem__.Dict_str : (Dict(str, t), str) -> (t)", has_folder=True) emit("aten::_set_item.str : (Dict(str, t), str, t) -> ()") emit("aten::keys.str : (Dict(str, t)) -> (str[])") emit("aten::get.default_str : (Dict(str, t), str, t) -> (t)") emit("aten::Delete.Dict_str : (Dict(str, t), str) -> ()") emit("aten::cat : (Tensor[], int) -> (Tensor)") emit("aten::append.t : (t[], t) -> (t[])") emit("aten::add.t : (t[], t[]) -> (t[])") emit("aten::eq.int_list : (int[], int[]) -> (bool)", has_folder=True) emit("aten::list.t : (t[]) -> (t[])") emit("aten::slice.t : (t[], int?, int?, int) -> (t[])") emit("aten::insert.t : (t[], int, t) -> ()") emit("aten::ne.int_list : (int[], int[]) -> (bool)") emit("aten::add.str : (str, str) -> (str)") emit("aten::eq.str : (str, str) -> (bool)", has_folder=True) emit("aten::str : (t) -> (str)") emit("aten::format : (...) 
-> (str)") emit("aten::join : (str, str[]) -> (str)") emit("aten::Float.Scalar : (Scalar) -> (float)", has_folder=True) emit("aten::Float.str : (str) -> (float)") emit("aten::Int.float : (float) -> (int)") emit("aten::__range_length : (int, int, int) -> (int)", has_folder=True) emit("aten::__derive_index : (int, int, int) -> (int)", has_folder=True) emit("aten::gt.int : (int, int) -> (bool)", has_folder=True) emit("aten::ge.int : (int, int) -> (bool)", has_folder=True) emit("aten::lt.int : (int, int) -> (bool)", has_folder=True) emit("aten::le.int : (int, int) -> (bool)", has_folder=True) emit("aten::ne.int : (int, int) -> (bool)", has_folder=True) emit("aten::eq.int : (int, int) -> (bool)", has_folder=True) emit("aten::floordiv.int : (int, int) -> (int)", has_folder=True) emit("aten::remainder.int : (int, int) -> (int)", has_folder=True) emit("aten::add.int : (int, int) -> (int)", has_folder=True) emit("aten::sub.int : (int, int) -> (int)", has_folder=True) emit("aten::mul.int : (int, int) -> (int)", has_folder=True) emit("aten::neg.int : (int) -> (int)", has_folder=True) emit("aten::log.int : (int) -> (float)") emit("aten::add.float_int : (float, int) -> (float)") emit("aten::sub.float : (float, float) -> (float)") emit("aten::mul.float : (float, float) -> (float)") emit("aten::div.float : (float, float) -> (float)", has_folder=True) emit("aten::neg.float : (float) -> (float)") emit("aten::eq.float : (float, float) -> (bool)", has_folder=True) emit("aten::gt.float : (float, float) -> (bool)", has_folder=True) emit("aten::ge.float : (float, float) -> (bool)", has_folder=True) emit("aten::lt.float : (float, float) -> (bool)", has_folder=True) emit("aten::lt.float_int : (float, int) -> (bool)") emit("aten::ge.float_int : (float, int) -> (bool)") emit("aten::ne.float_int : (float, int) -> (bool)") emit("aten::gt.float_int : (float, int) -> (bool)") emit("aten::__and__.bool : (bool, bool) -> (bool)") emit("aten::ne.bool : (bool, bool) -> (bool)", has_folder=True) emit("aten::__is__ : (t1, t2) -> (bool)", has_folder=True) emit("aten::__isnot__ : (t1, t2) -> (bool)", has_folder=True) emit("aten::__not__ : (bool) -> (bool)", has_folder=True) emit("aten::len.t : (t[]) -> (int)", has_folder=True, has_canonicalizer=True) emit("aten::__getitem__.t : (t[], int) -> (t)", has_canonicalizer=True) emit("aten::_set_item.t : (t[], int, t) -> (t[])") emit("aten::div : (Scalar, Scalar) -> (float)") emit("aten::add : (Scalar, Scalar) -> (Scalar)") emit("aten::eq.device : (Device, Device) -> (bool)") emit("aten::ceil.float : (float) -> (int)", has_folder=True) emit("aten::_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)") emit("aten::tanh_backward : (Tensor, Tensor) -> (Tensor)") emit("aten::gelu_backward : (Tensor, Tensor, str) -> (Tensor)") emit("aten::_log_softmax_backward_data : (Tensor, Tensor, int, int) -> (Tensor)") emit("prim::layout : (Tensor) -> (int)") emit("prim::TupleIndex : (Any, int) -> (Any)", has_canonicalizer=True) emit("prim::device : (Tensor) -> (Device)") emit("prim::dtype : (Tensor) -> (int)", has_folder=True) emit("prim::TupleUnpack : (Any) -> (...)", has_canonicalizer=True) emit("prim::NumToTensor.Scalar : (Scalar) -> (Tensor)") emit("prim::min.self_int : (int[]) -> (int)", has_folder=True) emit("prim::min.int : (int, int) -> (int)") emit("prim::max.self_int : (int[]) -> (int)") emit("prim::max.int : (int, int) -> (int)", has_folder=True) emit("prim::RaiseException : (str, str?) 
-> ()") emit("prim::Uninitialized : () -> (Any)", has_canonicalizer=True, traits=["NoSideEffect"]) emit("prim::unchecked_cast : (t) -> (t)", has_folder=True, traits=["DeclareOpInterfaceMethods<CastOpInterface>"]) emit("prim::Print : (...) -> ()") emit("prim::tolist : (...) -> (...)") emit("prim::abs.Scalar : (Scalar) -> (Scalar)") emit( "quantized::linear : (Tensor, __torch__.torch.classes.quantized.LinearPackedParamsBase, float, int) -> (Tensor)", traits=["HasValueSemantics"]) def dump_registered_ops(outfile: TextIO, registry: Registry): for _, v in sorted(registry.by_unique_key.items()): outfile.write(repr(v)) def main(args: argparse.Namespace): registry = Registry.load() if args.debug_registry_dump: with open(args.debug_registry_dump, "w") as debug_registry_dump: dump_registered_ops(debug_registry_dump, registry) td_path = os.path.join(args.torch_ir_include_dir, "GeneratedTorchOps.td") with open(td_path, "w") as f_td: emitter_td = TextEmitter(f_td) emitter_td.print(ODS_BANNER) emit_ops(emitter_td, registry) def _create_argparse() -> argparse.ArgumentParser: parser = argparse.ArgumentParser(prog="generate_ods") parser.add_argument( "--torch_ir_include_dir", required=True, help="Directory in include/ containing the Torch dialect") parser.add_argument( "--debug_registry_dump", help="File to dump the the PyTorch JIT operator registry into") return parser if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) parser = _create_argparse() args = parser.parse_args() main(args)
true
true
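The emit()/emit_with_mutating_variants() registrations in the content field above are the whole mechanism by which this generator turns PyTorch JIT operator signatures into TableGen ODS definitions. As a hedged sketch, adding one more op would be a one-line change inside emit_ops; the op choices below are illustrative additions, not part of the original file.

# Hedged sketch: hypothetical additions inside emit_ops(), reusing the same
# helpers shown above. The chosen ops are illustrative, not from the original.
emit("aten::trunc : (Tensor) -> (Tensor)")                 # plain value-semantic op
emit("aten::clamp_min : (Tensor, Scalar) -> (Tensor)",
     has_folder=True)                                      # also declares a fold hook
emit_with_mutating_variants(
    "aten::round : (Tensor) -> (Tensor)")                  # emits aten::round and aten::round_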
f7039ed8f3ea00ea4a9006ac8bbcd14458307bde
2,085
py
Python
MaixPy/components/micropython/port/src/ulab/micropython-ulab/docs/manual/source/conf.py
valerio-vaccaro/krux
a3718a4e12ef6f92ada98e02d0d286a971a56434
[ "MIT" ]
1
2020-08-28T20:38:54.000Z
2020-08-28T20:38:54.000Z
MaixPy/components/micropython/port/src/ulab/micropython-ulab/docs/manual/source/conf.py
valerio-vaccaro/krux
a3718a4e12ef6f92ada98e02d0d286a971a56434
[ "MIT" ]
null
null
null
MaixPy/components/micropython/port/src/ulab/micropython-ulab/docs/manual/source/conf.py
valerio-vaccaro/krux
a3718a4e12ef6f92ada98e02d0d286a971a56434
[ "MIT" ]
null
null
null
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- Project information -----------------------------------------------------

project = 'micropython-ulab'
copyright = '2019, Zoltán Vörös'
author = 'Zoltán Vörös'

# The full version, including alpha/beta/rc tags
release = '0.26'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

master_doc = 'index'
author=u'Zoltán Vörös'
copyright=author
language='en'

latex_documents = [
    (master_doc, 'ulab-manual.tex', 'Micropython ulab documentation',
     'Zoltán Vörös', 'manual'),
]
30.661765
79
0.667626
project = 'micropython-ulab'
copyright = '2019, Zoltán Vörös'
author = 'Zoltán Vörös'

release = '0.26'

extensions = [
]

templates_path = ['_templates']

exclude_patterns = []

html_theme = 'sphinx_rtd_theme'

html_static_path = ['_static']

master_doc = 'index'
author=u'Zoltán Vörös'
copyright=author
language='en'

latex_documents = [
    (master_doc, 'ulab-manual.tex', 'Micropython ulab documentation',
     'Zoltán Vörös', 'manual'),
]
true
true
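The conf.py above is a stock Sphinx configuration with an empty extensions list. If autodoc were wanted for this manual, the change would be confined to that list; the extension names below are standard Sphinx modules, offered as a hedged sketch rather than as part of the original config.

# Hedged sketch: not in the original conf.py, which leaves `extensions` empty.
extensions = [
    'sphinx.ext.autodoc',    # pull documentation out of docstrings
    'sphinx.ext.napoleon',   # understand Google/NumPy docstring styles
]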
f7039f924072b7979a9db1aca50b6ea7282999b5
1,613
py
Python
src/textlabelling/experimentparam.py
aakinlalu/textlabelling
e2c8fe5f68e92d70249d37cd6eb13a3ab046a891
[ "MIT" ]
1
2021-01-12T01:04:00.000Z
2021-01-12T01:04:00.000Z
src/textlabelling/experimentparam.py
aakinlalu/textlabelling
e2c8fe5f68e92d70249d37cd6eb13a3ab046a891
[ "MIT" ]
null
null
null
src/textlabelling/experimentparam.py
aakinlalu/textlabelling
e2c8fe5f68e92d70249d37cd6eb13a3ab046a891
[ "MIT" ]
null
null
null
import spacy
from spacy.lang.en import English
from spacy.util import minibatch, compounding
from spacy.util import decaying


class ExperimentParam:
    def __init__(self, TRAIN_DATA: list,
                 max_batch_sizes: dict,
                 model_type='ner',
                 dropout_start: float = 0.6,
                 dropout_end: float = 0.2,
                 interval: float = 1e-4):
        self.TRAIN_DATA = TRAIN_DATA
        self.max_batch_sizes = max_batch_sizes
        self.model_type = model_type
        self.dropout_start = dropout_start
        self.dropout_end = dropout_end
        self.interval = interval

    def get_batches(self):
        """
        max_batch_sizes: initialize with batch size 1 and compound to a
        maximum determined by your data size and problem type, e.g.
        {"tagger": 32, "parser": 16, "ner": 16, "textcat": 64}
        """
        max_batch_size = self.max_batch_sizes[self.model_type]
        if len(self.TRAIN_DATA) < 1000:
            max_batch_size /= 2
        if len(self.TRAIN_DATA) < 500:
            max_batch_size /= 2
        batch_size = compounding(1, max_batch_size, 1.001)
        batches = minibatch(self.TRAIN_DATA, size=batch_size)
        return batches

    @property
    def determine_dropout(self):
        """
        For small datasets, it's useful to set a high dropout rate at first,
        and decay it down towards a more reasonable value. This helps avoid
        the network immediately overfitting, while still encouraging it to
        learn some of the more interesting things in your data.
        """
        dropout = decaying(self.dropout_start, self.dropout_end, self.interval)
        return dropout
40.325
269
0.6677
import spacy
from spacy.lang.en import English
from spacy.util import minibatch, compounding
from spacy.util import decaying


class ExperimentParam:
    def __init__(self, TRAIN_DATA: list,
                 max_batch_sizes: dict,
                 model_type='ner',
                 dropout_start: float = 0.6,
                 dropout_end: float = 0.2,
                 interval: float = 1e-4):
        self.TRAIN_DATA = TRAIN_DATA
        self.max_batch_sizes = max_batch_sizes
        self.model_type = model_type
        self.dropout_start = dropout_start
        self.dropout_end = dropout_end
        self.interval = interval

    def get_batches(self):
        max_batch_size = self.max_batch_sizes[self.model_type]
        if len(self.TRAIN_DATA) < 1000:
            max_batch_size /= 2
        if len(self.TRAIN_DATA) < 500:
            max_batch_size /= 2
        batch_size = compounding(1, max_batch_size, 1.001)
        batches = minibatch(self.TRAIN_DATA, size=batch_size)
        return batches

    @property
    def determine_dropout(self):
        dropout = decaying(self.dropout_start, self.dropout_end, self.interval)
        return dropout
true
true
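Since the class above only wraps spaCy's compounding/minibatch/decaying utilities, a usage sketch is short. The training example below is an illustrative assumption following spaCy's (text, annotations) convention, and the batch-size caps echo the get_batches docstring.

# Hedged usage sketch for ExperimentParam above; TRAIN_DATA is illustrative.
max_batch_sizes = {"tagger": 32, "parser": 16, "ner": 16, "textcat": 64}
TRAIN_DATA = [("Uber blew through $1 million", {"entities": [(0, 4, "ORG")]})]

params = ExperimentParam(TRAIN_DATA, max_batch_sizes, model_type="ner")
dropout = params.determine_dropout        # generator decaying 0.6 -> 0.2
for batch in params.get_batches():        # compounding minibatch sizes
    print(len(batch), next(dropout))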
f7039fc4a1a734c44cfbc0383120c8162bc3c829
678
py
Python
Part_3_advanced/m14_metaclass/register_cls/example_2/main.py
Mikma03/InfoShareacademy_Python_Courses
3df1008c8c92831bebf1625f960f25b39d6987e6
[ "MIT" ]
null
null
null
Part_3_advanced/m14_metaclass/register_cls/example_2/main.py
Mikma03/InfoShareacademy_Python_Courses
3df1008c8c92831bebf1625f960f25b39d6987e6
[ "MIT" ]
null
null
null
Part_3_advanced/m14_metaclass/register_cls/example_2/main.py
Mikma03/InfoShareacademy_Python_Courses
3df1008c8c92831bebf1625f960f25b39d6987e6
[ "MIT" ]
null
null
null
from example_system import serializer
from example_system.bike import Bike
from example_system.human import Human


def run_example() -> None:
    krzysztof = Human(name="Krzysztof", age=37)
    giant_bike = Bike(brand="Giant", model="Contend AR")

    krzysztof_json = serializer.serialize(krzysztof)
    print(krzysztof_json)

    bike_json = serializer.serialize(giant_bike)
    print(bike_json)

    krzysztof_deserialized = serializer.deserialize(krzysztof_json)
    print(krzysztof)
    print(krzysztof_deserialized)

    bike_deserialized = serializer.deserialize(bike_json)
    print(giant_bike)
    print(bike_deserialized)


if __name__ == "__main__":
    run_example()
27.12
67
0.755162
from example_system import serializer
from example_system.bike import Bike
from example_system.human import Human


def run_example() -> None:
    krzysztof = Human(name="Krzysztof", age=37)
    giant_bike = Bike(brand="Giant", model="Contend AR")

    krzysztof_json = serializer.serialize(krzysztof)
    print(krzysztof_json)

    bike_json = serializer.serialize(giant_bike)
    print(bike_json)

    krzysztof_deserialized = serializer.deserialize(krzysztof_json)
    print(krzysztof)
    print(krzysztof_deserialized)

    bike_deserialized = serializer.deserialize(bike_json)
    print(giant_bike)
    print(bike_deserialized)


if __name__ == "__main__":
    run_example()
true
true
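run_example() above exercises a serializer that can round-trip any registered class, but the registration machinery lives elsewhere in example_system (the record's path suggests a metaclass-based class registry). The sketch below is one plausible way such a registry could support that round trip; every name in it is an assumption, not the package's actual code.

# Hedged sketch of a metaclass class registry supporting the round trip above.
import json

REGISTRY: dict = {}

class Registered(type):
    def __new__(mcs, name, bases, namespace):
        cls = super().__new__(mcs, name, bases, namespace)
        REGISTRY[name] = cls                     # every class registers itself
        return cls

class Human(metaclass=Registered):
    def __init__(self, name: str, age: int):
        self.name, self.age = name, age

def serialize(obj) -> str:
    return json.dumps({"type": type(obj).__name__, "data": vars(obj)})

def deserialize(payload: str):
    raw = json.loads(payload)
    return REGISTRY[raw["type"]](**raw["data"])  # rebuild via the registry

print(deserialize(serialize(Human(name="Krzysztof", age=37))).name)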
f703a0383ac9c6b9d0f7e37d998648f8cfbc1ec4
343
py
Python
other/dingding/dingtalk/api/rest/OapiSmartdeviceBatcheventPostRequest.py
hth945/pytest
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
[ "Apache-2.0" ]
null
null
null
other/dingding/dingtalk/api/rest/OapiSmartdeviceBatcheventPostRequest.py
hth945/pytest
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
[ "Apache-2.0" ]
null
null
null
other/dingding/dingtalk/api/rest/OapiSmartdeviceBatcheventPostRequest.py
hth945/pytest
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
[ "Apache-2.0" ]
null
null
null
'''
Created by auto_sdk on 2020.11.25
'''
from dingtalk.api.base import RestApi
class OapiSmartdeviceBatcheventPostRequest(RestApi):
    def __init__(self,url=None):
        RestApi.__init__(self,url)
        self.device_event_vos = None

    def getHttpMethod(self):
        return 'POST'

    def getapiname(self):
        return 'dingtalk.oapi.smartdevice.batchevent.post'
22.866667
52
0.772595
from dingtalk.api.base import RestApi
class OapiSmartdeviceBatcheventPostRequest(RestApi):
    def __init__(self,url=None):
        RestApi.__init__(self,url)
        self.device_event_vos = None

    def getHttpMethod(self):
        return 'POST'

    def getapiname(self):
        return 'dingtalk.oapi.smartdevice.batchevent.post'
true
true
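The generated class above only carries the endpoint name and the payload slot; sending the request goes through the RestApi base class. Below is a hedged usage sketch: the endpoint URL, token, payload shape, and the getResponse() helper are assumptions about the surrounding dingtalk SDK, not verified against it.

# Hedged sketch; all names beyond the request class itself are assumptions.
req = OapiSmartdeviceBatcheventPostRequest(
    "https://oapi.dingtalk.com/topapi/smartdevice/batchevent/post")
req.device_event_vos = [{"device_id": "demo-001", "event_type": 1}]
resp = req.getResponse("<access_token>")   # assumed RestApi helper
print(resp)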
f703a1141807b8881b25f73ca2ae811357879d80
1,032
py
Python
hermione/module_templates/__IMPLEMENTED_BASE__/src/predict.py
RodrigoATorres/hermione
6cbed73e309f8025a48f33165d8f29561c6a3cc7
[ "Apache-2.0" ]
183
2020-06-03T22:43:14.000Z
2022-03-17T22:39:07.000Z
hermione/file_text/predict.txt
gquaresma89/hermione
c51f5e54a41609099eef48990c7ad7018dcdf41a
[ "Apache-2.0" ]
31
2020-06-03T22:55:18.000Z
2022-03-27T20:06:17.000Z
hermione/file_text/predict.txt
gquaresma89/hermione
c51f5e54a41609099eef48990c7ad7018dcdf41a
[ "Apache-2.0" ]
43
2020-06-03T22:45:03.000Z
2021-12-29T19:43:54.000Z
import pandas as pd
import io
from joblib import load
import logging

logging.getLogger().setLevel(logging.INFO)


def generate_data():
    new_data = pd.DataFrame({
        'Pclass': [3, 2, 1],
        'Sex': ['male', 'female', 'male'],
        'Age': [4, 22, 28]
    })
    return new_data


def load_model():
    try:
        return load('../output/titanic_model_rf.pkl')
    except:
        try:
            return load('../../output/titanic_model_rf.pkl')
        except:
            logging.error('Model not loaded')


def predict_new(X, probs=True):
    model = load_model()
    p = model.get_preprocessing()

    X = p.clean_data(X)
    X = p.categ_encoding(X)

    columns = model.get_columns()
    for col in columns:
        if col not in X.columns:
            X[col] = 0

    if probs:
        return model.predict_proba(X)[:, 1]
    else:
        return model.predict(X)


if __name__ == "__main__":
    df = generate_data()
    preds = predict_new(df, probs=True)
    logging.info("Predictions:")
    print(preds)
20.64
60
0.584302
import pandas as pd
import io
from joblib import load
import logging

logging.getLogger().setLevel(logging.INFO)


def generate_data():
    new_data = pd.DataFrame({
        'Pclass': [3, 2, 1],
        'Sex': ['male', 'female', 'male'],
        'Age': [4, 22, 28]
    })
    return new_data


def load_model():
    try:
        return load('../output/titanic_model_rf.pkl')
    except:
        try:
            return load('../../output/titanic_model_rf.pkl')
        except:
            logging.error('Model not loaded')


def predict_new(X, probs=True):
    model = load_model()
    p = model.get_preprocessing()

    X = p.clean_data(X)
    X = p.categ_encoding(X)

    columns = model.get_columns()
    for col in columns:
        if col not in X.columns:
            X[col] = 0

    if probs:
        return model.predict_proba(X)[:, 1]
    else:
        return model.predict(X)


if __name__ == "__main__":
    df = generate_data()
    preds = predict_new(df, probs=True)
    logging.info("Predictions:")
    print(preds)
true
true
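The only subtle step in predict_new above is aligning the one-hot encoded columns of new data with the columns the model saw at training time; zero-filling the missing ones keeps the feature matrix consistent. A self-contained sketch of that step follows (align_columns is an illustrative name, not part of hermione):

# Standalone illustration of the column-alignment loop in predict_new.
import pandas as pd

def align_columns(X: pd.DataFrame, training_columns: list) -> pd.DataFrame:
    for col in training_columns:
        if col not in X.columns:
            X[col] = 0                    # category absent from this batch
    return X[training_columns]            # enforce training-time column order

X_new = pd.DataFrame({"Age": [28], "Sex_male": [1]})
print(align_columns(X_new, ["Age", "Sex_male", "Sex_female"]))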
f703a230af22a6d9ac2792f00e4cc97acc420aa4
8,919
py
Python
src/frico/blocks.py
mmangus/frico
0d95340a53a7f6da3792fdd241eb3ed986fe894b
[ "MIT" ]
null
null
null
src/frico/blocks.py
mmangus/frico
0d95340a53a7f6da3792fdd241eb3ed986fe894b
[ "MIT" ]
null
null
null
src/frico/blocks.py
mmangus/frico
0d95340a53a7f6da3792fdd241eb3ed986fe894b
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Generic, Type, TypeVar, Union

from .devices import I2CDevice
from .parsers import RegisterParser
from .typing import RegisterState

BlockType = TypeVar("BlockType")


class RegisterBlock(Generic[BlockType], ABC):
    """
    Abstract base class for collections of registers that represent distinct
    features of an I2C device. A RegisterBlock translates between high-level
    data structures and the low-level representation of that data as
    expressed by RegisterParsers. For example, for the DS series RTCs, there
    are subclasses of RegisterBlock for the clock, the alarms, and their
    configuration states. The Clock subclass encapsulates RegisterParsers
    for the BCD-ish encoding of the Hour, Minute, Second, etc. stored in the
    device registers.

    RegisterBlock is a Generic type. When subclassing, add the appropriate
    type for the value represented by the subclass to its signature:

        class TimekeepingRegisterBlock(RegisterBlock[datetime]):
            ...

    A RegisterBlock subclass should define one or more attributes that are
    RegisterParsers. Subclasses must also define two methods:

        1) `_value` to read the data from its attributes and produce a
           value of the designated type
        2) `_prepare_update` to set its attributes to a given value

    For example, suppose some device stored a positive decimal number like
    12.34 with the integer part in register 0x00 and the fractional part in
    register 0x01, each represented as 2 digit standard BCD. You want to
    read or write this value as a 2-tuple of ints. A RegisterBlock for
    accessing this number could be:

        class DecimalRegisterBlock(RegisterBlock[Tuple[int, int]]):
            integer_part = BCDRegisterParser(0x00)
            fractional_part = BCDRegisterParser(0x01)

            def _value(self) -> Tuple[int, int]:
                return self.integer_part, self.fractional_part

            def _prepare_update(self, value: Tuple[int, int]) -> None:
                self.integer_part, self.fractional_part = value
    """

    @property
    def register_state(self) -> "RegisterState":
        """
        Accesses register state from the most recent read of the parent
        device.
        """
        return self._register_state

    @register_state.setter
    def register_state(self, state: "RegisterState") -> None:
        """
        Setting register_state also keeps a copy to use as pending_state.
        """
        self._register_state = state
        self.pending_state = self._register_state.copy()

    def __init__(self) -> None:
        """
        Initialize a new RegisterBlock. RegisterBlock is a data descriptor,
        so it must be used as an attribute on a subclass of I2CDevice in
        order to have access to the device register state.
        """
        # The very first access to the descriptor will populate actual state.
        self.register_state: RegisterState = []

    def __get__(
        self, instance: "I2CDevice", owner: Type["I2CDevice"]
    ) -> BlockType:
        """
        RegisterBlock is a data descriptor with access to the state of the
        I2CDevice instance that it belongs to, so we can use that register
        state for all parsers associated with this RegisterBlock (see
        RegisterParser.__get__). It is important for all RegisterParser
        instances to have a shared register state (i.e. the state stored in
        this class) in order to avoid mistakes if the state changes during a
        read. For example, if an RTC's Second register is read at 0 minutes
        59 seconds, and then the clock ticks before we read the Minute
        register, the time would come out as 1 minute 59 seconds. Maxim DS
        RTCs (and probably others) use 2 sets of registers to prevent this
        issue from affecting I2C block reads, so we just need to make sure
        we only make one call to `read_registers()` for all the
        RegisterParsers within a RegisterBlock.
        """
        if not instance:
            raise AttributeError(
                "RegisterBlock must be accessed from an I2CDevice instance."
            )
        self.register_state = instance.read_registers()
        return self._value()

    def __set__(self, instance: "I2CDevice", value: BlockType) -> None:
        """
        Setting the value of the RegisterBlock updates its state via the
        RegisterParser descriptors that belong to the block.
        """
        # Make sure we have the latest state loaded before modifying it
        self.register_state = instance.read_registers()
        self._prepare_update(value)
        # A minor optimization to only write a contiguous block from the first
        # changed register to the last changed register, leaving the rest
        # unmodified. This helps improve the speed of small updates.
        addresses_changed = [
            i
            for i, b in enumerate(self.pending_state)
            if b != self._register_state[i]
        ]
        first_changed = min(addresses_changed)
        last_changed = max(addresses_changed)
        to_write = self.pending_state[first_changed : last_changed + 1]
        instance.write_registers(to_write, first_changed)

    @abstractmethod
    def _prepare_update(self, value: BlockType) -> None:
        """
        Subclasses should define behavior for setting the values of their
        RegisterParser attributes to reflect the requested `value` for the
        RegisterBlock. Parsers' `__set__` methods call
        `update_register_state` on this instance so they can all keep their
        pending state in sync.
        """

    @abstractmethod
    def _value(self) -> BlockType:
        """
        Value should return an appropriate object to represent the state of
        this register block e.g. a datetime for the clock/alarms or a float
        for the temperature
        """

    def update_register_state(
        self, address: Union[int, slice], value: "RegisterState"
    ) -> None:
        """
        RegisterParsers should call this method to stage their changes to
        the register state. This allows parsers to be aware of each other's
        pending changes so e.g. two distinct parsers can flip two different
        bits in the same register. Once all parsers have staged their
        changes (implement via _prepare_update), the __set__ method will
        write all the changes to the parent I2CDevice instance.

        Parameters
        ----------
        address : Union[int, slice]
            The register address(es) to set
        value : RegisterState
            The bytes to insert at address
        """
        if isinstance(address, int):
            address = slice(address, address + 1)
        if len(value) != len(self.pending_state[address]):
            raise ValueError("Value must have as many bytes as slice")
        self.pending_state[address] = value


class DatetimeRegisterBlock(RegisterBlock[datetime]):
    """
    Base class whose subclasses keep track of the register addresses
    where various components of the date/time/alarms are stored for RTC
    ICs such as the Maxim DS series.
    """

    hour: RegisterParser[int]
    minute: RegisterParser[int]
    day_of_month: RegisterParser[int]

    # Define defaults for attributes that may be left unset, e.g. the DS3231
    # and DS1337 have no seconds for Alarm 2, and no year or month for either
    # Alarm.
    @property
    def second(self) -> Union[RegisterParser[int], int]:
        return 0

    @second.setter
    def second(self, value: int) -> None:
        pass

    @property
    def month(self) -> Union[RegisterParser[int], int]:
        return datetime.now().month

    @month.setter
    def month(self, value: int) -> None:
        pass

    @property
    def year(self) -> Union[RegisterParser[int], int]:
        return datetime.now().year

    @year.setter
    def year(self, value: int) -> None:
        pass

    def _prepare_update(self, value: datetime) -> None:
        # FIXME pycharm doesn't understand you can assign an int to the
        # parser descriptors, but mypy does
        self.second = value.second
        self.minute = value.minute
        self.hour = value.hour
        self.day_of_month = value.day
        self.month = value.month
        self.year = value.year

    def _value(self) -> datetime:
        try:
            value = datetime(
                self.year,
                self.month,
                self.day_of_month,
                self.hour,
                self.minute,
                self.second,
            )
        except ValueError as err:
            raise ValueError(
                "Could not parse datetime. Perhaps the register state is"
                " invalid? Try setting to a known valid state first."
            ) from err
        return value
38.947598
79
0.659603
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Generic, Type, TypeVar, Union

from .devices import I2CDevice
from .parsers import RegisterParser
from .typing import RegisterState

BlockType = TypeVar("BlockType")


class RegisterBlock(Generic[BlockType], ABC):

    @property
    def register_state(self) -> "RegisterState":
        return self._register_state

    @register_state.setter
    def register_state(self, state: "RegisterState") -> None:
        self._register_state = state
        self.pending_state = self._register_state.copy()

    def __init__(self) -> None:
        self.register_state: RegisterState = []

    def __get__(
        self, instance: "I2CDevice", owner: Type["I2CDevice"]
    ) -> BlockType:
        if not instance:
            raise AttributeError(
                "RegisterBlock must be accessed from an I2CDevice instance."
            )
        self.register_state = instance.read_registers()
        return self._value()

    def __set__(self, instance: "I2CDevice", value: BlockType) -> None:
        self.register_state = instance.read_registers()
        self._prepare_update(value)
        addresses_changed = [
            i
            for i, b in enumerate(self.pending_state)
            if b != self._register_state[i]
        ]
        first_changed = min(addresses_changed)
        last_changed = max(addresses_changed)
        to_write = self.pending_state[first_changed : last_changed + 1]
        instance.write_registers(to_write, first_changed)

    @abstractmethod
    def _prepare_update(self, value: BlockType) -> None:

    @abstractmethod
    def _value(self) -> BlockType:

    def update_register_state(
        self, address: Union[int, slice], value: "RegisterState"
    ) -> None:
        if isinstance(address, int):
            address = slice(address, address + 1)
        if len(value) != len(self.pending_state[address]):
            raise ValueError("Value must have as many bytes as slice")
        self.pending_state[address] = value


class DatetimeRegisterBlock(RegisterBlock[datetime]):

    hour: RegisterParser[int]
    minute: RegisterParser[int]
    day_of_month: RegisterParser[int]

    @property
    def second(self) -> Union[RegisterParser[int], int]:
        return 0

    @second.setter
    def second(self, value: int) -> None:
        pass

    @property
    def month(self) -> Union[RegisterParser[int], int]:
        return datetime.now().month

    @month.setter
    def month(self, value: int) -> None:
        pass

    @property
    def year(self) -> Union[RegisterParser[int], int]:
        return datetime.now().year

    @year.setter
    def year(self, value: int) -> None:
        pass

    def _prepare_update(self, value: datetime) -> None:
        # parser descriptors, but mypy does
        self.second = value.second
        self.minute = value.minute
        self.hour = value.hour
        self.day_of_month = value.day
        self.month = value.month
        self.year = value.year

    def _value(self) -> datetime:
        try:
            value = datetime(
                self.year,
                self.month,
                self.day_of_month,
                self.hour,
                self.minute,
                self.second,
            )
        except ValueError as err:
            raise ValueError(
                "Could not parse datetime. Perhaps the register state is"
                " invalid? Try setting to a known valid state first."
            ) from err
        return value
true
true
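To make the descriptor protocol above concrete, here is a hedged sketch of a clock block wired to specific registers, in the spirit of the DecimalRegisterBlock example from the RegisterBlock docstring. BCDRegisterParser is taken from that docstring; the register addresses and the device constructor are assumptions for illustration.

# Hedged sketch; addresses, parser type, and RTC() signature are assumed.
from datetime import datetime

class ClockBlock(DatetimeRegisterBlock):
    second = BCDRegisterParser(0x00)
    minute = BCDRegisterParser(0x01)
    hour = BCDRegisterParser(0x02)
    day_of_month = BCDRegisterParser(0x04)
    month = BCDRegisterParser(0x05)
    year = BCDRegisterParser(0x06)

class RTC(I2CDevice):
    clock = ClockBlock()        # data descriptor: read/write as a datetime

rtc = RTC()                     # constructor arguments are an assumption
rtc.clock = datetime.now()      # __set__ writes only the changed registers
print(rtc.clock)                # __get__ parses a single block read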
f703a27ca290c7545f1f668cdb2157b3fb23e494
65
py
Python
atompack/__init__.py
seatonullberg/atompack
5d488ec8a4949cdeea3a97072ed092cc331c2198
[ "MIT" ]
null
null
null
atompack/__init__.py
seatonullberg/atompack
5d488ec8a4949cdeea3a97072ed092cc331c2198
[ "MIT" ]
null
null
null
atompack/__init__.py
seatonullberg/atompack
5d488ec8a4949cdeea3a97072ed092cc331c2198
[ "MIT" ]
null
null
null
"""A flexible Python library for atomic structure generation."""
32.5
64
0.769231
true
true
f703a2fda15b537ff3df48a0cd9175e8f5d2f38c
43,765
py
Python
seno/rpc/wallet_rpc_api.py
emilson0407/seno-blockchain
fa73fc06639faaacbb82504a6c8698c3bcab57c0
[ "Apache-2.0" ]
33
2021-06-26T22:50:48.000Z
2022-02-09T04:31:40.000Z
seno/rpc/wallet_rpc_api.py
emilson0407/seno-blockchain
fa73fc06639faaacbb82504a6c8698c3bcab57c0
[ "Apache-2.0" ]
18
2021-06-27T17:13:13.000Z
2022-01-04T11:45:56.000Z
seno/rpc/wallet_rpc_api.py
emilson0407/seno-blockchain
fa73fc06639faaacbb82504a6c8698c3bcab57c0
[ "Apache-2.0" ]
19
2021-06-26T00:17:08.000Z
2022-03-15T06:58:21.000Z
import asyncio import logging import time from datetime import datetime from pathlib import Path from typing import Callable, Dict, List, Optional, Tuple from blspy import PrivateKey, G1Element from seno.cmds.init_funcs import check_keys from seno.consensus.block_rewards import calculate_base_farmer_reward from seno.protocols.protocol_message_types import ProtocolMessageTypes from seno.server.outbound_message import NodeType, make_msg from seno.simulator.simulator_protocol import FarmNewBlockProtocol from seno.types.blockchain_format.coin import Coin from seno.types.blockchain_format.sized_bytes import bytes32 from seno.util.bech32m import decode_puzzle_hash, encode_puzzle_hash from seno.util.byte_types import hexstr_to_bytes from seno.util.ints import uint32, uint64 from seno.util.keychain import bytes_to_mnemonic, generate_mnemonic from seno.util.path import path_from_root from seno.util.ws_message import WsRpcMessage, create_payload_dict from seno.wallet.cc_wallet.cc_wallet import CCWallet from seno.wallet.rl_wallet.rl_wallet import RLWallet from seno.wallet.did_wallet.did_wallet import DIDWallet from seno.wallet.trade_record import TradeRecord from seno.wallet.transaction_record import TransactionRecord from seno.wallet.util.backup_utils import download_backup, get_backup_info, upload_backup from seno.wallet.util.trade_utils import trade_record_to_dict from seno.wallet.util.transaction_type import TransactionType from seno.wallet.util.wallet_types import WalletType from seno.wallet.wallet_info import WalletInfo from seno.wallet.wallet_node import WalletNode # Timeout for response from wallet/full node for sending a transaction TIMEOUT = 30 log = logging.getLogger(__name__) class WalletRpcApi: def __init__(self, wallet_node: WalletNode): assert wallet_node is not None self.service = wallet_node self.service_name = "seno_wallet" def get_routes(self) -> Dict[str, Callable]: return { # Key management "/log_in": self.log_in, "/get_public_keys": self.get_public_keys, "/get_private_key": self.get_private_key, "/generate_mnemonic": self.generate_mnemonic, "/add_key": self.add_key, "/delete_key": self.delete_key, "/delete_all_keys": self.delete_all_keys, # Wallet node "/get_sync_status": self.get_sync_status, "/get_height_info": self.get_height_info, "/farm_block": self.farm_block, # Only when node simulator is running "/get_initial_freeze_period": self.get_initial_freeze_period, "/get_network_info": self.get_network_info, # Wallet management "/get_wallets": self.get_wallets, "/create_new_wallet": self.create_new_wallet, # Wallet "/get_wallet_balance": self.get_wallet_balance, "/get_transaction": self.get_transaction, "/get_transactions": self.get_transactions, "/get_next_address": self.get_next_address, "/send_transaction": self.send_transaction, "/create_backup": self.create_backup, "/get_transaction_count": self.get_transaction_count, "/get_farmed_amount": self.get_farmed_amount, "/create_signed_transaction": self.create_signed_transaction, # Coloured coins and trading "/cc_set_name": self.cc_set_name, "/cc_get_name": self.cc_get_name, "/cc_spend": self.cc_spend, "/cc_get_colour": self.cc_get_colour, "/create_offer_for_ids": self.create_offer_for_ids, "/get_discrepancies_for_offer": self.get_discrepancies_for_offer, "/respond_to_offer": self.respond_to_offer, "/get_trade": self.get_trade, "/get_all_trades": self.get_all_trades, "/cancel_trade": self.cancel_trade, # DID Wallet "/did_update_recovery_ids": self.did_update_recovery_ids, "/did_spend": self.did_spend, "/did_get_pubkey": 
self.did_get_pubkey, "/did_get_did": self.did_get_did, "/did_recovery_spend": self.did_recovery_spend, "/did_get_recovery_list": self.did_get_recovery_list, "/did_create_attest": self.did_create_attest, "/did_get_information_needed_for_recovery": self.did_get_information_needed_for_recovery, "/did_create_backup_file": self.did_create_backup_file, # RL wallet "/rl_set_user_info": self.rl_set_user_info, "/send_clawback_transaction:": self.send_clawback_transaction, "/add_rate_limited_funds:": self.add_rate_limited_funds, } async def _state_changed(self, *args) -> List[WsRpcMessage]: """ Called by the WalletNode or WalletStateManager when something has changed in the wallet. This gives us an opportunity to send notifications to all connected clients via WebSocket. """ if len(args) < 2: return [] data = { "state": args[0], } if args[1] is not None: data["wallet_id"] = args[1] if args[2] is not None: data["additional_data"] = args[2] return [create_payload_dict("state_changed", data, "seno_wallet", "wallet_ui")] async def _stop_wallet(self): """ Stops a currently running wallet/key, which allows starting the wallet with a new key. Each key has it's own wallet database. """ if self.service is not None: self.service._close() await self.service._await_closed() ########################################################################################## # Key management ########################################################################################## async def log_in(self, request): """ Logs in the wallet with a specific key. """ fingerprint = request["fingerprint"] if self.service.logged_in_fingerprint == fingerprint: return {"fingerprint": fingerprint} await self._stop_wallet() log_in_type = request["type"] recovery_host = request["host"] testing = False if "testing" in self.service.config and self.service.config["testing"] is True: testing = True if log_in_type == "skip": started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True) elif log_in_type == "restore_backup": file_path = Path(request["file_path"]) started = await self.service._start(fingerprint=fingerprint, backup_file=file_path) else: started = await self.service._start(fingerprint) if started is True: return {"fingerprint": fingerprint} elif testing is True and self.service.backup_initialized is False: response = {"success": False, "error": "not_initialized"} return response elif self.service.backup_initialized is False: backup_info = None backup_path = None try: private_key = self.service.get_key_for_fingerprint(fingerprint) last_recovery = await download_backup(recovery_host, private_key) backup_path = path_from_root(self.service.root_path, "last_recovery") if backup_path.exists(): backup_path.unlink() backup_path.write_text(last_recovery) backup_info = get_backup_info(backup_path, private_key) backup_info["backup_host"] = recovery_host backup_info["downloaded"] = True except Exception as e: log.error(f"error {e}") response = {"success": False, "error": "not_initialized"} if backup_info is not None: response["backup_info"] = backup_info response["backup_path"] = f"{backup_path}" return response return {"success": False, "error": "Unknown Error"} async def get_public_keys(self, request: Dict): fingerprints = [sk.get_g1().get_fingerprint() for (sk, seed) in self.service.keychain.get_all_private_keys()] return {"public_key_fingerprints": fingerprints} async def _get_private_key(self, fingerprint) -> Tuple[Optional[PrivateKey], Optional[bytes]]: for sk, seed in 
self.service.keychain.get_all_private_keys(): if sk.get_g1().get_fingerprint() == fingerprint: return sk, seed return None, None async def get_private_key(self, request): fingerprint = request["fingerprint"] sk, seed = await self._get_private_key(fingerprint) if sk is not None: s = bytes_to_mnemonic(seed) if seed is not None else None return { "private_key": { "fingerprint": fingerprint, "sk": bytes(sk).hex(), "pk": bytes(sk.get_g1()).hex(), "seed": s, }, } return {"success": False, "private_key": {"fingerprint": fingerprint}} async def generate_mnemonic(self, request: Dict): return {"mnemonic": generate_mnemonic().split(" ")} async def add_key(self, request): if "mnemonic" not in request: raise ValueError("Mnemonic not in request") # Adding a key from 24 word mnemonic mnemonic = request["mnemonic"] passphrase = "" try: sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase) except KeyError as e: return { "success": False, "error": f"The word '{e.args[0]}' is incorrect.'", "word": e.args[0], } fingerprint = sk.get_g1().get_fingerprint() await self._stop_wallet() # Makes sure the new key is added to config properly started = False check_keys(self.service.root_path) request_type = request["type"] if request_type == "new_wallet": started = await self.service._start(fingerprint=fingerprint, new_wallet=True) elif request_type == "skip": started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True) elif request_type == "restore_backup": file_path = Path(request["file_path"]) started = await self.service._start(fingerprint=fingerprint, backup_file=file_path) if started is True: return {"fingerprint": fingerprint} raise ValueError("Failed to start") async def delete_key(self, request): await self._stop_wallet() fingerprint = request["fingerprint"] self.service.keychain.delete_key_by_fingerprint(fingerprint) path = path_from_root( self.service.root_path, f"{self.service.config['database_path']}-{fingerprint}", ) if path.exists(): path.unlink() return {} async def delete_all_keys(self, request: Dict): await self._stop_wallet() self.service.keychain.delete_all_keys() path = path_from_root(self.service.root_path, self.service.config["database_path"]) if path.exists(): path.unlink() return {} ########################################################################################## # Wallet Node ########################################################################################## async def get_sync_status(self, request: Dict): assert self.service.wallet_state_manager is not None syncing = self.service.wallet_state_manager.sync_mode synced = await self.service.wallet_state_manager.synced() return {"synced": synced, "syncing": syncing, "genesis_initialized": True} async def get_height_info(self, request: Dict): assert self.service.wallet_state_manager is not None peak = self.service.wallet_state_manager.peak if peak is None: return {"height": 0} else: return {"height": peak.height} async def get_network_info(self, request: Dict): assert self.service.wallet_state_manager is not None network_name = self.service.config["selected_network"] address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"] return {"network_name": network_name, "network_prefix": address_prefix} async def farm_block(self, request): raw_puzzle_hash = decode_puzzle_hash(request["address"]) request = FarmNewBlockProtocol(raw_puzzle_hash) msg = make_msg(ProtocolMessageTypes.farm_new_block, request) await self.service.server.send_to_all([msg], 
NodeType.FULL_NODE) return {} ########################################################################################## # Wallet Management ########################################################################################## async def get_wallets(self, request: Dict): assert self.service.wallet_state_manager is not None wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries() return {"wallets": wallets} async def _create_backup_and_upload(self, host) -> None: assert self.service.wallet_state_manager is not None try: if "testing" in self.service.config and self.service.config["testing"] is True: return None now = time.time() file_name = f"backup_{now}" path = path_from_root(self.service.root_path, file_name) await self.service.wallet_state_manager.create_wallet_backup(path) backup_text = path.read_text() response = await upload_backup(host, backup_text) success = response["success"] if success is False: log.error("Failed to upload backup to wallet backup service") elif success is True: log.info("Finished upload of the backup file") except Exception as e: log.error(f"Exception in upload backup. Error: {e}") async def create_new_wallet(self, request: Dict): assert self.service.wallet_state_manager is not None wallet_state_manager = self.service.wallet_state_manager main_wallet = wallet_state_manager.main_wallet host = request["host"] if request["wallet_type"] == "cc_wallet": if request["mode"] == "new": async with self.service.wallet_state_manager.lock: cc_wallet: CCWallet = await CCWallet.create_new_cc( wallet_state_manager, main_wallet, request["amount"] ) colour = cc_wallet.get_colour() asyncio.create_task(self._create_backup_and_upload(host)) return { "type": cc_wallet.type(), "colour": colour, "wallet_id": cc_wallet.id(), } elif request["mode"] == "existing": async with self.service.wallet_state_manager.lock: cc_wallet = await CCWallet.create_wallet_for_cc( wallet_state_manager, main_wallet, request["colour"] ) asyncio.create_task(self._create_backup_and_upload(host)) return {"type": cc_wallet.type()} elif request["wallet_type"] == "rl_wallet": if request["rl_type"] == "admin": log.info("Create rl admin wallet") async with self.service.wallet_state_manager.lock: rl_admin: RLWallet = await RLWallet.create_rl_admin(wallet_state_manager) success = await rl_admin.admin_create_coin( uint64(int(request["interval"])), uint64(int(request["limit"])), request["pubkey"], uint64(int(request["amount"])), uint64(int(request["fee"])) if "fee" in request else uint64(0), ) asyncio.create_task(self._create_backup_and_upload(host)) assert rl_admin.rl_info.admin_pubkey is not None return { "success": success, "id": rl_admin.id(), "type": rl_admin.type(), "origin": rl_admin.rl_info.rl_origin, "pubkey": rl_admin.rl_info.admin_pubkey.hex(), } elif request["rl_type"] == "user": log.info("Create rl user wallet") async with self.service.wallet_state_manager.lock: rl_user: RLWallet = await RLWallet.create_rl_user(wallet_state_manager) asyncio.create_task(self._create_backup_and_upload(host)) assert rl_user.rl_info.user_pubkey is not None return { "id": rl_user.id(), "type": rl_user.type(), "pubkey": rl_user.rl_info.user_pubkey.hex(), } elif request["wallet_type"] == "did_wallet": if request["did_type"] == "new": backup_dids = [] num_needed = 0 for d in request["backup_dids"]: backup_dids.append(hexstr_to_bytes(d)) if len(backup_dids) > 0: num_needed = uint64(request["num_of_backup_ids_needed"]) async with self.service.wallet_state_manager.lock: did_wallet: DIDWallet 
= await DIDWallet.create_new_did_wallet( wallet_state_manager, main_wallet, int(request["amount"]), backup_dids, uint64(num_needed), ) my_did = did_wallet.get_my_DID() return { "success": True, "type": did_wallet.type(), "my_did": my_did, "wallet_id": did_wallet.id(), } elif request["did_type"] == "recovery": async with self.service.wallet_state_manager.lock: did_wallet = await DIDWallet.create_new_did_wallet_from_recovery( wallet_state_manager, main_wallet, request["filename"] ) assert did_wallet.did_info.temp_coin is not None assert did_wallet.did_info.temp_puzhash is not None assert did_wallet.did_info.temp_pubkey is not None my_did = did_wallet.get_my_DID() coin_name = did_wallet.did_info.temp_coin.name().hex() coin_list = did_wallet.did_info.temp_coin.as_list() newpuzhash = did_wallet.did_info.temp_puzhash pubkey = did_wallet.did_info.temp_pubkey return { "success": True, "type": did_wallet.type(), "my_did": my_did, "wallet_id": did_wallet.id(), "coin_name": coin_name, "coin_list": coin_list, "newpuzhash": newpuzhash.hex(), "pubkey": pubkey.hex(), "backup_dids": did_wallet.did_info.backup_ids, "num_verifications_required": did_wallet.did_info.num_of_backup_ids_needed, } ########################################################################################## # Wallet ########################################################################################## async def get_wallet_balance(self, request: Dict) -> Dict: assert self.service.wallet_state_manager is not None wallet_id = uint32(int(request["wallet_id"])) wallet = self.service.wallet_state_manager.wallets[wallet_id] async with self.service.wallet_state_manager.lock: unspent_records = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(wallet_id) balance = await wallet.get_confirmed_balance(unspent_records) pending_balance = await wallet.get_unconfirmed_balance(unspent_records) spendable_balance = await wallet.get_spendable_balance(unspent_records) pending_change = await wallet.get_pending_change_balance() max_send_amount = await wallet.get_max_send_amount(unspent_records) unconfirmed_removals: Dict[ bytes32, Coin ] = await wallet.wallet_state_manager.unconfirmed_removals_for_wallet(wallet_id) wallet_balance = { "wallet_id": wallet_id, "confirmed_wallet_balance": balance, "unconfirmed_wallet_balance": pending_balance, "spendable_balance": spendable_balance, "pending_change": pending_change, "max_send_amount": max_send_amount, "unspent_coin_count": len(unspent_records), "pending_coin_removal_count": len(unconfirmed_removals), } return {"wallet_balance": wallet_balance} async def get_transaction(self, request: Dict) -> Dict: assert self.service.wallet_state_manager is not None transaction_id: bytes32 = bytes32(hexstr_to_bytes(request["transaction_id"])) tr: Optional[TransactionRecord] = await self.service.wallet_state_manager.get_transaction(transaction_id) if tr is None: raise ValueError(f"Transaction 0x{transaction_id.hex()} not found") return { "transaction": tr, "transaction_id": tr.name, } async def get_transactions(self, request: Dict) -> Dict: assert self.service.wallet_state_manager is not None wallet_id = int(request["wallet_id"]) if "start" in request: start = request["start"] else: start = 0 if "end" in request: end = request["end"] else: end = 50 transactions = await self.service.wallet_state_manager.tx_store.get_transactions_between(wallet_id, start, end) formatted_transactions = [] selected = self.service.config["selected_network"] prefix = 
self.service.config["network_overrides"]["config"][selected]["address_prefix"] for tx in transactions: formatted = tx.to_json_dict() formatted["to_address"] = encode_puzzle_hash(tx.to_puzzle_hash, prefix) formatted_transactions.append(formatted) return { "transactions": formatted_transactions, "wallet_id": wallet_id, } async def get_initial_freeze_period(self, _: Dict): freeze_period = self.service.constants.INITIAL_FREEZE_END_TIMESTAMP return {"INITIAL_FREEZE_END_TIMESTAMP": freeze_period} async def get_next_address(self, request: Dict) -> Dict: """ Returns a new address """ assert self.service.wallet_state_manager is not None if request["new_address"] is True: create_new = True else: create_new = False wallet_id = uint32(int(request["wallet_id"])) wallet = self.service.wallet_state_manager.wallets[wallet_id] selected = self.service.config["selected_network"] prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"] if wallet.type() == WalletType.STANDARD_WALLET: raw_puzzle_hash = await wallet.get_puzzle_hash(create_new) address = encode_puzzle_hash(raw_puzzle_hash, prefix) elif wallet.type() == WalletType.COLOURED_COIN: raw_puzzle_hash = await wallet.get_puzzle_hash(create_new) address = encode_puzzle_hash(raw_puzzle_hash, prefix) else: raise ValueError(f"Wallet type {wallet.type()} cannot create puzzle hashes") return { "wallet_id": wallet_id, "address": address, } async def send_transaction(self, request): assert self.service.wallet_state_manager is not None if await self.service.wallet_state_manager.synced() is False: raise ValueError("Wallet needs to be fully synced before sending transactions") if int(time.time()) < self.service.constants.INITIAL_FREEZE_END_TIMESTAMP: end_date = datetime.fromtimestamp(float(self.service.constants.INITIAL_FREEZE_END_TIMESTAMP)) raise ValueError(f"No transactions before: {end_date}") wallet_id = int(request["wallet_id"]) wallet = self.service.wallet_state_manager.wallets[wallet_id] if not isinstance(request["amount"], int) or not isinstance(request["fee"], int): raise ValueError("An integer amount or fee is required (too many decimals)") amount: uint64 = uint64(request["amount"]) puzzle_hash: bytes32 = decode_puzzle_hash(request["address"]) if "fee" in request: fee = uint64(request["fee"]) else: fee = uint64(0) async with self.service.wallet_state_manager.lock: tx: TransactionRecord = await wallet.generate_signed_transaction(amount, puzzle_hash, fee) await wallet.push_transaction(tx) # Transaction may not have been included in the mempool yet. Use get_transaction to check. 
        return {
            "transaction": tx,
            "transaction_id": tx.name,
        }

    async def get_transaction_count(self, request):
        wallet_id = int(request["wallet_id"])
        count = await self.service.wallet_state_manager.tx_store.get_transaction_count_for_wallet(wallet_id)
        return {"wallet_id": wallet_id, "count": count}

    async def create_backup(self, request):
        assert self.service.wallet_state_manager is not None
        file_path = Path(request["file_path"])
        await self.service.wallet_state_manager.create_wallet_backup(file_path)
        return {}

    ##########################################################################################
    # Coloured Coins and Trading
    ##########################################################################################

    async def cc_set_name(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
        await wallet.set_name(str(request["name"]))
        return {"wallet_id": wallet_id}

    async def cc_get_name(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
        name: str = await wallet.get_name()
        return {"wallet_id": wallet_id, "name": name}

    async def cc_spend(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
        puzzle_hash: bytes32 = decode_puzzle_hash(request["inner_address"])
        if not isinstance(request["amount"], int) or not isinstance(request.get("fee", 0), int):
            raise ValueError("An integer amount or fee is required (too many decimals)")
        amount: uint64 = uint64(request["amount"])
        if "fee" in request:
            fee = uint64(request["fee"])
        else:
            fee = uint64(0)
        async with self.service.wallet_state_manager.lock:
            tx: TransactionRecord = await wallet.generate_signed_transaction([amount], [puzzle_hash], fee)
            await wallet.push_transaction(tx)

        return {
            "transaction": tx,
            "transaction_id": tx.name,
        }

    async def cc_get_colour(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
        colour: str = wallet.get_colour()
        return {"colour": colour, "wallet_id": wallet_id}

    async def create_offer_for_ids(self, request):
        assert self.service.wallet_state_manager is not None

        offer = request["ids"]
        file_name = request["filename"]
        async with self.service.wallet_state_manager.lock:
            (
                success,
                spend_bundle,
                error,
            ) = await self.service.wallet_state_manager.trade_manager.create_offer_for_ids(offer, file_name)
            if success:
                self.service.wallet_state_manager.trade_manager.write_offer_to_disk(Path(file_name), spend_bundle)
                return {}
        raise ValueError(error)

    async def get_discrepancies_for_offer(self, request):
        assert self.service.wallet_state_manager is not None

        file_name = request["filename"]
        file_path = Path(file_name)
        async with self.service.wallet_state_manager.lock:
            (
                success,
                discrepancies,
                error,
            ) = await self.service.wallet_state_manager.trade_manager.get_discrepancies_for_offer(file_path)
        if success:
            return {"discrepancies": discrepancies}
        raise ValueError(error)

    async def respond_to_offer(self, request):
        assert self.service.wallet_state_manager is not None

        file_path = Path(request["filename"])
        async with self.service.wallet_state_manager.lock:
            (
                success,
                trade_record,
                error,
            ) = await self.service.wallet_state_manager.trade_manager.respond_to_offer(file_path)
        if not success:
            raise ValueError(error)
        return {}

    async def get_trade(self, request: Dict):
        assert self.service.wallet_state_manager is not None

        trade_mgr = self.service.wallet_state_manager.trade_manager
        trade_id = request["trade_id"]
        trade: Optional[TradeRecord] = await trade_mgr.get_trade_by_id(trade_id)
        if trade is None:
            raise ValueError(f"No trade with trade id: {trade_id}")
        result = trade_record_to_dict(trade)
        return {"trade": result}

    async def get_all_trades(self, request: Dict):
        assert self.service.wallet_state_manager is not None

        trade_mgr = self.service.wallet_state_manager.trade_manager
        all_trades = await trade_mgr.get_all_trades()
        result = []
        for trade in all_trades:
            result.append(trade_record_to_dict(trade))
        return {"trades": result}

    async def cancel_trade(self, request: Dict):
        assert self.service.wallet_state_manager is not None

        wsm = self.service.wallet_state_manager
        secure = request["secure"]
        trade_id = hexstr_to_bytes(request["trade_id"])
        async with self.service.wallet_state_manager.lock:
            if secure:
                await wsm.trade_manager.cancel_pending_offer_safely(trade_id)
            else:
                await wsm.trade_manager.cancel_pending_offer(trade_id)
        return {}

    async def get_backup_info(self, request: Dict):
        file_path = Path(request["file_path"])
        sk = None
        if "words" in request:
            mnemonic = request["words"]
            passphrase = ""
            try:
                sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
            except KeyError as e:
                return {
                    "success": False,
                    "error": f"The word '{e.args[0]}' is incorrect.",
                    "word": e.args[0],
                }
        elif "fingerprint" in request:
            sk, seed = await self._get_private_key(request["fingerprint"])

        if sk is None:
            raise ValueError("Unable to decrypt the backup file.")
        backup_info = get_backup_info(file_path, sk)
        return {"backup_info": backup_info}

    ##########################################################################################
    # Distributed Identities
    ##########################################################################################

    async def did_update_recovery_ids(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        recovery_list = []
        for _ in request["new_list"]:
            recovery_list.append(hexstr_to_bytes(_))
        if "num_verifications_required" in request:
            new_amount_verifications_required = uint64(request["num_verifications_required"])
        else:
            new_amount_verifications_required = len(recovery_list)
        async with self.service.wallet_state_manager.lock:
            success = await wallet.update_recovery_list(recovery_list, new_amount_verifications_required)
            # Update coin with new ID info
            updated_puz = await wallet.get_new_puzzle()
            spend_bundle = await wallet.create_spend(updated_puz.get_tree_hash())
        if spend_bundle is not None and success:
            return {"success": True}
        return {"success": False}

    async def did_spend(self, request):
        wallet_id = int(request["wallet_id"])
        async with self.service.wallet_state_manager.lock:
            wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
            spend_bundle = await wallet.create_spend(request["puzzlehash"])
        if spend_bundle is not None:
            return {"success": True}
        return {"success": False}

    async def did_get_did(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        my_did: str = wallet.get_my_DID()
        async with self.service.wallet_state_manager.lock:
            coins = await wallet.select_coins(1)
        if coins is None or coins == set():
            return {"success": True, "wallet_id": wallet_id, "my_did": my_did}
        else:
            coin = coins.pop()
            return {"success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_id": coin.name()}

    async def did_get_recovery_list(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        recovery_list = wallet.did_info.backup_ids
        recover_hex_list = []
        for _ in recovery_list:
            recover_hex_list.append(_.hex())
        return {
            "success": True,
            "wallet_id": wallet_id,
            "recover_list": recover_hex_list,
            "num_required": wallet.did_info.num_of_backup_ids_needed,
        }

    async def did_recovery_spend(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        if len(request["attest_filenames"]) < wallet.did_info.num_of_backup_ids_needed:
            return {"success": False, "reason": "insufficient messages"}

        async with self.service.wallet_state_manager.lock:
            (
                info_list,
                message_spend_bundle,
            ) = await wallet.load_attest_files_for_recovery_spend(request["attest_filenames"])

            if "pubkey" in request:
                pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
            else:
                assert wallet.did_info.temp_pubkey is not None
                pubkey = wallet.did_info.temp_pubkey

            if "puzhash" in request:
                puzhash = hexstr_to_bytes(request["puzhash"])
            else:
                assert wallet.did_info.temp_puzhash is not None
                puzhash = wallet.did_info.temp_puzhash

            success = await wallet.recovery_spend(
                wallet.did_info.temp_coin,
                puzhash,
                info_list,
                pubkey,
                message_spend_bundle,
            )
        return {"success": success}

    async def did_get_pubkey(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        pubkey = bytes((await wallet.wallet_state_manager.get_unused_derivation_record(wallet_id)).pubkey).hex()
        return {"success": True, "pubkey": pubkey}

    async def did_create_attest(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        async with self.service.wallet_state_manager.lock:
            info = await wallet.get_info_for_recovery()
            coin = hexstr_to_bytes(request["coin_name"])
            pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
            spend_bundle = await wallet.create_attestment(
                coin, hexstr_to_bytes(request["puzhash"]), pubkey, request["filename"]
            )
        if spend_bundle is not None:
            return {
                "success": True,
                "message_spend_bundle": bytes(spend_bundle).hex(),
                "info": [info[0].hex(), info[1].hex(), info[2]],
            }
        else:
            return {"success": False}

    async def did_get_information_needed_for_recovery(self, request):
        wallet_id = int(request["wallet_id"])
        did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        my_did = did_wallet.get_my_DID()
        coin_name = did_wallet.did_info.temp_coin.name().hex()
        return {
            "success": True,
            "wallet_id": wallet_id,
            "my_did": my_did,
            "coin_name": coin_name,
            "newpuzhash": did_wallet.did_info.temp_puzhash,
            "pubkey": did_wallet.did_info.temp_pubkey,
            "backup_dids": did_wallet.did_info.backup_ids,
        }

    async def did_create_backup_file(self, request):
        wallet_id = int(request["wallet_id"])
        try:
            did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
            did_wallet.create_backup(request["filename"])
            return {"wallet_id": wallet_id, "success": True}
        except Exception:
            return {"wallet_id": wallet_id, "success": False}

    ##########################################################################################
    # Rate Limited Wallet
    ##########################################################################################

    async def rl_set_user_info(self, request):
        assert self.service.wallet_state_manager is not None

        wallet_id = uint32(int(request["wallet_id"]))
        rl_user = self.service.wallet_state_manager.wallets[wallet_id]
        origin = request["origin"]
        async with self.service.wallet_state_manager.lock:
            await rl_user.set_user_info(
                uint64(request["interval"]),
                uint64(request["limit"]),
                origin["parent_coin_info"],
                origin["puzzle_hash"],
                origin["amount"],
                request["admin_pubkey"],
            )
        return {}

    async def send_clawback_transaction(self, request):
        assert self.service.wallet_state_manager is not None

        wallet_id = int(request["wallet_id"])
        wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]

        fee = int(request["fee"])
        async with self.service.wallet_state_manager.lock:
            tx = await wallet.clawback_rl_coin_transaction(fee)
            await wallet.push_transaction(tx)

        # Transaction may not have been included in the mempool yet. Use get_transaction to check.
        return {
            "transaction": tx,
            "transaction_id": tx.name,
        }

    async def add_rate_limited_funds(self, request):
        wallet_id = uint32(request["wallet_id"])
        wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
        puzzle_hash = wallet.rl_get_aggregation_puzzlehash(wallet.rl_info.rl_puzzle_hash)
        request["wallet_id"] = 1
        request["puzzle_hash"] = puzzle_hash
        async with self.service.wallet_state_manager.lock:
            await wallet.rl_add_funds(request["amount"], puzzle_hash, request["fee"])
        return {"status": "SUCCESS"}

    async def get_farmed_amount(self, request):
        tx_records: List[TransactionRecord] = await self.service.wallet_state_manager.tx_store.get_farming_rewards()
        amount = 0
        pool_reward_amount = 0
        farmer_reward_amount = 0
        fee_amount = 0
        last_height_farmed = 0
        for record in tx_records:
            height = record.height_farmed(self.service.constants.GENESIS_CHALLENGE)
            if height > last_height_farmed:
                last_height_farmed = height
            if record.type == TransactionType.COINBASE_REWARD:
                pool_reward_amount += record.amount
            if record.type == TransactionType.FEE_REWARD:
                fee_amount += record.amount - calculate_base_farmer_reward(height)
                farmer_reward_amount += calculate_base_farmer_reward(height)
            amount += record.amount
        assert amount == pool_reward_amount + farmer_reward_amount + fee_amount
        return {
            "farmed_amount": amount,
            "pool_reward_amount": pool_reward_amount,
            "farmer_reward_amount": farmer_reward_amount,
            "fee_amount": fee_amount,
            "last_height_farmed": last_height_farmed,
        }

    async def create_signed_transaction(self, request):
        if "additions" not in request or len(request["additions"]) < 1:
            raise ValueError("Specify additions list")

        additions: List[Dict] = request["additions"]
        amount_0: uint64 = uint64(additions[0]["amount"])
        assert amount_0 <= self.service.constants.MAX_COIN_AMOUNT
        puzzle_hash_0 = hexstr_to_bytes(additions[0]["puzzle_hash"])
        if len(puzzle_hash_0) != 32:
            raise ValueError(f"Address must be 32 bytes. {puzzle_hash_0}")

        additional_outputs = []
        for addition in additions[1:]:
            receiver_ph = hexstr_to_bytes(addition["puzzle_hash"])
            if len(receiver_ph) != 32:
                raise ValueError(f"Address must be 32 bytes. {receiver_ph}")
            amount = uint64(addition["amount"])
            if amount > self.service.constants.MAX_COIN_AMOUNT:
                raise ValueError(f"Coin amount cannot exceed {self.service.constants.MAX_COIN_AMOUNT}")
            additional_outputs.append({"puzzlehash": receiver_ph, "amount": amount})

        fee = uint64(0)
        if "fee" in request:
            fee = uint64(request["fee"])

        coins = None
        if "coins" in request and len(request["coins"]) > 0:
            coins = set([Coin.from_json_dict(coin_json) for coin_json in request["coins"]])

        async with self.service.wallet_state_manager.lock:
            signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
                amount_0, puzzle_hash_0, fee, coins=coins, ignore_max_send_amount=True, primaries=additional_outputs
            )
        return {"signed_tx": signed_tx}
44.476626
120
0.60882
import asyncio
import logging
import time
from datetime import datetime
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple

from blspy import PrivateKey, G1Element

from seno.cmds.init_funcs import check_keys
from seno.consensus.block_rewards import calculate_base_farmer_reward
from seno.protocols.protocol_message_types import ProtocolMessageTypes
from seno.server.outbound_message import NodeType, make_msg
from seno.simulator.simulator_protocol import FarmNewBlockProtocol
from seno.types.blockchain_format.coin import Coin
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from seno.util.byte_types import hexstr_to_bytes
from seno.util.ints import uint32, uint64
from seno.util.keychain import bytes_to_mnemonic, generate_mnemonic
from seno.util.path import path_from_root
from seno.util.ws_message import WsRpcMessage, create_payload_dict
from seno.wallet.cc_wallet.cc_wallet import CCWallet
from seno.wallet.rl_wallet.rl_wallet import RLWallet
from seno.wallet.did_wallet.did_wallet import DIDWallet
from seno.wallet.trade_record import TradeRecord
from seno.wallet.transaction_record import TransactionRecord
from seno.wallet.util.backup_utils import download_backup, get_backup_info, upload_backup
from seno.wallet.util.trade_utils import trade_record_to_dict
from seno.wallet.util.transaction_type import TransactionType
from seno.wallet.util.wallet_types import WalletType
from seno.wallet.wallet_info import WalletInfo
from seno.wallet.wallet_node import WalletNode

TIMEOUT = 30

log = logging.getLogger(__name__)


class WalletRpcApi:
    def __init__(self, wallet_node: WalletNode):
        assert wallet_node is not None
        self.service = wallet_node
        self.service_name = "seno_wallet"

    def get_routes(self) -> Dict[str, Callable]:
        return {
            "/log_in": self.log_in,
            "/get_public_keys": self.get_public_keys,
            "/get_private_key": self.get_private_key,
            "/generate_mnemonic": self.generate_mnemonic,
            "/add_key": self.add_key,
            "/delete_key": self.delete_key,
            "/delete_all_keys": self.delete_all_keys,
            "/get_sync_status": self.get_sync_status,
            "/get_height_info": self.get_height_info,
            "/farm_block": self.farm_block,
            "/get_initial_freeze_period": self.get_initial_freeze_period,
            "/get_network_info": self.get_network_info,
            "/get_wallets": self.get_wallets,
            "/create_new_wallet": self.create_new_wallet,
            "/get_wallet_balance": self.get_wallet_balance,
            "/get_transaction": self.get_transaction,
            "/get_transactions": self.get_transactions,
            "/get_next_address": self.get_next_address,
            "/send_transaction": self.send_transaction,
            "/create_backup": self.create_backup,
            "/get_transaction_count": self.get_transaction_count,
            "/get_farmed_amount": self.get_farmed_amount,
            "/create_signed_transaction": self.create_signed_transaction,
            "/cc_set_name": self.cc_set_name,
            "/cc_get_name": self.cc_get_name,
            "/cc_spend": self.cc_spend,
            "/cc_get_colour": self.cc_get_colour,
            "/create_offer_for_ids": self.create_offer_for_ids,
            "/get_discrepancies_for_offer": self.get_discrepancies_for_offer,
            "/respond_to_offer": self.respond_to_offer,
            "/get_trade": self.get_trade,
            "/get_all_trades": self.get_all_trades,
            "/cancel_trade": self.cancel_trade,
            "/did_update_recovery_ids": self.did_update_recovery_ids,
            "/did_spend": self.did_spend,
            "/did_get_pubkey": self.did_get_pubkey,
            "/did_get_did": self.did_get_did,
            "/did_recovery_spend": self.did_recovery_spend,
            "/did_get_recovery_list": self.did_get_recovery_list,
            "/did_create_attest": self.did_create_attest,
            "/did_get_information_needed_for_recovery": self.did_get_information_needed_for_recovery,
            "/did_create_backup_file": self.did_create_backup_file,
            "/rl_set_user_info": self.rl_set_user_info,
            "/send_clawback_transaction:": self.send_clawback_transaction,
            "/add_rate_limited_funds:": self.add_rate_limited_funds,
        }

    async def _state_changed(self, *args) -> List[WsRpcMessage]:
        if len(args) < 2:
            return []
        data = {
            "state": args[0],
        }
        if args[1] is not None:
            data["wallet_id"] = args[1]
        if args[2] is not None:
            data["additional_data"] = args[2]
        return [create_payload_dict("state_changed", data, "seno_wallet", "wallet_ui")]

    async def _stop_wallet(self):
        if self.service is not None:
            self.service._close()
            await self.service._await_closed()

    async def log_in(self, request):
        fingerprint = request["fingerprint"]
        if self.service.logged_in_fingerprint == fingerprint:
            return {"fingerprint": fingerprint}

        await self._stop_wallet()
        log_in_type = request["type"]
        recovery_host = request["host"]
        testing = False

        if "testing" in self.service.config and self.service.config["testing"] is True:
            testing = True
        if log_in_type == "skip":
            started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
        elif log_in_type == "restore_backup":
            file_path = Path(request["file_path"])
            started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)
        else:
            started = await self.service._start(fingerprint)

        if started is True:
            return {"fingerprint": fingerprint}
        elif testing is True and self.service.backup_initialized is False:
            response = {"success": False, "error": "not_initialized"}
            return response
        elif self.service.backup_initialized is False:
            backup_info = None
            backup_path = None
            try:
                private_key = self.service.get_key_for_fingerprint(fingerprint)
                last_recovery = await download_backup(recovery_host, private_key)
                backup_path = path_from_root(self.service.root_path, "last_recovery")
                if backup_path.exists():
                    backup_path.unlink()
                backup_path.write_text(last_recovery)
                backup_info = get_backup_info(backup_path, private_key)
                backup_info["backup_host"] = recovery_host
                backup_info["downloaded"] = True
            except Exception as e:
                log.error(f"error {e}")
            response = {"success": False, "error": "not_initialized"}
            if backup_info is not None:
                response["backup_info"] = backup_info
                response["backup_path"] = f"{backup_path}"
            return response

        return {"success": False, "error": "Unknown Error"}

    async def get_public_keys(self, request: Dict):
        fingerprints = [sk.get_g1().get_fingerprint() for (sk, seed) in self.service.keychain.get_all_private_keys()]
        return {"public_key_fingerprints": fingerprints}

    async def _get_private_key(self, fingerprint) -> Tuple[Optional[PrivateKey], Optional[bytes]]:
        for sk, seed in self.service.keychain.get_all_private_keys():
            if sk.get_g1().get_fingerprint() == fingerprint:
                return sk, seed
        return None, None

    async def get_private_key(self, request):
        fingerprint = request["fingerprint"]
        sk, seed = await self._get_private_key(fingerprint)
        if sk is not None:
            s = bytes_to_mnemonic(seed) if seed is not None else None
            return {
                "private_key": {
                    "fingerprint": fingerprint,
                    "sk": bytes(sk).hex(),
                    "pk": bytes(sk.get_g1()).hex(),
                    "seed": s,
                },
            }
        return {"success": False, "private_key": {"fingerprint": fingerprint}}

    async def generate_mnemonic(self, request: Dict):
        return {"mnemonic": generate_mnemonic().split(" ")}

    async def add_key(self, request):
        if "mnemonic" not in request:
            raise ValueError("Mnemonic not in request")
        mnemonic = request["mnemonic"]
        passphrase = ""
        try:
            sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
        except KeyError as e:
            return {
                "success": False,
                "error": f"The word '{e.args[0]}' is incorrect.",
                "word": e.args[0],
            }

        fingerprint = sk.get_g1().get_fingerprint()
        await self._stop_wallet()  # Makes sure the new key is added to config properly
        started = False
        check_keys(self.service.root_path)
        request_type = request["type"]
        if request_type == "new_wallet":
            started = await self.service._start(fingerprint=fingerprint, new_wallet=True)
        elif request_type == "skip":
            started = await self.service._start(fingerprint=fingerprint, skip_backup_import=True)
        elif request_type == "restore_backup":
            file_path = Path(request["file_path"])
            started = await self.service._start(fingerprint=fingerprint, backup_file=file_path)

        if started is True:
            return {"fingerprint": fingerprint}
        raise ValueError("Failed to start")

    async def delete_key(self, request):
        await self._stop_wallet()
        fingerprint = request["fingerprint"]
        self.service.keychain.delete_key_by_fingerprint(fingerprint)
        path = path_from_root(
            self.service.root_path,
            f"{self.service.config['database_path']}-{fingerprint}",
        )
        if path.exists():
            path.unlink()
        return {}

    async def delete_all_keys(self, request: Dict):
        await self._stop_wallet()
        self.service.keychain.delete_all_keys()
        path = path_from_root(self.service.root_path, self.service.config["database_path"])
        if path.exists():
            path.unlink()
        return {}

    async def get_sync_status(self, request: Dict):
        assert self.service.wallet_state_manager is not None
        syncing = self.service.wallet_state_manager.sync_mode
        synced = await self.service.wallet_state_manager.synced()
        return {"synced": synced, "syncing": syncing, "genesis_initialized": True}

    async def get_height_info(self, request: Dict):
        assert self.service.wallet_state_manager is not None
        peak = self.service.wallet_state_manager.peak
        if peak is None:
            return {"height": 0}
        else:
            return {"height": peak.height}

    async def get_network_info(self, request: Dict):
        assert self.service.wallet_state_manager is not None
        network_name = self.service.config["selected_network"]
        address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"]
        return {"network_name": network_name, "network_prefix": address_prefix}

    async def farm_block(self, request):
        raw_puzzle_hash = decode_puzzle_hash(request["address"])
        request = FarmNewBlockProtocol(raw_puzzle_hash)
        msg = make_msg(ProtocolMessageTypes.farm_new_block, request)
        await self.service.server.send_to_all([msg], NodeType.FULL_NODE)
        return {}

    async def get_wallets(self, request: Dict):
        assert self.service.wallet_state_manager is not None
        wallets: List[WalletInfo] = await self.service.wallet_state_manager.get_all_wallet_info_entries()
        return {"wallets": wallets}

    async def _create_backup_and_upload(self, host) -> None:
        assert self.service.wallet_state_manager is not None
        try:
            if "testing" in self.service.config and self.service.config["testing"] is True:
                return None
            now = time.time()
            file_name = f"backup_{now}"
            path = path_from_root(self.service.root_path, file_name)
            await self.service.wallet_state_manager.create_wallet_backup(path)
            backup_text = path.read_text()
            response = await upload_backup(host, backup_text)
            success = response["success"]
            if success is False:
                log.error("Failed to upload backup to wallet backup service")
            elif success is True:
                log.info("Finished upload of the backup file")
        except Exception as e:
            log.error(f"Exception in upload backup. Error: {e}")

    async def create_new_wallet(self, request: Dict):
        assert self.service.wallet_state_manager is not None

        wallet_state_manager = self.service.wallet_state_manager
        main_wallet = wallet_state_manager.main_wallet
        host = request["host"]

        if request["wallet_type"] == "cc_wallet":
            if request["mode"] == "new":
                async with self.service.wallet_state_manager.lock:
                    cc_wallet: CCWallet = await CCWallet.create_new_cc(
                        wallet_state_manager, main_wallet, request["amount"]
                    )
                    colour = cc_wallet.get_colour()
                    asyncio.create_task(self._create_backup_and_upload(host))
                return {
                    "type": cc_wallet.type(),
                    "colour": colour,
                    "wallet_id": cc_wallet.id(),
                }
            elif request["mode"] == "existing":
                async with self.service.wallet_state_manager.lock:
                    cc_wallet = await CCWallet.create_wallet_for_cc(
                        wallet_state_manager, main_wallet, request["colour"]
                    )
                    asyncio.create_task(self._create_backup_and_upload(host))
                return {"type": cc_wallet.type()}
        elif request["wallet_type"] == "rl_wallet":
            if request["rl_type"] == "admin":
                log.info("Create rl admin wallet")
                async with self.service.wallet_state_manager.lock:
                    rl_admin: RLWallet = await RLWallet.create_rl_admin(wallet_state_manager)
                    success = await rl_admin.admin_create_coin(
                        uint64(int(request["interval"])),
                        uint64(int(request["limit"])),
                        request["pubkey"],
                        uint64(int(request["amount"])),
                        uint64(int(request["fee"])) if "fee" in request else uint64(0),
                    )
                    asyncio.create_task(self._create_backup_and_upload(host))
                assert rl_admin.rl_info.admin_pubkey is not None
                return {
                    "success": success,
                    "id": rl_admin.id(),
                    "type": rl_admin.type(),
                    "origin": rl_admin.rl_info.rl_origin,
                    "pubkey": rl_admin.rl_info.admin_pubkey.hex(),
                }
            elif request["rl_type"] == "user":
                log.info("Create rl user wallet")
                async with self.service.wallet_state_manager.lock:
                    rl_user: RLWallet = await RLWallet.create_rl_user(wallet_state_manager)
                    asyncio.create_task(self._create_backup_and_upload(host))
                assert rl_user.rl_info.user_pubkey is not None
                return {
                    "id": rl_user.id(),
                    "type": rl_user.type(),
                    "pubkey": rl_user.rl_info.user_pubkey.hex(),
                }
        elif request["wallet_type"] == "did_wallet":
            if request["did_type"] == "new":
                backup_dids = []
                num_needed = 0
                for d in request["backup_dids"]:
                    backup_dids.append(hexstr_to_bytes(d))
                if len(backup_dids) > 0:
                    num_needed = uint64(request["num_of_backup_ids_needed"])
                async with self.service.wallet_state_manager.lock:
                    did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
                        wallet_state_manager,
                        main_wallet,
                        int(request["amount"]),
                        backup_dids,
                        uint64(num_needed),
                    )
                my_did = did_wallet.get_my_DID()
                return {
                    "success": True,
                    "type": did_wallet.type(),
                    "my_did": my_did,
                    "wallet_id": did_wallet.id(),
                }
            elif request["did_type"] == "recovery":
                async with self.service.wallet_state_manager.lock:
                    did_wallet = await DIDWallet.create_new_did_wallet_from_recovery(
                        wallet_state_manager, main_wallet, request["filename"]
                    )
                assert did_wallet.did_info.temp_coin is not None
                assert did_wallet.did_info.temp_puzhash is not None
                assert did_wallet.did_info.temp_pubkey is not None
                my_did = did_wallet.get_my_DID()
                coin_name = did_wallet.did_info.temp_coin.name().hex()
                coin_list = did_wallet.did_info.temp_coin.as_list()
                newpuzhash = did_wallet.did_info.temp_puzhash
                pubkey = did_wallet.did_info.temp_pubkey
                return {
                    "success": True,
                    "type": did_wallet.type(),
                    "my_did": my_did,
                    "wallet_id": did_wallet.id(),
                    "coin_name": coin_name,
                    "coin_list": coin_list,
                    "newpuzhash": newpuzhash.hex(),
                    "pubkey": pubkey.hex(),
                    "backup_dids": did_wallet.did_info.backup_ids,
                    "num_verifications_required": did_wallet.did_info.num_of_backup_ids_needed,
                }

    async def get_wallet_balance(self, request: Dict) -> Dict:
        assert self.service.wallet_state_manager is not None
        wallet_id = uint32(int(request["wallet_id"]))
        wallet = self.service.wallet_state_manager.wallets[wallet_id]
        async with self.service.wallet_state_manager.lock:
            unspent_records = await self.service.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(wallet_id)
            balance = await wallet.get_confirmed_balance(unspent_records)
            pending_balance = await wallet.get_unconfirmed_balance(unspent_records)
            spendable_balance = await wallet.get_spendable_balance(unspent_records)
            pending_change = await wallet.get_pending_change_balance()
            max_send_amount = await wallet.get_max_send_amount(unspent_records)

            unconfirmed_removals: Dict[
                bytes32, Coin
            ] = await wallet.wallet_state_manager.unconfirmed_removals_for_wallet(wallet_id)
        wallet_balance = {
            "wallet_id": wallet_id,
            "confirmed_wallet_balance": balance,
            "unconfirmed_wallet_balance": pending_balance,
            "spendable_balance": spendable_balance,
            "pending_change": pending_change,
            "max_send_amount": max_send_amount,
            "unspent_coin_count": len(unspent_records),
            "pending_coin_removal_count": len(unconfirmed_removals),
        }
        return {"wallet_balance": wallet_balance}

    async def get_transaction(self, request: Dict) -> Dict:
        assert self.service.wallet_state_manager is not None
        transaction_id: bytes32 = bytes32(hexstr_to_bytes(request["transaction_id"]))
        tr: Optional[TransactionRecord] = await self.service.wallet_state_manager.get_transaction(transaction_id)
        if tr is None:
            raise ValueError(f"Transaction 0x{transaction_id.hex()} not found")

        return {
            "transaction": tr,
            "transaction_id": tr.name,
        }

    async def get_transactions(self, request: Dict) -> Dict:
        assert self.service.wallet_state_manager is not None

        wallet_id = int(request["wallet_id"])
        if "start" in request:
            start = request["start"]
        else:
            start = 0
        if "end" in request:
            end = request["end"]
        else:
            end = 50

        transactions = await self.service.wallet_state_manager.tx_store.get_transactions_between(wallet_id, start, end)
        formatted_transactions = []
        selected = self.service.config["selected_network"]
        prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
        for tx in transactions:
            formatted = tx.to_json_dict()
            formatted["to_address"] = encode_puzzle_hash(tx.to_puzzle_hash, prefix)
            formatted_transactions.append(formatted)

        return {
            "transactions": formatted_transactions,
            "wallet_id": wallet_id,
        }

    async def get_initial_freeze_period(self, _: Dict):
        freeze_period = self.service.constants.INITIAL_FREEZE_END_TIMESTAMP
        return {"INITIAL_FREEZE_END_TIMESTAMP": freeze_period}

    async def get_next_address(self, request: Dict) -> Dict:
        assert self.service.wallet_state_manager is not None

        if request["new_address"] is True:
            create_new = True
        else:
            create_new = False
        wallet_id = uint32(int(request["wallet_id"]))
        wallet = self.service.wallet_state_manager.wallets[wallet_id]
        selected = self.service.config["selected_network"]
        prefix = self.service.config["network_overrides"]["config"][selected]["address_prefix"]
        if wallet.type() == WalletType.STANDARD_WALLET:
            raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
            address = encode_puzzle_hash(raw_puzzle_hash, prefix)
        elif wallet.type() == WalletType.COLOURED_COIN:
            raw_puzzle_hash = await wallet.get_puzzle_hash(create_new)
            address = encode_puzzle_hash(raw_puzzle_hash, prefix)
        else:
            raise ValueError(f"Wallet type {wallet.type()} cannot create puzzle hashes")

        return {
            "wallet_id": wallet_id,
            "address": address,
        }

    async def send_transaction(self, request):
        assert self.service.wallet_state_manager is not None

        if await self.service.wallet_state_manager.synced() is False:
            raise ValueError("Wallet needs to be fully synced before sending transactions")

        if int(time.time()) < self.service.constants.INITIAL_FREEZE_END_TIMESTAMP:
            end_date = datetime.fromtimestamp(float(self.service.constants.INITIAL_FREEZE_END_TIMESTAMP))
            raise ValueError(f"No transactions before: {end_date}")

        wallet_id = int(request["wallet_id"])
        wallet = self.service.wallet_state_manager.wallets[wallet_id]

        if not isinstance(request["amount"], int) or not isinstance(request["fee"], int):
            raise ValueError("An integer amount or fee is required (too many decimals)")
        amount: uint64 = uint64(request["amount"])
        puzzle_hash: bytes32 = decode_puzzle_hash(request["address"])
        if "fee" in request:
            fee = uint64(request["fee"])
        else:
            fee = uint64(0)
        async with self.service.wallet_state_manager.lock:
            tx: TransactionRecord = await wallet.generate_signed_transaction(amount, puzzle_hash, fee)
            await wallet.push_transaction(tx)  # Transaction may not have been included in the mempool yet. Use get_transaction to check.

        return {
            "transaction": tx,
            "transaction_id": tx.name,
        }

    async def get_transaction_count(self, request):
        wallet_id = int(request["wallet_id"])
        count = await self.service.wallet_state_manager.tx_store.get_transaction_count_for_wallet(wallet_id)
        return {"wallet_id": wallet_id, "count": count}

    async def create_backup(self, request):
        assert self.service.wallet_state_manager is not None
        file_path = Path(request["file_path"])
        await self.service.wallet_state_manager.create_wallet_backup(file_path)
        return {}

    async def cc_set_name(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
        await wallet.set_name(str(request["name"]))
        return {"wallet_id": wallet_id}

    async def cc_get_name(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
        name: str = await wallet.get_name()
        return {"wallet_id": wallet_id, "name": name}

    async def cc_spend(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
        puzzle_hash: bytes32 = decode_puzzle_hash(request["inner_address"])
        if not isinstance(request["amount"], int) or not isinstance(request.get("fee", 0), int):
            raise ValueError("An integer amount or fee is required (too many decimals)")
        amount: uint64 = uint64(request["amount"])
        if "fee" in request:
            fee = uint64(request["fee"])
        else:
            fee = uint64(0)
        async with self.service.wallet_state_manager.lock:
            tx: TransactionRecord = await wallet.generate_signed_transaction([amount], [puzzle_hash], fee)
            await wallet.push_transaction(tx)

        return {
            "transaction": tx,
            "transaction_id": tx.name,
        }

    async def cc_get_colour(self, request):
        assert self.service.wallet_state_manager is not None
        wallet_id = int(request["wallet_id"])
        wallet: CCWallet = self.service.wallet_state_manager.wallets[wallet_id]
        colour: str = wallet.get_colour()
        return {"colour": colour, "wallet_id": wallet_id}

    async def create_offer_for_ids(self, request):
        assert self.service.wallet_state_manager is not None

        offer = request["ids"]
        file_name = request["filename"]
        async with self.service.wallet_state_manager.lock:
            (
                success,
                spend_bundle,
                error,
            ) = await self.service.wallet_state_manager.trade_manager.create_offer_for_ids(offer, file_name)
            if success:
                self.service.wallet_state_manager.trade_manager.write_offer_to_disk(Path(file_name), spend_bundle)
                return {}
        raise ValueError(error)

    async def get_discrepancies_for_offer(self, request):
        assert self.service.wallet_state_manager is not None

        file_name = request["filename"]
        file_path = Path(file_name)
        async with self.service.wallet_state_manager.lock:
            (
                success,
                discrepancies,
                error,
            ) = await self.service.wallet_state_manager.trade_manager.get_discrepancies_for_offer(file_path)
        if success:
            return {"discrepancies": discrepancies}
        raise ValueError(error)

    async def respond_to_offer(self, request):
        assert self.service.wallet_state_manager is not None

        file_path = Path(request["filename"])
        async with self.service.wallet_state_manager.lock:
            (
                success,
                trade_record,
                error,
            ) = await self.service.wallet_state_manager.trade_manager.respond_to_offer(file_path)
        if not success:
            raise ValueError(error)
        return {}

    async def get_trade(self, request: Dict):
        assert self.service.wallet_state_manager is not None

        trade_mgr = self.service.wallet_state_manager.trade_manager
        trade_id = request["trade_id"]
        trade: Optional[TradeRecord] = await trade_mgr.get_trade_by_id(trade_id)
        if trade is None:
            raise ValueError(f"No trade with trade id: {trade_id}")
        result = trade_record_to_dict(trade)
        return {"trade": result}

    async def get_all_trades(self, request: Dict):
        assert self.service.wallet_state_manager is not None

        trade_mgr = self.service.wallet_state_manager.trade_manager
        all_trades = await trade_mgr.get_all_trades()
        result = []
        for trade in all_trades:
            result.append(trade_record_to_dict(trade))
        return {"trades": result}

    async def cancel_trade(self, request: Dict):
        assert self.service.wallet_state_manager is not None

        wsm = self.service.wallet_state_manager
        secure = request["secure"]
        trade_id = hexstr_to_bytes(request["trade_id"])
        async with self.service.wallet_state_manager.lock:
            if secure:
                await wsm.trade_manager.cancel_pending_offer_safely(trade_id)
            else:
                await wsm.trade_manager.cancel_pending_offer(trade_id)
        return {}

    async def get_backup_info(self, request: Dict):
        file_path = Path(request["file_path"])
        sk = None
        if "words" in request:
            mnemonic = request["words"]
            passphrase = ""
            try:
                sk = self.service.keychain.add_private_key(" ".join(mnemonic), passphrase)
            except KeyError as e:
                return {
                    "success": False,
                    "error": f"The word '{e.args[0]}' is incorrect.",
                    "word": e.args[0],
                }
        elif "fingerprint" in request:
            sk, seed = await self._get_private_key(request["fingerprint"])

        if sk is None:
            raise ValueError("Unable to decrypt the backup file.")
        backup_info = get_backup_info(file_path, sk)
        return {"backup_info": backup_info}

    async def did_update_recovery_ids(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        recovery_list = []
        for _ in request["new_list"]:
            recovery_list.append(hexstr_to_bytes(_))
        if "num_verifications_required" in request:
            new_amount_verifications_required = uint64(request["num_verifications_required"])
        else:
            new_amount_verifications_required = len(recovery_list)
        async with self.service.wallet_state_manager.lock:
            success = await wallet.update_recovery_list(recovery_list, new_amount_verifications_required)
            updated_puz = await wallet.get_new_puzzle()
            spend_bundle = await wallet.create_spend(updated_puz.get_tree_hash())
        if spend_bundle is not None and success:
            return {"success": True}
        return {"success": False}

    async def did_spend(self, request):
        wallet_id = int(request["wallet_id"])
        async with self.service.wallet_state_manager.lock:
            wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
            spend_bundle = await wallet.create_spend(request["puzzlehash"])
        if spend_bundle is not None:
            return {"success": True}
        return {"success": False}

    async def did_get_did(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        my_did: str = wallet.get_my_DID()
        async with self.service.wallet_state_manager.lock:
            coins = await wallet.select_coins(1)
        if coins is None or coins == set():
            return {"success": True, "wallet_id": wallet_id, "my_did": my_did}
        else:
            coin = coins.pop()
            return {"success": True, "wallet_id": wallet_id, "my_did": my_did, "coin_id": coin.name()}

    async def did_get_recovery_list(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        recovery_list = wallet.did_info.backup_ids
        recover_hex_list = []
        for _ in recovery_list:
            recover_hex_list.append(_.hex())
        return {
            "success": True,
            "wallet_id": wallet_id,
            "recover_list": recover_hex_list,
            "num_required": wallet.did_info.num_of_backup_ids_needed,
        }

    async def did_recovery_spend(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        if len(request["attest_filenames"]) < wallet.did_info.num_of_backup_ids_needed:
            return {"success": False, "reason": "insufficient messages"}

        async with self.service.wallet_state_manager.lock:
            (
                info_list,
                message_spend_bundle,
            ) = await wallet.load_attest_files_for_recovery_spend(request["attest_filenames"])

            if "pubkey" in request:
                pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
            else:
                assert wallet.did_info.temp_pubkey is not None
                pubkey = wallet.did_info.temp_pubkey

            if "puzhash" in request:
                puzhash = hexstr_to_bytes(request["puzhash"])
            else:
                assert wallet.did_info.temp_puzhash is not None
                puzhash = wallet.did_info.temp_puzhash

            success = await wallet.recovery_spend(
                wallet.did_info.temp_coin,
                puzhash,
                info_list,
                pubkey,
                message_spend_bundle,
            )
        return {"success": success}

    async def did_get_pubkey(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        pubkey = bytes((await wallet.wallet_state_manager.get_unused_derivation_record(wallet_id)).pubkey).hex()
        return {"success": True, "pubkey": pubkey}

    async def did_create_attest(self, request):
        wallet_id = int(request["wallet_id"])
        wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        async with self.service.wallet_state_manager.lock:
            info = await wallet.get_info_for_recovery()
            coin = hexstr_to_bytes(request["coin_name"])
            pubkey = G1Element.from_bytes(hexstr_to_bytes(request["pubkey"]))
            spend_bundle = await wallet.create_attestment(
                coin, hexstr_to_bytes(request["puzhash"]), pubkey, request["filename"]
            )
        if spend_bundle is not None:
            return {
                "success": True,
                "message_spend_bundle": bytes(spend_bundle).hex(),
                "info": [info[0].hex(), info[1].hex(), info[2]],
            }
        else:
            return {"success": False}

    async def did_get_information_needed_for_recovery(self, request):
        wallet_id = int(request["wallet_id"])
        did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
        my_did = did_wallet.get_my_DID()
        coin_name = did_wallet.did_info.temp_coin.name().hex()
        return {
            "success": True,
            "wallet_id": wallet_id,
            "my_did": my_did,
            "coin_name": coin_name,
            "newpuzhash": did_wallet.did_info.temp_puzhash,
            "pubkey": did_wallet.did_info.temp_pubkey,
            "backup_dids": did_wallet.did_info.backup_ids,
        }

    async def did_create_backup_file(self, request):
        wallet_id = int(request["wallet_id"])
        try:
            did_wallet: DIDWallet = self.service.wallet_state_manager.wallets[wallet_id]
            did_wallet.create_backup(request["filename"])
            return {"wallet_id": wallet_id, "success": True}
        except Exception:
            return {"wallet_id": wallet_id, "success": False}

    async def rl_set_user_info(self, request):
        assert self.service.wallet_state_manager is not None

        wallet_id = uint32(int(request["wallet_id"]))
        rl_user = self.service.wallet_state_manager.wallets[wallet_id]
        origin = request["origin"]
        async with self.service.wallet_state_manager.lock:
            await rl_user.set_user_info(
                uint64(request["interval"]),
                uint64(request["limit"]),
                origin["parent_coin_info"],
                origin["puzzle_hash"],
                origin["amount"],
                request["admin_pubkey"],
            )
        return {}

    async def send_clawback_transaction(self, request):
        assert self.service.wallet_state_manager is not None

        wallet_id = int(request["wallet_id"])
        wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]

        fee = int(request["fee"])
        async with self.service.wallet_state_manager.lock:
            tx = await wallet.clawback_rl_coin_transaction(fee)
            await wallet.push_transaction(tx)

        return {
            "transaction": tx,
            "transaction_id": tx.name,
        }

    async def add_rate_limited_funds(self, request):
        wallet_id = uint32(request["wallet_id"])
        wallet: RLWallet = self.service.wallet_state_manager.wallets[wallet_id]
        puzzle_hash = wallet.rl_get_aggregation_puzzlehash(wallet.rl_info.rl_puzzle_hash)
        request["wallet_id"] = 1
        request["puzzle_hash"] = puzzle_hash
        async with self.service.wallet_state_manager.lock:
            await wallet.rl_add_funds(request["amount"], puzzle_hash, request["fee"])
        return {"status": "SUCCESS"}

    async def get_farmed_amount(self, request):
        tx_records: List[TransactionRecord] = await self.service.wallet_state_manager.tx_store.get_farming_rewards()
        amount = 0
        pool_reward_amount = 0
        farmer_reward_amount = 0
        fee_amount = 0
        last_height_farmed = 0
        for record in tx_records:
            height = record.height_farmed(self.service.constants.GENESIS_CHALLENGE)
            if height > last_height_farmed:
                last_height_farmed = height
            if record.type == TransactionType.COINBASE_REWARD:
                pool_reward_amount += record.amount
            if record.type == TransactionType.FEE_REWARD:
                fee_amount += record.amount - calculate_base_farmer_reward(height)
                farmer_reward_amount += calculate_base_farmer_reward(height)
            amount += record.amount
        assert amount == pool_reward_amount + farmer_reward_amount + fee_amount
        return {
            "farmed_amount": amount,
            "pool_reward_amount": pool_reward_amount,
            "farmer_reward_amount": farmer_reward_amount,
            "fee_amount": fee_amount,
            "last_height_farmed": last_height_farmed,
        }

    async def create_signed_transaction(self, request):
        if "additions" not in request or len(request["additions"]) < 1:
            raise ValueError("Specify additions list")

        additions: List[Dict] = request["additions"]
        amount_0: uint64 = uint64(additions[0]["amount"])
        assert amount_0 <= self.service.constants.MAX_COIN_AMOUNT
        puzzle_hash_0 = hexstr_to_bytes(additions[0]["puzzle_hash"])
        if len(puzzle_hash_0) != 32:
            raise ValueError(f"Address must be 32 bytes. {puzzle_hash_0}")

        additional_outputs = []
        for addition in additions[1:]:
            receiver_ph = hexstr_to_bytes(addition["puzzle_hash"])
            if len(receiver_ph) != 32:
                raise ValueError(f"Address must be 32 bytes. {receiver_ph}")
            amount = uint64(addition["amount"])
            if amount > self.service.constants.MAX_COIN_AMOUNT:
                raise ValueError(f"Coin amount cannot exceed {self.service.constants.MAX_COIN_AMOUNT}")
            additional_outputs.append({"puzzlehash": receiver_ph, "amount": amount})

        fee = uint64(0)
        if "fee" in request:
            fee = uint64(request["fee"])

        coins = None
        if "coins" in request and len(request["coins"]) > 0:
            coins = set([Coin.from_json_dict(coin_json) for coin_json in request["coins"]])

        async with self.service.wallet_state_manager.lock:
            signed_tx = await self.service.wallet_state_manager.main_wallet.generate_signed_transaction(
                amount_0, puzzle_hash_0, fee, coins=coins, ignore_max_send_amount=True, primaries=additional_outputs
            )
        return {"signed_tx": signed_tx}
true
true
f703a3ab24005dbecdddfd6bf01d5f0da3cef21d
1,702
py
Python
tests/test_networks.py
bee-hive/nested-policy-rl
56b0be37ed814265cb3ef26ea0a1a62b5cd7f05c
[ "MIT" ]
1
2022-01-28T16:52:40.000Z
2022-01-28T16:52:40.000Z
tests/test_networks.py
bee-hive/nested-policy-rl
56b0be37ed814265cb3ef26ea0a1a62b5cd7f05c
[ "MIT" ]
null
null
null
tests/test_networks.py
bee-hive/nested-policy-rl
56b0be37ed814265cb3ef26ea0a1a62b5cd7f05c
[ "MIT" ]
null
null
null
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn

# import sys
# sys.path.append("../simulated_fqi/")
from simulated_fqi import NFQNetwork, ContrastiveNFQNetwork
import matplotlib.pyplot as plt
import numpy as np


def train(x, y, groups, network, optimizer):
    predicted_q_values = network(x, groups).squeeze()
    loss = F.mse_loss(predicted_q_values, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()


# def test_contrastive_network():
#     # Setup agent
#     network = ContrastiveNFQNetwork(state_dim=0, is_contrastive=True, nonlinearity=nn.Identity)
#     optimizer = optim.Rprop(network.parameters())
#
#     # Generate data
#     n, m = 100, 100
#     beta_shared = -1
#     beta_fg = 2.1
#     x_bg, x_fg = np.linspace(-3, 3, m), np.linspace(-3, 3, n)
#     x = np.concatenate([x_bg, x_fg])
#     groups = np.concatenate([np.zeros(m), np.ones(n)])
#     y = beta_shared * x + beta_fg * groups * x  # + np.random.normal(scale=0.5, size=m+n)
#
#     x = torch.FloatTensor(x).unsqueeze(1)
#     y = torch.FloatTensor(y)
#     groups = torch.FloatTensor(groups).unsqueeze(1)
#
#     for epoch in range(200):
#         loss = train(x, y, groups, network, optimizer)
#         # if epoch % 10 == 0:
#         #     print("Epoch: {:4d}, Loss: {:4f}".format(epoch, loss))
#
#     network.eval()
#     with torch.no_grad():
#         preds = network(x, groups)
#         assert np.allclose(preds.squeeze().numpy(), y.squeeze().numpy(), atol=1e-4)
#     plt.scatter(x, preds, c=groups)
#     plt.show()
#     # import ipdb; ipdb.set_trace()


if __name__ == "__main__":
    test_contrastive_network()
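# A minimal sketch of exercising the train() step above on toy data. Since
# ContrastiveNFQNetwork's constructor is project-specific (and the real test is
# commented out), _ToyNet and the synthetic data here are illustrative assumptions,
# not part of the repository.
class _ToyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(2, 1)

    def forward(self, x, groups):
        # consume the state and the group indicator jointly, like the contrastive network
        return self.fc(torch.cat([x, groups], dim=1))


def _demo_train():
    x = torch.randn(64, 1)
    groups = torch.randint(0, 2, (64, 1)).float()
    y = (-1.0 * x + 2.1 * groups * x).squeeze()  # shared plus group-specific effect
    net = _ToyNet()
    opt = optim.Rprop(net.parameters())
    for _ in range(100):
        loss = train(x, y, groups, net, opt)
    return loss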
27.901639
97
0.632197
import torch
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn

from simulated_fqi import NFQNetwork, ContrastiveNFQNetwork
import matplotlib.pyplot as plt
import numpy as np


def train(x, y, groups, network, optimizer):
    predicted_q_values = network(x, groups).squeeze()
    loss = F.mse_loss(predicted_q_values, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()


if __name__ == "__main__":
    test_contrastive_network()
true
true
f703a3ad013a7914e2e45a7ac2c29a6df6b1ce61
381
py
Python
backend/tw/wsgi.py
gitdevstar/tikatok
78729028f20eda822d9ef36634685feb69d5a3a5
[ "Apache-2.0" ]
null
null
null
backend/tw/wsgi.py
gitdevstar/tikatok
78729028f20eda822d9ef36634685feb69d5a3a5
[ "Apache-2.0" ]
null
null
null
backend/tw/wsgi.py
gitdevstar/tikatok
78729028f20eda822d9ef36634685feb69d5a3a5
[ "Apache-2.0" ]
null
null
null
""" WSGI config for tw project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tw.settings") application = get_wsgi_application()
22.411765
78
0.779528
import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tw.settings")

application = get_wsgi_application()
true
true
f703a3c46f875bef0458a54b32b51c5f0011535b
1,315
py
Python
libs/utils_torch.py
SixHeo/IVOS-ATNet
1cf574953a96bd680c518c6362b510fd103ff271
[ "MIT" ]
31
2020-07-17T09:10:14.000Z
2022-03-19T06:32:09.000Z
libs/utils_torch.py
SixHeo/IVOS-ATNet
1cf574953a96bd680c518c6362b510fd103ff271
[ "MIT" ]
4
2020-09-16T09:50:30.000Z
2021-11-23T06:34:33.000Z
libs/utils_torch.py
SixHeo/IVOS-ATNet
1cf574953a96bd680c518c6362b510fd103ff271
[ "MIT" ]
3
2020-07-17T09:09:38.000Z
2020-08-08T12:43:43.000Z
import torch


def combine_masks_with_batch(masks, n_obj, th=0.5, return_as_onehot=False):
    """Combine mask for different objects.

    Different methods are the following:

    * `max_per_pixel`: Computes the final mask taking the pixel with the highest
      probability for every object.

    # Arguments
        masks: Tensor with shape [B, nobj, H, W]. H, W on batches must be same
        method: String. Method that specifies how the masks are fused.

    # Returns
        [B, 1, H, W]
    """
    # masks : B, nobj, h, w
    # output : h, w
    marker = torch.argmax(masks, dim=1, keepdim=True)
    if not return_as_onehot:
        out_mask = torch.unsqueeze(torch.zeros_like(masks)[:, 0], 1)  # [B, 1, H, W]
        for obj_id in range(n_obj):
            try:
                tmp_mask = (marker == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            except:
                raise NotImplementedError
            out_mask[tmp_mask] = obj_id + 1  # [B, 1, H, W]

    if return_as_onehot:
        out_mask = torch.zeros_like(masks)  # [B, nobj, H, W]
        for obj_id in range(n_obj):
            try:
                tmp_mask = (marker == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            except:
                raise NotImplementedError
            out_mask[:, obj_id] = tmp_mask[:, 0].type(torch.cuda.FloatTensor)

    return out_mask
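# A small usage sketch for combine_masks_with_batch with the default flat output.
# The shapes are illustrative assumptions; note that the one-hot branch above calls
# torch.cuda.FloatTensor and therefore requires a CUDA device.
def _demo_combine():
    masks = torch.rand(2, 3, 4, 4)  # B=2 batches, nobj=3 objects, 4x4 probability maps
    combined = combine_masks_with_batch(masks, n_obj=3, th=0.5)
    assert combined.shape == (2, 1, 4, 4)  # labels 0 (background) .. 3
    return combined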
35.540541
84
0.607605
import torch


def combine_masks_with_batch(masks, n_obj, th=0.5, return_as_onehot=False):
    marker = torch.argmax(masks, dim=1, keepdim=True)
    if not return_as_onehot:
        out_mask = torch.unsqueeze(torch.zeros_like(masks)[:, 0], 1)
        for obj_id in range(n_obj):
            try:
                tmp_mask = (marker == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            except:
                raise NotImplementedError
            out_mask[tmp_mask] = obj_id + 1

    if return_as_onehot:
        out_mask = torch.zeros_like(masks)
        for obj_id in range(n_obj):
            try:
                tmp_mask = (marker == obj_id) * (masks[:, obj_id].unsqueeze(1) > th)
            except:
                raise NotImplementedError
            out_mask[:, obj_id] = tmp_mask[:, 0].type(torch.cuda.FloatTensor)

    return out_mask
true
true
f703a42c92053186384ad6eaafe8ad76c4d7b51e
120
py
Python
python/GtBurst/Task.py
fermi-lat/pyBurstAnalysisGUI
add53fe77ef71cb64a27751f024fb914f7cc0863
[ "BSD-3-Clause" ]
2
2019-03-06T15:48:20.000Z
2020-05-02T15:02:57.000Z
python/GtBurst/Task.py
fermi-lat/pyBurstAnalysisGUI
add53fe77ef71cb64a27751f024fb914f7cc0863
[ "BSD-3-Clause" ]
5
2019-01-23T11:35:41.000Z
2019-03-29T17:36:19.000Z
python/GtBurst/Task.py
fermi-lat/pyBurstAnalysisGUI
add53fe77ef71cb64a27751f024fb914f7cc0863
[ "BSD-3-Clause" ]
null
null
null
class Task(object):
    def __init__(self, name):
        self.name = name
        pass

    def run(self):
        pass
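# A minimal sketch of how this base class is meant to be specialised; the subclass
# name and body are hypothetical, not taken from the repository.
class EchoTask(Task):
    def run(self):
        print("running task %s" % self.name)

# EchoTask("gtselect").run()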
15
36
0.533333
class Task(object):
    def __init__(self, name):
        self.name = name
        pass

    def run(self):
        pass
true
true
f703a432e4ed647c1bb292ffa21bc5f491234419
628
py
Python
manage.py
dyhmzall/budget
693ed201ce65ec5f2656759c8d417ca24a49416b
[ "MIT" ]
null
null
null
manage.py
dyhmzall/budget
693ed201ce65ec5f2656759c8d417ca24a49416b
[ "MIT" ]
1
2021-02-02T10:03:33.000Z
2021-02-02T10:03:33.000Z
manage.py
dyhmzall/budget
693ed201ce65ec5f2656759c8d417ca24a49416b
[ "MIT" ]
null
null
null
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geekshop.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
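# Typical shell invocations of this entry point (a sketch; these subcommands are the
# standard Django ones, not project-specific):
#   python manage.py migrate
#   python manage.py runserver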
28.545455
73
0.683121
import os
import sys


def main():
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geekshop.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
true
true
f703a44c1907fc72f119630ad4366b80be2e6fbf
3,795
py
Python
wb/main/jobs/export_inference_report/export_inference_report_job.py
apaniukov/workbench
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
[ "Apache-2.0" ]
23
2022-03-17T12:24:09.000Z
2022-03-31T09:13:30.000Z
wb/main/jobs/export_inference_report/export_inference_report_job.py
apaniukov/workbench
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
[ "Apache-2.0" ]
18
2022-03-21T08:17:44.000Z
2022-03-30T12:42:30.000Z
wb/main/jobs/export_inference_report/export_inference_report_job.py
apaniukov/workbench
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
[ "Apache-2.0" ]
16
2022-03-17T12:24:14.000Z
2022-03-31T12:15:12.000Z
""" OpenVINO DL Workbench Class for creation job for creating and exporting inference report Copyright (c) 2020 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import csv import json import os from contextlib import closing from sqlalchemy.orm import Session from wb.extensions_factories.database import get_db_session_for_celery from wb.main.enumerates import JobTypesEnum, StatusEnum from wb.main.jobs.interfaces.ijob import IJob from wb.main.jobs.interfaces.job_observers import ExportInferenceReportDBObserver from wb.main.models import SingleInferenceInfoModel, DownloadableArtifactsModel, InferenceReportExportJobModel class InferenceReportExportJob(IJob): job_type = JobTypesEnum.export_inference_report _job_model_class = InferenceReportExportJobModel ext = '.csv' def __init__(self, job_id: int, **unused_kwargs): super().__init__(job_id=job_id) export_project_report_db_observer = ExportInferenceReportDBObserver(job_id=self._job_id) self._job_state_subject.attach(export_project_report_db_observer) self._attach_default_db_and_socket_observers() def run(self): self._job_state_subject.update_state(log='Starting inference report creation job.', status=StatusEnum.running, progress=0) with closing(get_db_session_for_celery()) as session: session: Session job_model: InferenceReportExportJobModel = self.get_job_model(session) artifact: DownloadableArtifactsModel = job_model.shared_artifact artifact_path = artifact.build_full_artifact_path(ext=self.ext) inference_job: SingleInferenceInfoModel = job_model.inference per_layer_data = json.loads(inference_job.runtime_representation) # create report with open(artifact_path, 'w', newline='') as csvfile: report_writer = csv.writer(csvfile, delimiter=';') report_writer.writerow( ['Execution Order', 'Layer Name', 'Layer Type', 'Execution Time', 'Runtime Precision']) for layer in per_layer_data: report_writer.writerow([ layer['details'][0]['executionParams']['execOrder'], layer['layerName'], layer['layerType'], layer['execTime'][0] if layer['execTime'][0] != 'not_executed' else 0, layer['runtimePrecision'], ]) artifact.update(artifact_path) artifact.write_record(session) self._job_state_subject.update_state(log='Finishing inference report job.', status=StatusEnum.ready, progress=100) self._job_state_subject.detach_all_observers() def on_failure(self, exception: Exception): with closing(get_db_session_for_celery()) as session: job_model = self.get_job_model(session) artifact = job_model.downloadable_artifact artifact_path = artifact.build_full_artifact_path(ext=self.ext) if os.path.isfile(artifact_path): os.remove(artifact_path) super().on_failure(exception)
46.851852
110
0.680896
import csv
import json
import os
from contextlib import closing

from sqlalchemy.orm import Session

from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.enumerates import JobTypesEnum, StatusEnum
from wb.main.jobs.interfaces.ijob import IJob
from wb.main.jobs.interfaces.job_observers import ExportInferenceReportDBObserver
from wb.main.models import SingleInferenceInfoModel, DownloadableArtifactsModel, InferenceReportExportJobModel


class InferenceReportExportJob(IJob):
    job_type = JobTypesEnum.export_inference_report
    _job_model_class = InferenceReportExportJobModel
    ext = '.csv'

    def __init__(self, job_id: int, **unused_kwargs):
        super().__init__(job_id=job_id)
        export_project_report_db_observer = ExportInferenceReportDBObserver(job_id=self._job_id)
        self._job_state_subject.attach(export_project_report_db_observer)
        self._attach_default_db_and_socket_observers()

    def run(self):
        self._job_state_subject.update_state(log='Starting inference report creation job.',
                                             status=StatusEnum.running,
                                             progress=0)
        with closing(get_db_session_for_celery()) as session:
            session: Session
            job_model: InferenceReportExportJobModel = self.get_job_model(session)
            artifact: DownloadableArtifactsModel = job_model.shared_artifact
            artifact_path = artifact.build_full_artifact_path(ext=self.ext)
            inference_job: SingleInferenceInfoModel = job_model.inference
            per_layer_data = json.loads(inference_job.runtime_representation)
            with open(artifact_path, 'w', newline='') as csvfile:
                report_writer = csv.writer(csvfile, delimiter=';')
                report_writer.writerow(
                    ['Execution Order', 'Layer Name', 'Layer Type', 'Execution Time', 'Runtime Precision'])
                for layer in per_layer_data:
                    report_writer.writerow([
                        layer['details'][0]['executionParams']['execOrder'],
                        layer['layerName'],
                        layer['layerType'],
                        layer['execTime'][0] if layer['execTime'][0] != 'not_executed' else 0,
                        layer['runtimePrecision'],
                    ])
            artifact.update(artifact_path)
            artifact.write_record(session)
        self._job_state_subject.update_state(log='Finishing inference report job.',
                                             status=StatusEnum.ready,
                                             progress=100)
        self._job_state_subject.detach_all_observers()

    def on_failure(self, exception: Exception):
        with closing(get_db_session_for_celery()) as session:
            job_model = self.get_job_model(session)
            artifact = job_model.downloadable_artifact
            artifact_path = artifact.build_full_artifact_path(ext=self.ext)
        if os.path.isfile(artifact_path):
            os.remove(artifact_path)
        super().on_failure(exception)
true
true
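The run() method in this row serializes the per-layer report as a semicolon-delimited CSV whose header names are spelled out in the writerow call. As a companion illustration, here is a minimal standard-library sketch of reading such an exported report back; the file name is a hypothetical example, not a path used by DL Workbench itself:

import csv

REPORT_PATH = 'inference_report.csv'  # hypothetical example path

with open(REPORT_PATH, newline='') as csvfile:
    # The export job writes with delimiter=';', so read it back the same way.
    reader = csv.DictReader(csvfile, delimiter=';')
    for row in reader:
        # Column names match the header written by InferenceReportExportJob.run.
        exec_time = float(row['Execution Time'])  # the job writes 0 for 'not_executed' layers
        print(row['Layer Name'], row['Layer Type'], exec_time, row['Runtime Precision'])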
f703a44ccaf83acb8a92c16a75f05aff6da856a0
109
py
Python
caffe-tensorflow/kaffe/errors.py
petercheng00/PSPNet-Keras-tensorflow
d50583786a3e8782dd1b735d268e57392cd8c646
[ "MIT" ]
3,209
2015-11-10T06:52:59.000Z
2022-03-10T05:17:28.000Z
caffe-tensorflow/kaffe/errors.py
petercheng00/PSPNet-Keras-tensorflow
d50583786a3e8782dd1b735d268e57392cd8c646
[ "MIT" ]
207
2017-01-01T17:58:57.000Z
2021-11-06T21:40:14.000Z
caffe-tensorflow/kaffe/errors.py
petercheng00/PSPNet-Keras-tensorflow
d50583786a3e8782dd1b735d268e57392cd8c646
[ "MIT" ]
1,218
2015-11-10T23:55:48.000Z
2022-01-07T07:36:57.000Z
import sys class KaffeError(Exception): pass def print_stderr(msg): sys.stderr.write('%s\n' % msg)
13.625
34
0.678899
import sys class KaffeError(Exception): pass def print_stderr(msg): sys.stderr.write('%s\n' % msg)
true
true
f703a55785709def35ca98b2131d75c174529cbf
1,241
py
Python
tests/test_handler.py
priyanshu-kumar02/personfinder
d5390b60709cd0ccaaade9a3b6224a60cd523ed9
[ "Apache-2.0" ]
561
2015-02-16T07:59:42.000Z
2022-03-30T17:31:21.000Z
tests/test_handler.py
Anthonymcqueen21/personfinder
ee7791fbc434eb4ec5cfad449288a1e884db5b1e
[ "Apache-2.0" ]
591
2015-01-30T05:09:30.000Z
2022-02-26T09:31:25.000Z
tests/test_handler.py
Anthonymcqueen21/personfinder
ee7791fbc434eb4ec5cfad449288a1e884db5b1e
[ "Apache-2.0" ]
258
2015-01-25T18:35:12.000Z
2021-12-25T01:44:14.000Z
# Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unittest initialization for handlers. No actual tests""" __author__ = 'lschumacher@google.com (Lee Schumacher)' import main import webob import urllib from google.appengine.ext import webapp def initialize_handler( handler_class, action, repo='haiti', environ=None, params=None): """Initialize handler_class and return the initialized handler.""" params_str = ('?' + urllib.urlencode(params)) if params else '' request = webapp.Request(webob.Request.blank( '/' + repo + '/' + action + params_str, environ=environ).environ) response = webapp.Response() return handler_class(request, response, main.setup_env(request))
34.472222
74
0.734085
__author__ = 'lschumacher@google.com (Lee Schumacher)' import main import webob import urllib from google.appengine.ext import webapp def initialize_handler( handler_class, action, repo='haiti', environ=None, params=None): params_str = ('?' + urllib.urlencode(params)) if params else '' request = webapp.Request(webob.Request.blank( '/' + repo + '/' + action + params_str, environ=environ).environ) response = webapp.Response() return handler_class(request, response, main.setup_env(request))
true
true
f703a5c45317e7de4cb67aaa0ea6702a7d8961c4
1,832
py
Python
chainer/functions/array/copy.py
Evanc123/chainer
929af7189b1271683200aa9b0ba6da2dd3dee110
[ "MIT" ]
90
2017-02-23T04:04:47.000Z
2020-04-09T12:06:50.000Z
chainer/functions/array/copy.py
Evanc123/chainer
929af7189b1271683200aa9b0ba6da2dd3dee110
[ "MIT" ]
7
2017-07-23T13:38:06.000Z
2018-07-10T07:09:03.000Z
chainer/functions/array/copy.py
Evanc123/chainer
929af7189b1271683200aa9b0ba6da2dd3dee110
[ "MIT" ]
32
2017-02-28T07:40:38.000Z
2021-02-17T11:33:09.000Z
from chainer.backends import cuda from chainer import function_node from chainer.utils import type_check class Copy(function_node.FunctionNode): """Copies the input variable onto the specified device.""" def __init__(self, out_device): self.out_device = out_device def check_type_forward(self, in_types): type_check.expect( in_types.size() == 1 ) def forward(self, inputs): x, = inputs self._in_device = cuda.get_device_from_array(x).id if int(self.out_device) == -1: return cuda.to_cpu(x), else: return cuda.to_gpu(x, self.out_device), def backward(self, indexes, grad_outputs): return Copy(self._in_device).apply(grad_outputs) def copy(x, dst): """Copies the input variable onto the specified device. This function copies the array of input variable onto the device specified by ``dst``. When ``dst == -1``, it copies the array onto the host memory. This function supports copies from host to host, from host to device, from device to device and from device to host. Args: x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \ :class:`cupy.ndarray`): Variable to be copied. dst (int): Target device specifier. Returns: ~chainer.Variable: Output variable. .. admonition:: Example >>> import chainer.backends.cuda as cuda >>> x = np.random.uniform(-1, 1, (5, 10)) >>> cuda.get_device_from_array(x).id -1 >>> y = F.copy(x, 0) # from host to device0 >>> cuda.get_device_from_array(y.data).id 0 >>> z = F.copy(y, -1) # from device0 to host >>> cuda.get_device_from_array(z.data).id -1 """ y, = Copy(dst).apply((x,)) return y
29.079365
78
0.617358
from chainer.backends import cuda from chainer import function_node from chainer.utils import type_check class Copy(function_node.FunctionNode): def __init__(self, out_device): self.out_device = out_device def check_type_forward(self, in_types): type_check.expect( in_types.size() == 1 ) def forward(self, inputs): x, = inputs self._in_device = cuda.get_device_from_array(x).id if int(self.out_device) == -1: return cuda.to_cpu(x), else: return cuda.to_gpu(x, self.out_device), def backward(self, indexes, grad_outputs): return Copy(self._in_device).apply(grad_outputs) def copy(x, dst): y, = Copy(dst).apply((x,)) return y
true
true
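Because Copy.backward re-applies Copy with the stored input device id, a gradient arriving at the output is copied back to wherever the input array lives. A small CPU-only sketch of that round trip (dst == -1 keeps everything on the host, so no GPU is needed; assumes chainer and numpy are installed):

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.random.uniform(-1, 1, (5, 10)).astype(np.float32))
y = F.copy(x, -1)  # host-to-host copy; dst == -1 means the host memory
y.grad = np.ones((5, 10), dtype=np.float32)
y.backward()
print(x.grad.shape)  # (5, 10) -- the gradient was copied back to x's device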
f703a604a7bd2e331c356593203c89fd82af1046
1,031
py
Python
program_helper/ast/ops/leaf_ops/DSymtabMod.py
jajajaqlt/nsg
1873f2b5e10441110c3c69940ceb4650f9684ac0
[ "Apache-2.0" ]
10
2021-11-02T18:30:38.000Z
2022-03-21T06:31:33.000Z
program_helper/ast/ops/leaf_ops/DSymtabMod.py
rohanmukh/nag
f2c4b8e60a97c58a6a1c549cc8b4753ebfe8a5e3
[ "Apache-2.0" ]
2
2021-11-05T18:40:42.000Z
2022-03-30T04:33:08.000Z
program_helper/ast/ops/leaf_ops/DSymtabMod.py
rohanmukh/nag
f2c4b8e60a97c58a6a1c549cc8b4753ebfe8a5e3
[ "Apache-2.0" ]
2
2021-11-03T19:14:06.000Z
2021-11-03T23:47:09.000Z
# Copyright 2017 Rice University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from program_helper.ast.ops import Node from utilities.vocab_building_dictionary import DELIM class DSymtabMod(Node): def __init__(self, val, type_helper=None, child=None, sibling=None): super().__init__(val, child, sibling) self.type_helper = type_helper if type_helper is not None else DELIM self.type = DSymtabMod.name() @staticmethod def name(): return 'DSymtabMod'
33.258065
76
0.71775
from program_helper.ast.ops import Node from utilities.vocab_building_dictionary import DELIM class DSymtabMod(Node): def __init__(self, val, type_helper=None, child=None, sibling=None): super().__init__(val, child, sibling) self.type_helper = type_helper if type_helper is not None else DELIM self.type = DSymtabMod.name() @staticmethod def name(): return 'DSymtabMod'
true
true
f703a64b7295f2cbaa086ee03b51c095d6f49bc4
263
py
Python
app/forms/producao_finalizada_form.py
pedroferronato/gerenciamento-rural
5ed873caf9fdf1da2a26938b8cee57b55e7636f0
[ "MIT" ]
null
null
null
app/forms/producao_finalizada_form.py
pedroferronato/gerenciamento-rural
5ed873caf9fdf1da2a26938b8cee57b55e7636f0
[ "MIT" ]
null
null
null
app/forms/producao_finalizada_form.py
pedroferronato/gerenciamento-rural
5ed873caf9fdf1da2a26938b8cee57b55e7636f0
[ "MIT" ]
null
null
null
from datetime import date from flask_wtf import FlaskForm from wtforms import StringField class ProducaoFinalizadasForm(FlaskForm): nome = StringField('Nome:') data_comeco = StringField('Data de início:') data_coleta = StringField('Data de coleta:')
29.222222
48
0.768061
from datetime import date from flask_wtf import FlaskForm from wtforms import StringField class ProducaoFinalizadasForm(FlaskForm): nome = StringField('Nome:') data_comeco = StringField('Data de início:') data_coleta = StringField('Data de coleta:')
true
true
f703a650e127f5b3b3e5ba7bf6a53f3f2cf81f06
2,501
py
Python
automox_console_sdk/models/one_of_device_filters_inner_value_items.py
AutomoxCommunity/automox-console-sdk-python
9e921b138d63f90750e071d0a40e1d7edfa06733
[ "MIT" ]
1
2021-10-05T22:09:10.000Z
2021-10-05T22:09:10.000Z
automox_console_sdk/models/one_of_device_filters_inner_value_items.py
AutomoxCommunity/automox-console-sdk-python
9e921b138d63f90750e071d0a40e1d7edfa06733
[ "MIT" ]
1
2021-09-16T06:00:51.000Z
2021-09-16T06:00:51.000Z
automox_console_sdk/models/one_of_device_filters_inner_value_items.py
AutomoxCommunity/automox-console-sdk-python
9e921b138d63f90750e071d0a40e1d7edfa06733
[ "MIT" ]
4
2021-09-16T02:35:32.000Z
2022-02-16T01:09:57.000Z
# coding: utf-8 """ Automox Console API API for use with the Automox Console # noqa: E501 OpenAPI spec version: 2021-11-16 Contact: support@automox.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class OneOfDeviceFiltersInnerValueItems(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """OneOfDeviceFiltersInnerValueItems - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(OneOfDeviceFiltersInnerValueItems, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, OneOfDeviceFiltersInnerValueItems): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
29.423529
90
0.565774
import pprint import re import six class OneOfDeviceFiltersInnerValueItems(object): swagger_types = { } attribute_map = { } def __init__(self): self.discriminator = None def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(OneOfDeviceFiltersInnerValueItems, dict): for key, value in self.items(): result[key] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, OneOfDeviceFiltersInnerValueItems): return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
true
true
f703a6f04b8a6b8bcf74cd1faf596071853ca5d0
13,045
py
Python
src/TDRPG.py
codysandahl/pygamer-simple-rpg
ebb7be7e474a95dfa29c1f7055ad0f7ed2b3101e
[ "MIT" ]
null
null
null
src/TDRPG.py
codysandahl/pygamer-simple-rpg
ebb7be7e474a95dfa29c1f7055ad0f7ed2b3101e
[ "MIT" ]
null
null
null
src/TDRPG.py
codysandahl/pygamer-simple-rpg
ebb7be7e474a95dfa29c1f7055ad0f7ed2b3101e
[ "MIT" ]
null
null
null
import ugame import stage import utils GAME = None ####################################################### # Game class Game(stage.Stage): """Base class for a game and its display""" # TODO: add game state machine # TODO: make each screen a state, and make a transition between them when player overlaps with trigger zones # TODO: have a combat state def __init__(self, display=None, fps=12): # require singleton global GAME if GAME: raise ValueError("Only one Game is allowed at a time") GAME = self # NOTE: PyGamer display is 160x128 if display: super().__init__(display, fps) else: super().__init__(ugame.display, fps) self.midX = int(self.width*0.5) self.midY = int(self.height*0.5) self.spriteSize = 16 # static size of sprites in pixels using the stage library self.bounceX = self.width-self.spriteSize self.bounceY = self.height-self.spriteSize self.tilesX = int(self.width/self.spriteSize) # number of tiles that will fit in game self.tilesY = int(self.height/self.spriteSize) self.map = None self.updaters = [] self.sprites = [] self.forceRefresh = False # force a refresh on the next frame self._pauseObject = None # object that receives updates while game is paused self.framesToWaitAfterPause = 2 self._curFramesWaiting = 0 def addToUpdates(self, obj): if isinstance(obj, list): self.updaters.extend(obj) else: self.updaters.append(obj) def removeFromUpdates(self, obj): if not isinstance(obj, list): obj = [obj] for o in obj: self.updaters.remove(o) def addToSprites(self, obj, updater=True): if isinstance(obj, list): self.sprites.extend(obj) else: self.sprites.append(obj) if updater: self.addToUpdates(obj) def removeFromSprites(self, obj, updater=True): if not isinstance(obj, list): obj = [obj] for o in obj: self.sprites.remove(o) if updater: self.removeFromUpdates(obj) def pause(self, pauseObject): self._pauseObject = pauseObject def resume(self): self._pauseObject = None self._curFramesWaiting = 0 def gameLoop(self): while True: if self._pauseObject: self._pauseObject.update() elif self._curFramesWaiting < self.framesToWaitAfterPause: ugame.buttons.get_pressed() # clear out button press cache self._curFramesWaiting += 1 else: for obj in self.updaters: obj.update() if not self.forceRefresh: self.render_sprites(self.sprites) else: self.render_block(0, 0) self.forceRefresh = False self.tick() ####################################################### # Map class TileMap(stage.Grid): """A tile map for the whole screen, utilizing a tile set from the given bank""" def __init__(self, bank, width=8, height=8, palette=None, buffer=None): super().__init__(bank, width, height, palette, buffer) self.shaking = 0 self.framesToShake = 4 self._curShakeFrame = 0 self.solidTypes = [] # tile types that should be treated as solid walls for collision self.triggerTypes = [] # tile types that should trigger some action when overlapped def fromHexList(self, tileList): """ Given a list of hex codes, update the tile map Example: tileList = [ "0123456789ABCDEF", # row 0 "0123456790ABCDEF", # row 1 ... 
] """ # validate input if len(tileList) != self.height: raise ValueError("Length of tileList is {} but expected {}".format(len(tileList), self.height)) # iterate through tile list x = 0 y = 0 for row in tileList: if len(row) != self.width: raise ValueError("Length of row {} is {} but expected {}".format(y, len(row), self.width)) for tileValue in row: self.tile(x, y, int(tileValue, 16)) x += 1 y += 1 x = 0 def shake(self, amount=4): self.shaking = amount self._curShakeFrame = 0 def handleTrigger(self, sprite, x, y, tileType): """Handle special actions based on the tile type""" pass def update(self): if self.shaking != 0: GAME.forceRefresh = True if self._curShakeFrame % 2 == 0: self.move(self.shaking, 0) else: self.move(-self.shaking, 0) self._curShakeFrame += 1 if self._curShakeFrame >= self.framesToShake: self._curShakeFrame = 0 self.shaking = 0 ####################################################### # Entities class Moveable(stage.Sprite): """Base class for moveable sprites like a player or enemy""" def __init__(self, bank, x, y): super().__init__(bank, 0, x, y) self.x = x self.y = y self.collider = utils.BoundingBox(self,2, 2, 12, 12) self.animations = utils.StateMachine() def getTilesInCollider(self, dx=0, dy=0): """Calculate the grid tiles that are underneath each corner of this sprite's bounding box""" tiles = [] rect = utils.Rectangle(self.collider.x+dx, self.collider.y+dy, self.collider.width, self.collider.height) # top left point = rect.getTopLeft() point[0] >>= 4 # divide by 16 point[1] >>= 4 # divide by 16 if point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY: tiles.append(point) # top right point = rect.getTopRight() point[0] >>= 4 point[1] >>= 4 if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles: tiles.append(point) # bottom left point = rect.getBtmLeft() point[0] >>= 4 point[1] >>= 4 if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles: tiles.append(point) # bottom right point = rect.getBtmRight() point[0] >>= 4 point[1] >>= 4 if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles: tiles.append(point) # return list of tiles return tiles def getMovement(self): """ Determine desired movement (whether AI or player controls) and return dx, dy for this frame NOTE: tile collision currently only supports moving in one direction at a time (no diagonal) """ return 0, 0 def applyMovementAndAnims(self, dx, dy): """Apply the desired movement and animations to this sprite""" # handle movement and constrain to the stage self.x = max(min(self.x + dx, GAME.bounceX), 0) self.y = max(min(self.y + dy, GAME.bounceY), 0) # finish movement self.move(self.x, self.y) self.collider.update() self.animations.update() def checkTileCollision(self, dx, dy): """Check the game map for collisions with tiles. 
Works best by checking one axis at a time""" if dx != 0: # check map for impassable OR special handler tiles tiles = self.getTilesInCollider(dx, 0) for t in tiles: tileType = GAME.map.tile(x=t[0], y=t[1]) if tileType in GAME.map.solidTypes: if dx > 0: self.x = ((t[0]-1) << 4) + self.collider.dx - 1 else: self.x = ((t[0]+1) << 4) - self.collider.dx + 1 dx = 0 break elif tileType in GAME.map.triggerTypes: GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType) if dy != 0: # check map for impassable OR special handler tiles tiles = self.getTilesInCollider(0, dy) for t in tiles: tileType = GAME.map.tile(x=t[0], y=t[1]) if tileType in GAME.map.solidTypes: if dy > 0: self.y = ((t[1]-1) << 4) + self.collider.dy - 1 else: self.y = ((t[1]+1) << 4) - self.collider.dy + 1 dy = 0 break elif tileType in GAME.map.triggerTypes: GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType) return dx, dy def getAnimation(self, dx, dy): """Update the animation based on the movement and state""" pass def update(self): super().update() dx, dy = self.getMovement() dx, dy = self.checkTileCollision(dx, dy) self.getAnimation(dx, dy) self.applyMovementAndAnims(dx, dy) ####################################################### # Animation Helpers class AnimState(utils.State): """ Base class for animation states in a state machine Expects all the frames to be consecutive in the sprite sheet Can delay a number of game frames between each animation frame (ex: delay of 1 with 12 fps means delay 1/12 sec between animation frames) """ LOOP_FOREVER = -1 ROTATE_MIRROR = 4 ROTATE_90CW = 1 ROTATE_90CCW = 2 def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0): """ Create the new state. By default, the animation will advance each game frame, and it will loop forever. """ super().__init__(name) self.sprite = sprite self.frameStart = frameStart self.frameEnd = frameEnd self._curFrame = frameStart self.delay = delay self._curDelay = 0 self.numTimes = numTimes self._curTimes = 0 self.nextState = nextState self.rotate = rotate def enter(self, machine): utils.log("Entering {} and setting frame to {}. Will repeat {} times and then go to state {}".format(self.name, self.frameStart, self.numTimes, self.nextState)) self.sprite.set_frame(self.frameStart, self.rotate) self._curFrame = self.frameStart self._curDelay = 0 def update(self, machine): # handle delay in the animation if self.delay > 0: if self._curDelay < self.delay: self._curDelay += 1 return # advance the frame in the animation self._curFrame += 1 self._curDelay = 0 # handle looping/exiting animation if self._curFrame > self.frameEnd: self._curFrame = self.frameStart self._curTimes += 1 if self.numTimes != self.LOOP_FOREVER and self._curTimes > self.numTimes: self.goToNextState(machine) return self.sprite.set_frame(self._curFrame, self.rotate) def goToNextState(self, machine): machine.goToState(self.nextState) class AnimLoop(AnimState): """ Loop an animation for a sprite. Expects all the frames to be consecutive in the sprite sheet. """ def __init__(self, name, sprite, frameStart, frameEnd, delay=0, rotate=0): super().__init__(name, sprite, frameStart, frameEnd, delay, rotate=rotate) class AnimRepeatN(AnimState): """ Repeat an animation N times. Expects all the frames to be consecutive in the sprite sheet. 
""" def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0): super().__init__(name, sprite, frameStart, frameEnd, delay, numTimes, nextState, rotate) ####################################################### # GUI class Dialog(TileMap): """A modal text dialog built using a tile map""" def __init__(self, bank, width=8, height=2, text1=None, text2=None, sprite1=None, palette=None, buffer=None): super().__init__(bank, width, height, palette, buffer) self.showing = False # first line of text self.marginX = 4 self.marginY = 4 self.text = None if text1: self.text1 = stage.Text(width=len(text1), height=1) self.text1.text(text1) # second line of text self.marginX2 = self.marginX self.marginY2 = self.marginY + 15 self.text2 = None if text2: self.text2 = stage.Text(width=len(text2), height=1) self.text2.text(text2) # extra sprite self.sprite1 = None if sprite1: self.sprite1 = sprite1 # frames to wait at start (avoids accidental button presses) self.framesToWait = 2 self._curFramesWaiting = 0 def move(self, x, y, z=None): if self.text1: self.text1.move(x+self.marginX, y+self.marginY, z) if self.text2: self.text2.move(x+self.marginX2, y+self.marginY2, z) super().move(x, y, z) def show(self): """Display this dialog on top of all the other layers and pause the game""" if self.showing: return GAME.layers.insert(0, self) if self.text1: GAME.layers.insert(0, self.text1) if self.text2: GAME.layers.insert(0, self.text2) if self.sprite1: GAME.layers.insert(0, self.sprite1) GAME.forceRefresh = True GAME.pause(self) self.showing = True self._curFramesWaiting = 0 def hide(self): """Hide this dialog and unpause the game""" if not self.showing: return GAME.layers.remove(self) if self.text1: GAME.layers.remove(self.text1) if self.text2: GAME.layers.remove(self.text2) if self.sprite1: GAME.layers.remove(self.sprite1) GAME.forceRefresh = True GAME.resume() self.showing = False def update(self): """Update function called while the game is paused""" if self._curFramesWaiting < self.framesToWait: self._curFramesWaiting += 1 return
32.289604
164
0.630433
import ugame import stage import utils GAME = None class Game(stage.Stage): def __init__(self, display=None, fps=12): global GAME if GAME: raise ValueError("Only one Game is allowed at a time") GAME = self if display: super().__init__(display, fps) else: super().__init__(ugame.display, fps) self.midX = int(self.width*0.5) self.midY = int(self.height*0.5) self.spriteSize = 16 self.bounceX = self.width-self.spriteSize self.bounceY = self.height-self.spriteSize self.tilesX = int(self.width/self.spriteSize) self.tilesY = int(self.height/self.spriteSize) self.map = None self.updaters = [] self.sprites = [] self.forceRefresh = False self._pauseObject = None self.framesToWaitAfterPause = 2 self._curFramesWaiting = 0 def addToUpdates(self, obj): if isinstance(obj, list): self.updaters.extend(obj) else: self.updaters.append(obj) def removeFromUpdates(self, obj): if not isinstance(obj, list): obj = [obj] for o in obj: self.updaters.remove(o) def addToSprites(self, obj, updater=True): if isinstance(obj, list): self.sprites.extend(obj) else: self.sprites.append(obj) if updater: self.addToUpdates(obj) def removeFromSprites(self, obj, updater=True): if not isinstance(obj, list): obj = [obj] for o in obj: self.sprites.remove(o) if updater: self.removeFromUpdates(obj) def pause(self, pauseObject): self._pauseObject = pauseObject def resume(self): self._pauseObject = None self._curFramesWaiting = 0 def gameLoop(self): while True: if self._pauseObject: self._pauseObject.update() elif self._curFramesWaiting < self.framesToWaitAfterPause: ugame.buttons.get_pressed() self._curFramesWaiting += 1 else: for obj in self.updaters: obj.update() if not self.forceRefresh: self.render_sprites(self.sprites) else: self.render_block(0, 0) self.forceRefresh = False self.tick() class TileMap(stage.Grid): def __init__(self, bank, width=8, height=8, palette=None, buffer=None): super().__init__(bank, width, height, palette, buffer) self.shaking = 0 self.framesToShake = 4 self._curShakeFrame = 0 self.solidTypes = [] self.triggerTypes = [] def fromHexList(self, tileList): if len(tileList) != self.height: raise ValueError("Length of tileList is {} but expected {}".format(len(tileList), self.height)) x = 0 y = 0 for row in tileList: if len(row) != self.width: raise ValueError("Length of row {} is {} but expected {}".format(y, len(row), self.width)) for tileValue in row: self.tile(x, y, int(tileValue, 16)) x += 1 y += 1 x = 0 def shake(self, amount=4): self.shaking = amount self._curShakeFrame = 0 def handleTrigger(self, sprite, x, y, tileType): pass def update(self): if self.shaking != 0: GAME.forceRefresh = True if self._curShakeFrame % 2 == 0: self.move(self.shaking, 0) else: self.move(-self.shaking, 0) self._curShakeFrame += 1 if self._curShakeFrame >= self.framesToShake: self._curShakeFrame = 0 self.shaking = 0 class Moveable(stage.Sprite): def __init__(self, bank, x, y): super().__init__(bank, 0, x, y) self.x = x self.y = y self.collider = utils.BoundingBox(self,2, 2, 12, 12) self.animations = utils.StateMachine() def getTilesInCollider(self, dx=0, dy=0): tiles = [] rect = utils.Rectangle(self.collider.x+dx, self.collider.y+dy, self.collider.width, self.collider.height) point = rect.getTopLeft() point[0] >>= 4 point[1] >>= 4 if point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY: tiles.append(point) point = rect.getTopRight() point[0] >>= 4 point[1] >>= 4 if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles: 
tiles.append(point) point = rect.getBtmLeft() point[0] >>= 4 point[1] >>= 4 if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles: tiles.append(point) point = rect.getBtmRight() point[0] >>= 4 point[1] >>= 4 if (point[0] >= 0 and point[1] >= 0 and point[0] < GAME.tilesX and point[1] < GAME.tilesY) and not point in tiles: tiles.append(point) return tiles def getMovement(self): return 0, 0 def applyMovementAndAnims(self, dx, dy): self.x = max(min(self.x + dx, GAME.bounceX), 0) self.y = max(min(self.y + dy, GAME.bounceY), 0) self.move(self.x, self.y) self.collider.update() self.animations.update() def checkTileCollision(self, dx, dy): if dx != 0: tiles = self.getTilesInCollider(dx, 0) for t in tiles: tileType = GAME.map.tile(x=t[0], y=t[1]) if tileType in GAME.map.solidTypes: if dx > 0: self.x = ((t[0]-1) << 4) + self.collider.dx - 1 else: self.x = ((t[0]+1) << 4) - self.collider.dx + 1 dx = 0 break elif tileType in GAME.map.triggerTypes: GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType) if dy != 0: tiles = self.getTilesInCollider(0, dy) for t in tiles: tileType = GAME.map.tile(x=t[0], y=t[1]) if tileType in GAME.map.solidTypes: if dy > 0: self.y = ((t[1]-1) << 4) + self.collider.dy - 1 else: self.y = ((t[1]+1) << 4) - self.collider.dy + 1 dy = 0 break elif tileType in GAME.map.triggerTypes: GAME.map.handleTrigger(self, x=t[0], y=t[1], tileType=tileType) return dx, dy def getAnimation(self, dx, dy): pass def update(self): super().update() dx, dy = self.getMovement() dx, dy = self.checkTileCollision(dx, dy) self.getAnimation(dx, dy) self.applyMovementAndAnims(dx, dy) class AnimState(utils.State): LOOP_FOREVER = -1 ROTATE_MIRROR = 4 ROTATE_90CW = 1 ROTATE_90CCW = 2 def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0): super().__init__(name) self.sprite = sprite self.frameStart = frameStart self.frameEnd = frameEnd self._curFrame = frameStart self.delay = delay self._curDelay = 0 self.numTimes = numTimes self._curTimes = 0 self.nextState = nextState self.rotate = rotate def enter(self, machine): utils.log("Entering {} and setting frame to {}. 
Will repeat {} times and then go to state {}".format(self.name, self.frameStart, self.numTimes, self.nextState)) self.sprite.set_frame(self.frameStart, self.rotate) self._curFrame = self.frameStart self._curDelay = 0 def update(self, machine): if self.delay > 0: if self._curDelay < self.delay: self._curDelay += 1 return self._curFrame += 1 self._curDelay = 0 if self._curFrame > self.frameEnd: self._curFrame = self.frameStart self._curTimes += 1 if self.numTimes != self.LOOP_FOREVER and self._curTimes > self.numTimes: self.goToNextState(machine) return self.sprite.set_frame(self._curFrame, self.rotate) def goToNextState(self, machine): machine.goToState(self.nextState) class AnimLoop(AnimState): def __init__(self, name, sprite, frameStart, frameEnd, delay=0, rotate=0): super().__init__(name, sprite, frameStart, frameEnd, delay, rotate=rotate) class AnimRepeatN(AnimState): def __init__(self, name, sprite, frameStart, frameEnd, delay=0, numTimes=-1, nextState='idle', rotate=0): super().__init__(name, sprite, frameStart, frameEnd, delay, numTimes, nextState, rotate) class Dialog(TileMap): def __init__(self, bank, width=8, height=2, text1=None, text2=None, sprite1=None, palette=None, buffer=None): super().__init__(bank, width, height, palette, buffer) self.showing = False self.marginX = 4 self.marginY = 4 self.text = None if text1: self.text1 = stage.Text(width=len(text1), height=1) self.text1.text(text1) self.marginX2 = self.marginX self.marginY2 = self.marginY + 15 self.text2 = None if text2: self.text2 = stage.Text(width=len(text2), height=1) self.text2.text(text2) self.sprite1 = None if sprite1: self.sprite1 = sprite1 self.framesToWait = 2 self._curFramesWaiting = 0 def move(self, x, y, z=None): if self.text1: self.text1.move(x+self.marginX, y+self.marginY, z) if self.text2: self.text2.move(x+self.marginX2, y+self.marginY2, z) super().move(x, y, z) def show(self): if self.showing: return GAME.layers.insert(0, self) if self.text1: GAME.layers.insert(0, self.text1) if self.text2: GAME.layers.insert(0, self.text2) if self.sprite1: GAME.layers.insert(0, self.sprite1) GAME.forceRefresh = True GAME.pause(self) self.showing = True self._curFramesWaiting = 0 def hide(self): if not self.showing: return GAME.layers.remove(self) if self.text1: GAME.layers.remove(self.text1) if self.text2: GAME.layers.remove(self.text2) if self.sprite1: GAME.layers.remove(self.sprite1) GAME.forceRefresh = True GAME.resume() self.showing = False def update(self): if self._curFramesWaiting < self.framesToWait: self._curFramesWaiting += 1 return
true
true
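The collision helpers in Moveable convert pixel coordinates to tile coordinates with point[0] >>= 4, i.e. an integer division by 16, matching the 16-pixel spriteSize set in Game.__init__. A self-contained sketch of the same mapping, with the shift amount stated as an assumption tied to that tile size:

TILE_SHIFT = 4  # 2**4 == 16-pixel tiles, mirroring Game.spriteSize

def pixel_to_tile(px, py):
    """Map a pixel position to its (column, row) in the tile grid via bit shift."""
    return px >> TILE_SHIFT, py >> TILE_SHIFT

# A collider corner at pixel (37, 150) falls in tile column 2, row 9.
assert pixel_to_tile(37, 150) == (2, 9)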
f703a7594603428c46c23789bb1ed339b09979a3
1,136
py
Python
kubi_ecs_logger/models/root_schema.py
kumina/kubi_ecs_logger
64d9519e0759a24253a4edc53e0c024675033d1c
[ "BSD-3-Clause" ]
6
2019-12-15T12:47:06.000Z
2022-01-11T08:54:58.000Z
kubi_ecs_logger/models/root_schema.py
kumina/kubi_ecs_logger
64d9519e0759a24253a4edc53e0c024675033d1c
[ "BSD-3-Clause" ]
null
null
null
kubi_ecs_logger/models/root_schema.py
kumina/kubi_ecs_logger
64d9519e0759a24253a4edc53e0c024675033d1c
[ "BSD-3-Clause" ]
null
null
null
""" TODO: Add doc what this file is doing """ from marshmallow import Schema, post_dump class RootSchema(Schema): SKIP_VALUES = [None] @post_dump def remove_skip_values(self, data, many, **kwargs): return { key: value for key, value in data.items() if value not in self.SKIP_VALUES } @post_dump(pass_original=True) def add_extra(self, serialized, original, many, **kwargs): from kubi_ecs_logger.models.include import INCLUDE_FIELDS for k, v in original.__dict__.items(): if k not in serialized and v is not None: type_name = str(type(v).__name__).lower() if type_name in INCLUDE_FIELDS: schema = INCLUDE_FIELDS[type_name].schema data = schema.dump(v) if "kind" not in data: data["kind"] = type_name serialized[k] = data elif isinstance(v, (int, float, str, bool, dict)): if not str(k).startswith('_'): serialized[k] = v return serialized
32.457143
66
0.553697
from marshmallow import Schema, post_dump class RootSchema(Schema): SKIP_VALUES = [None] @post_dump def remove_skip_values(self, data, many, **kwargs): return { key: value for key, value in data.items() if value not in self.SKIP_VALUES } @post_dump(pass_original=True) def add_extra(self, serialized, original, many, **kwargs): from kubi_ecs_logger.models.include import INCLUDE_FIELDS for k, v in original.__dict__.items(): if k not in serialized and v is not None: type_name = str(type(v).__name__).lower() if type_name in INCLUDE_FIELDS: schema = INCLUDE_FIELDS[type_name].schema data = schema.dump(v) if "kind" not in data: data["kind"] = type_name serialized[k] = data elif isinstance(v, (int, float, str, bool, dict)): if not str(k).startswith('_'): serialized[k] = v return serialized
true
true
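RootSchema's remove_skip_values hook is a general marshmallow pattern: a @post_dump method rewrites the serialized dict before it is returned, here dropping keys whose value is in SKIP_VALUES. A minimal standalone version of just that pattern, without the INCLUDE_FIELDS machinery (assumes marshmallow 3 is installed; the schema and field names are made up for illustration):

from marshmallow import Schema, fields, post_dump

class DropNoneSchema(Schema):
    SKIP_VALUES = [None]

    name = fields.Str()
    detail = fields.Str(allow_none=True)

    @post_dump
    def remove_skip_values(self, data, many, **kwargs):
        # Filter the dumped dict so None-valued keys never reach the output.
        return {k: v for k, v in data.items() if v not in self.SKIP_VALUES}

print(DropNoneSchema().dump({'name': 'event', 'detail': None}))
# -> {'name': 'event'}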
f703a7955d6abdc825910fe297297ee33f76baef
830
py
Python
tensorflow/super_resolution/syndicai.py
muchemwal/models
49fd0a8a61b0e5dab196014bf47de7f62d97c884
[ "MIT" ]
2
2021-09-25T04:24:15.000Z
2022-01-19T14:04:36.000Z
tensorflow/super_resolution/syndicai.py
muchemwal/models
49fd0a8a61b0e5dab196014bf47de7f62d97c884
[ "MIT" ]
53
2020-11-13T19:07:45.000Z
2022-01-19T14:04:24.000Z
tensorflow/super_resolution/syndicai.py
muchemwal/models
49fd0a8a61b0e5dab196014bf47de7f62d97c884
[ "MIT" ]
12
2020-10-16T11:41:35.000Z
2022-03-16T05:58:10.000Z
import os import io import time import base64 import functools from PIL import Image import numpy as np import tensorflow as tf import tensorflow_hub as hub from helpers import * os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True" class PythonPredictor: def __init__(self, config): # Import TF-Hub module self.hub_module = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1") def predict(self, payload): # Preprocess image hr_image = preprocess_image(payload["image_b64"]) # Run model fake_image = self.hub_module(hr_image) # convert to base64 img = get_image(tf.squeeze(fake_image)) im_file = io.BytesIO() img.save(im_file, format="PNG") im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8") return im_bytes
23.055556
81
0.674699
import os import io import time import base64 import functools from PIL import Image import numpy as np import tensorflow as tf import tensorflow_hub as hub from helpers import * os.environ["TFHUB_DOWNLOAD_PROGRESS"] = "True" class PythonPredictor: def __init__(self, config): self.hub_module = hub.load("https://tfhub.dev/captain-pool/esrgan-tf2/1") def predict(self, payload): hr_image = preprocess_image(payload["image_b64"]) fake_image = self.hub_module(hr_image) img = get_image(tf.squeeze(fake_image)) im_file = io.BytesIO() img.save(im_file, format="PNG") im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8") return im_bytes
true
true
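PythonPredictor.predict in this row expects the input image under the image_b64 key and returns the upscaled result as a base64-encoded PNG string. A short sketch of building that payload and saving the response with only the standard library (the file names are hypothetical, and it assumes preprocess_image from the unshown helpers module accepts the base64 string exactly as passed in the payload):

import base64

with open('input.png', 'rb') as f:  # hypothetical input file
    payload = {'image_b64': base64.b64encode(f.read()).decode('utf-8')}

# result = PythonPredictor(config=None).predict(payload)
# with open('upscaled.png', 'wb') as f:
#     f.write(base64.b64decode(result))  # predict returns a base64-encoded PNG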
f703a8de7f659bb95c04b8627c44e3b904c79da1
7,768
py
Python
sc2_academy/ppo/my_epsilon_greedy_policy.py
Ricechrispi/sc2_academy
9ffed467fe019262035ac61d10c5cc3ee64a7bb2
[ "Apache-2.0" ]
2
2020-09-24T12:02:01.000Z
2022-01-24T11:05:41.000Z
sc2_academy/ppo/my_epsilon_greedy_policy.py
Ricechrispi/sc2_academy
9ffed467fe019262035ac61d10c5cc3ee64a7bb2
[ "Apache-2.0" ]
null
null
null
sc2_academy/ppo/my_epsilon_greedy_policy.py
Ricechrispi/sc2_academy
9ffed467fe019262035ac61d10c5cc3ee64a7bb2
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2018 The TF-Agents Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------------------ # DISCLAIMER: This is just a slightly adjusted version of the EpsilonGreedyPolicy in TF-Agents. # Most of the code here is directly copied from there. # I changed it such that the policy in the epsilon case is not random, but sampled from # the original policy distribution. # ------------------------------------------------------------------------------------------ """Policy implementation that generates epsilon-greedy actions from a policy. TODO(kbanoop): Make policy state optional in the action method. """ from __future__ import absolute_import from __future__ import division # Using Type Annotations. from __future__ import print_function from typing import Optional, Text import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import import tensorflow_probability as tfp from tf_agents.bandits.policies import policy_utilities from tf_agents.policies import greedy_policy from tf_agents.policies import tf_policy from tf_agents.trajectories import policy_step from tf_agents.typing import types from tf_agents.utils import nest_utils tfd = tfp.distributions class EpsilonGreedyPolicy(tf_policy.TFPolicy): """Returns epsilon-greedy samples of a given policy.""" def __init__(self, policy: tf_policy.TFPolicy, epsilon: types.FloatOrReturningFloat, name: Optional[Text] = None): """Builds an epsilon-greedy MixturePolicy wrapping the given policy. Args: policy: A policy implementing the tf_policy.TFPolicy interface. epsilon: The probability of taking the random action represented as a float scalar, a scalar Tensor of shape=(), or a callable that returns a float scalar or Tensor. name: The name of this policy. Raises: ValueError: If epsilon is invalid. 
""" try: observation_and_action_constraint_splitter = ( policy.observation_and_action_constraint_splitter) except AttributeError: observation_and_action_constraint_splitter = None try: accepts_per_arm_features = policy.accepts_per_arm_features except AttributeError: accepts_per_arm_features = False self._greedy_policy = greedy_policy.GreedyPolicy(policy) self._epsilon = epsilon self._epsilon_policy = self._greedy_policy.wrapped_policy # this is my main change from the original code super(EpsilonGreedyPolicy, self).__init__( policy.time_step_spec, policy.action_spec, policy.policy_state_spec, policy.info_spec, emit_log_probability=policy.emit_log_probability, observation_and_action_constraint_splitter=( observation_and_action_constraint_splitter), name=name) @property def wrapped_policy(self) -> tf_policy.TFPolicy: return self._greedy_policy.wrapped_policy def _variables(self): return self._greedy_policy.variables() def _get_epsilon(self): if callable(self._epsilon): return self._epsilon() else: return self._epsilon def _action(self, time_step, policy_state, seed): seed_stream = tfp.util.SeedStream(seed=seed, salt='epsilon_greedy') greedy_action = self._greedy_policy.action(time_step, policy_state) epsilon_action = self._epsilon_policy.action(time_step, (), seed_stream()) outer_shape = nest_utils.get_outer_shape(time_step, self._time_step_spec) rng = tf.random.uniform( outer_shape, maxval=1.0, seed=seed_stream(), name='epsilon_rng') cond = tf.greater(rng, self._get_epsilon()) # Selects the action/info from the random policy with probability epsilon. # TODO(b/133175894): tf.compat.v1.where only supports a condition which is # either a scalar or a vector. Use tf.compat.v2 so that it can support any # condition whose leading dimensions are the same as the other operands of # tf.where. outer_ndims = int(outer_shape.shape[0]) if outer_ndims >= 2: raise ValueError( 'Only supports batched time steps with a single batch dimension') action = tf.nest.map_structure(lambda g, r: tf.compat.v1.where(cond, g, r), greedy_action.action, epsilon_action.action) if greedy_action.info: if not epsilon_action.info: raise ValueError('Incompatible info field') # Note that the objects in PolicyInfo may have different shapes, so we # need to call nest_utils.where() on each type of object. info = tf.nest.map_structure(lambda x, y: nest_utils.where(cond, x, y), greedy_action.info, epsilon_action.info) if self._emit_log_probability: # At this point, info.log_probability contains the log prob of the # action chosen, conditioned on the policy that was chosen. We want to # emit the full log probability of the action, so we'll add in the log # probability of choosing the policy. random_log_prob = tf.nest.map_structure( lambda t: tf.math.log(tf.zeros_like(t) + self._get_epsilon()), info.log_probability) greedy_log_prob = tf.nest.map_structure( lambda t: tf.math.log(tf.ones_like(t) - self._get_epsilon()), random_log_prob) log_prob_of_chosen_policy = nest_utils.where(cond, greedy_log_prob, random_log_prob) log_prob = tf.nest.map_structure(lambda a, b: a + b, log_prob_of_chosen_policy, info.log_probability) info = policy_step.set_log_probability(info, log_prob) # Overwrite bandit policy info type. if policy_utilities.has_bandit_policy_type(info, check_for_tensor=True): # Generate mask of the same shape as bandit_policy_type (batch_size, 1). # This is the opposite of `cond`, which is 1-D bool tensor (batch_size,) # that is true when greedy policy was used, otherwise `cond` is false. 
random_policy_mask = tf.reshape(tf.logical_not(cond), tf.shape(info.bandit_policy_type)) bandit_policy_type = policy_utilities.bandit_policy_uniform_mask( info.bandit_policy_type, mask=random_policy_mask) info = policy_utilities.set_bandit_policy_type( info, bandit_policy_type) else: if epsilon_action.info: raise ValueError('Incompatible info field') info = () # The state of the epsilon greedy policy is the state of the underlying # greedy policy (the random policy carries no state). # It is commonly assumed that the new policy state depends only # on the previous state and "time_step"; the action (be it the greedy one # or the random one) does not influence the new policy state. state = greedy_action.state return policy_step.PolicyStep(action, state, info) def _distribution(self, time_step, policy_state): raise NotImplementedError( 'EpsilonGreedyPolicy does not support distributions yet.')
44.136364
109
0.686535
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import Optional, Text import tensorflow as tf import tensorflow_probability as tfp from tf_agents.bandits.policies import policy_utilities from tf_agents.policies import greedy_policy from tf_agents.policies import tf_policy from tf_agents.trajectories import policy_step from tf_agents.typing import types from tf_agents.utils import nest_utils tfd = tfp.distributions class EpsilonGreedyPolicy(tf_policy.TFPolicy): def __init__(self, policy: tf_policy.TFPolicy, epsilon: types.FloatOrReturningFloat, name: Optional[Text] = None): try: observation_and_action_constraint_splitter = ( policy.observation_and_action_constraint_splitter) except AttributeError: observation_and_action_constraint_splitter = None try: accepts_per_arm_features = policy.accepts_per_arm_features except AttributeError: accepts_per_arm_features = False self._greedy_policy = greedy_policy.GreedyPolicy(policy) self._epsilon = epsilon self._epsilon_policy = self._greedy_policy.wrapped_policy super(EpsilonGreedyPolicy, self).__init__( policy.time_step_spec, policy.action_spec, policy.policy_state_spec, policy.info_spec, emit_log_probability=policy.emit_log_probability, observation_and_action_constraint_splitter=( observation_and_action_constraint_splitter), name=name) @property def wrapped_policy(self) -> tf_policy.TFPolicy: return self._greedy_policy.wrapped_policy def _variables(self): return self._greedy_policy.variables() def _get_epsilon(self): if callable(self._epsilon): return self._epsilon() else: return self._epsilon def _action(self, time_step, policy_state, seed): seed_stream = tfp.util.SeedStream(seed=seed, salt='epsilon_greedy') greedy_action = self._greedy_policy.action(time_step, policy_state) epsilon_action = self._epsilon_policy.action(time_step, (), seed_stream()) outer_shape = nest_utils.get_outer_shape(time_step, self._time_step_spec) rng = tf.random.uniform( outer_shape, maxval=1.0, seed=seed_stream(), name='epsilon_rng') cond = tf.greater(rng, self._get_epsilon()) outer_ndims = int(outer_shape.shape[0]) if outer_ndims >= 2: raise ValueError( 'Only supports batched time steps with a single batch dimension') action = tf.nest.map_structure(lambda g, r: tf.compat.v1.where(cond, g, r), greedy_action.action, epsilon_action.action) if greedy_action.info: if not epsilon_action.info: raise ValueError('Incompatible info field') info = tf.nest.map_structure(lambda x, y: nest_utils.where(cond, x, y), greedy_action.info, epsilon_action.info) if self._emit_log_probability: # probability of choosing the policy. random_log_prob = tf.nest.map_structure( lambda t: tf.math.log(tf.zeros_like(t) + self._get_epsilon()), info.log_probability) greedy_log_prob = tf.nest.map_structure( lambda t: tf.math.log(tf.ones_like(t) - self._get_epsilon()), random_log_prob) log_prob_of_chosen_policy = nest_utils.where(cond, greedy_log_prob, random_log_prob) log_prob = tf.nest.map_structure(lambda a, b: a + b, log_prob_of_chosen_policy, info.log_probability) info = policy_step.set_log_probability(info, log_prob) # Overwrite bandit policy info type. if policy_utilities.has_bandit_policy_type(info, check_for_tensor=True): # Generate mask of the same shape as bandit_policy_type (batch_size, 1). # This is the opposite of `cond`, which is 1-D bool tensor (batch_size,) # that is true when greedy policy was used, otherwise `cond` is false. 
random_policy_mask = tf.reshape(tf.logical_not(cond), tf.shape(info.bandit_policy_type)) bandit_policy_type = policy_utilities.bandit_policy_uniform_mask( info.bandit_policy_type, mask=random_policy_mask) info = policy_utilities.set_bandit_policy_type( info, bandit_policy_type) else: if epsilon_action.info: raise ValueError('Incompatible info field') info = () # The state of the epsilon greedy policy is the state of the underlying # greedy policy (the random policy carries no state). # It is commonly assumed that the new policy state depends only # on the previous state and "time_step"; the action (be it the greedy one # or the random one) does not influence the new policy state. state = greedy_action.state return policy_step.PolicyStep(action, state, info) def _distribution(self, time_step, policy_state): raise NotImplementedError( 'EpsilonGreedyPolicy does not support distributions yet.')
true
true
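Stripped of the TF-Agents plumbing, the adjustment described in this row's disclaimer comes down to the selection rule: with probability 1 - epsilon take the greedy argmax, and with probability epsilon sample from the wrapped policy's own action distribution rather than uniformly at random. A framework-free numpy sketch of that rule (the action probabilities are made-up example values):

import numpy as np

rng = np.random.default_rng(0)

def epsilon_greedy_from_policy(action_probs, epsilon):
    """Greedy argmax with prob. 1-eps; otherwise sample from the policy itself."""
    if rng.uniform() > epsilon:
        return int(np.argmax(action_probs))  # greedy branch (cond is True)
    return int(rng.choice(len(action_probs), p=action_probs))  # policy sample, not uniform

probs = np.array([0.7, 0.2, 0.1])  # hypothetical policy distribution
print([epsilon_greedy_from_policy(probs, epsilon=0.3) for _ in range(10)])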
f703a9c862cb7b5a61c531b4c724bf4d85a76283
792
py
Python
Perro/tienda.py
SebaB29/Python
8fe7b375e200d2a629e3ef83a2356002621267a6
[ "MIT" ]
null
null
null
Perro/tienda.py
SebaB29/Python
8fe7b375e200d2a629e3ef83a2356002621267a6
[ "MIT" ]
null
null
null
Perro/tienda.py
SebaB29/Python
8fe7b375e200d2a629e3ef83a2356002621267a6
[ "MIT" ]
null
null
null
from os import system def comprar(comida, juguetes): comprado = "" while not comprado: system("cls") comprar = (input("Que quiere comprar? Alimento | Juguete : ")).lower() if comprar == "alimento": print(f"Carne: {comida['carne']['cantidad']}|Agua: {comida['agua']['cantidad']}|Hueso: {comida['hueso']['cantidad']}") producto = (input("Que quieres comprar?: ")).lower() if producto in comida.keys(): cantidad = input("Cuánto quieres comprar?: ") if cantidad.isdecimal(): comida[producto]['cantidad'] += int(cantidad) comprado = producto if comprar == "juguete": print("Pelota | Soga | Muñeco") producto = (input("Que quieres comprar?: ")).lower() if producto in juguetes.keys(): juguetes[producto] = "si" comprado = producto
24.75
122
0.638889
from os import system def comprar(comida, juguetes): comprado = "" while not comprado: system("cls") comprar = (input("Que quiere comprar? Alimento | Juguete : ")).lower() if comprar == "alimento": print(f"Carne: {comida['carne']['cantidad']}|Agua: {comida['agua']['cantidad']}|Hueso: {comida['hueso']['cantidad']}") producto = (input("Que quieres comprar?: ")).lower() if producto in comida.keys(): cantidad = input("Cuánto quieres comprar?: ") if cantidad.isdecimal(): comida[producto]['cantidad'] += int(cantidad) comprado = producto if comprar == "juguete": print("Pelota | Soga | Muñeco") producto = (input("Que quieres comprar?: ")).lower() if producto in juguetes.keys(): juguetes[producto] = "si" comprado = producto
true
true
f703ab7f383253a03a598122cf344f36aac5f4c1
671
py
Python
mlfinlab/microstructural_features/third_generation.py
scibol/mlfinlab
3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984
[ "BSD-3-Clause" ]
8
2020-04-19T08:09:34.000Z
2022-03-30T20:49:40.000Z
mlfinlab/microstructural_features/third_generation.py
scibol/mlfinlab
3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984
[ "BSD-3-Clause" ]
1
2019-07-24T17:52:30.000Z
2019-07-24T17:52:30.000Z
mlfinlab/microstructural_features/third_generation.py
scibol/mlfinlab
3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984
[ "BSD-3-Clause" ]
8
2020-08-09T02:25:04.000Z
2022-03-20T15:08:11.000Z
""" Third generation models implementation (VPIN) """ import pandas as pd def get_vpin(volume: pd.Series, buy_volume: pd.Series, window: int = 1) -> pd.Series: """ Get Volume-Synchronized Probability of Informed Trading (VPIN) from bars, p. 292-293. :param volume: (pd.Series) bar volume :param buy_volume: (pd.Series) bar volume classified as buy (either tick rule, BVC or aggressor side methods applied) :param window: (int) estimation window :return: (pd.Series) VPIN series """ sell_volume = volume - buy_volume volume_imbalance = abs(buy_volume - sell_volume) return volume_imbalance.rolling(window=window).mean() / volume
35.315789
121
0.710879
import pandas as pd def get_vpin(volume: pd.Series, buy_volume: pd.Series, window: int = 1) -> pd.Series: sell_volume = volume - buy_volume volume_imbalance = abs(buy_volume - sell_volume) return volume_imbalance.rolling(window=window).mean() / volume
true
true
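Written out, get_vpin computes VPIN_t as the rolling mean over `window` bars of |V_buy - V_sell|, divided by the bar volume, with V_sell = V - V_buy. A small synthetic usage sketch (the volumes are invented numbers; assumes pandas is installed):

import pandas as pd

volume = pd.Series([100.0, 120.0, 90.0, 110.0, 105.0])   # made-up total bar volumes
buy_volume = pd.Series([60.0, 50.0, 70.0, 55.0, 40.0])   # made-up buy-classified volumes

sell_volume = volume - buy_volume
volume_imbalance = (buy_volume - sell_volume).abs()

# Same computation as get_vpin(volume, buy_volume, window=3).
vpin = volume_imbalance.rolling(window=3).mean() / volume
print(vpin)  # the first two entries are NaN until the rolling window fills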
f703abc44854a43f352a92f4fe2b31a889a7c5f4
938
py
Python
github3/gists/comment.py
kbakba/github3.py
ab5d6a59a3e3f3c7022184b0e5dd17968809dc03
[ "BSD-3-Clause" ]
null
null
null
github3/gists/comment.py
kbakba/github3.py
ab5d6a59a3e3f3c7022184b0e5dd17968809dc03
[ "BSD-3-Clause" ]
7
2021-02-08T20:22:15.000Z
2022-03-11T23:19:41.000Z
venv/lib/python2.7/site-packages/github3/gists/comment.py
mutaihillary/mycalculator
55685dd7c968861f18ae0701129f5af2bc682d67
[ "MIT" ]
null
null
null
""" github3.gists.comment --------------------- Module containing the logic for a GistComment """ from github3.models import BaseComment from github3.users import User class GistComment(BaseComment): """This object represents a comment on a gist. Two comment instances can be checked like so:: c1 == c2 c1 != c2 And is equivalent to:: c1.id == c2.id c1.id != c2.id See also: http://developer.github.com/v3/gists/comments/ """ def __init__(self, comment, session=None): super(GistComment, self).__init__(comment, session) #: :class:`User <github3.users.User>` who made the comment #: Unless it is not associated with an account self.user = None if comment.get('user'): self.user = User(comment.get('user'), self) # (No coverage) def __repr__(self): return '<Gist Comment [{0}]>'.format(self.user.login)
22.333333
72
0.613006
from github3.models import BaseComment from github3.users import User class GistComment(BaseComment): def __init__(self, comment, session=None): super(GistComment, self).__init__(comment, session) self.user = None if comment.get('user'): self.user = User(comment.get('user'), self) def __repr__(self): return '<Gist Comment [{0}]>'.format(self.user.login)
true
true
f703adb6f717d60f71886dce78d79117e78f66ac
5,223
py
Python
NeuralNetwork.py
ronnith24/NeuralNetworksFromScratch
5c831de8954a4b84fef7b70b16f9d9e6c1cb24b9
[ "MIT" ]
null
null
null
NeuralNetwork.py
ronnith24/NeuralNetworksFromScratch
5c831de8954a4b84fef7b70b16f9d9e6c1cb24b9
[ "MIT" ]
null
null
null
NeuralNetwork.py
ronnith24/NeuralNetworksFromScratch
5c831de8954a4b84fef7b70b16f9d9e6c1cb24b9
[ "MIT" ]
null
null
null
import numpy as np class NeuralNetwork(object): def __init__(self, topology, epsilon, numLabels): self.theta = [] self.topology = topology self.numLabels = numLabels self.gradientChecking = False for layer in range(len(self.topology)): if layer == 0: continue self.theta.append(np.random.rand(self.topology[layer], self.topology[layer - 1] + 1) * 2 * epsilon - epsilon) def gradientDescent(self, iters, alpha, lamda, X, Y): self.X = X self.Y = Y for i in range(iters): (J, thetaGrad) = self.getCostAndGradient(lamda) # gradient checking (loops use l, not i, so the outer iteration counter is not shadowed) if self.gradientChecking: thetaCopy = self.theta.copy() for l in range(len(self.topology) - 1): for j in range(self.topology[l + 1]): for k in range(self.topology[l]): EPS = 0.00001 self.theta[l][j, k] += EPS J2 = self.getCostAndGradient(lamda)[0] self.theta[l][j, k] -= 2 * EPS J1 = self.getCostAndGradient(lamda)[0] self.theta[l][j, k] += EPS # restore the perturbed weight print(str((J2 - J1) / (2 * EPS) - thetaGrad[l][j, k])) self.theta = thetaCopy # end for layer in range(len(self.topology) - 1): self.theta[layer] -= thetaGrad[layer] * alpha print("Iter " + str(i) + ": " + str(J)) def predict(self, x): x = x.reshape((x.shape[0], 1)) x = np.concatenate(([[1]], x)) for layer in range(1, len(self.topology)): x = np.matmul(self.theta[layer - 1], x) for i in range(x.shape[0]): x[i, 0] = self.sigmoid(x[i, 0]) if layer != len(self.topology) - 1: x = np.concatenate(([[1]], x)) prediction = -1 predictionSurety = -1 for i in range(self.numLabels): if x[i, 0] > predictionSurety: prediction = i predictionSurety = x[i, 0] return prediction def getCostAndGradient(self, lamda): J = 0 thetaGrad = [] for layer in range(len(self.topology)): if layer == 0: continue thetaGrad.append(np.zeros((self.topology[layer], self.topology[layer - 1] + 1))) m = self.X.shape[0] for example in range(m): x = self.X[example].copy() x = x.reshape((x.shape[0], 1)) y = np.zeros(self.numLabels) y[self.Y[example]] = 1 y = y.reshape((y.shape[0], 1)) a = [] z = [] delta = [] for layer in range(len(self.topology)): if layer == 0: a.append(np.concatenate(([[1]], x))) z.append(np.concatenate(([[1]], x))) delta.append(0) continue z.append(np.matmul(self.theta[layer - 1], a[layer - 1])) a.append(z[layer].copy()) for i in range(self.topology[layer]): a[layer][i, 0] = self.sigmoid(a[layer][i, 0]) if layer != len(self.topology) - 1: a[layer] = np.concatenate(([[1]], a[layer])) z[layer] = np.concatenate(([[1]], z[layer])) delta.append(0) for layer in range(len(self.topology) - 1, 0, -1): if layer == len(self.topology) - 1: delta[layer] = a[layer] - y thetaGrad[layer - 1] += np.matmul(delta[layer], a[layer - 1].transpose()) continue sigDerZ = z[layer].copy() for i in range(self.topology[layer] + 1): sigDerZ[i] = self.sigmoidDerivative(sigDerZ[i]) if layer >= len(self.topology) - 2: delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1]) * sigDerZ else: delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1][1:, :]) * sigDerZ thetaGrad[layer - 1] += np.matmul(delta[layer][1:, :], a[layer - 1].transpose()) J += np.sum(-(1 - y) * np.log(1 - a[len(self.topology) - 1])) - np.sum(y * np.log(a[len(self.topology) - 1])) J /= m for layer in range(len(self.topology) - 1): thetaGrad[layer] *= (1 / m) for i in range(len(self.topology) - 1): for j in range(self.topology[i + 1]): for k in range(1, self.topology[i] + 1): # regularize every weight column, skipping only the bias J += (lamda / (2 * m)) * self.theta[i][j, k] ** 2 thetaGrad[i][j, k] += (lamda / m) * self.theta[i][j, k] return (J, thetaGrad) def sigmoid(self, x): return 1 / (1 + np.exp(-x)) def sigmoidDerivative(self, x): sig = self.sigmoid(x) 
return sig * (1 - sig)
39.568182
121
0.450699
import numpy as np


class NeuralNetwork(object):
    def __init__(self, topology, epsilon, numLabels):
        self.theta = []
        self.topology = topology
        self.numLabels = numLabels
        self.gradientChecking = False
        for layer in range(len(self.topology)):
            if layer == 0:
                continue
            self.theta.append(np.random.rand(self.topology[layer], self.topology[layer - 1] + 1) * 2 * epsilon - epsilon)

    def gradientDescent(self, iters, alpha, lamda, X, Y):
        self.X = X
        self.Y = Y

        for i in range(iters):
            (J, thetaGrad) = self.getCostAndGradient(lamda)

            if self.gradientChecking:
                thetaCopy = self.theta.copy()
                for i in range(len(self.topology) - 1):
                    for j in range(self.topology[i + 1]):
                        for k in range(self.topology[i]):
                            EPS = 0.00001
                            self.theta[i][j, k] += EPS
                            J2 = self.getCostAndGradient(lamda)[0]
                            self.theta[i][j, k] -= 2 * EPS
                            J1 = self.getCostAndGradient(lamda)[0]
                            print(str((J2 - J1) / (2 * EPS) - thetaGrad[i][j, k]))
                self.theta = thetaCopy

            for layer in range(len(self.topology) - 1):
                self.theta[layer] -= thetaGrad[layer] * alpha

            print("Iter " + str(i) + ": " + str(J))

    def predict(self, x):
        x = x.reshape((x.shape[0], 1))
        x = np.concatenate(([[1]], x))
        for layer in range(1, len(self.topology)):
            x = np.matmul(self.theta[layer - 1], x)
            for i in range(x.shape[0]):
                x[i, 0] = self.sigmoid(x[i, 0])
            if layer != len(self.topology) - 1:
                x = np.concatenate(([[1]], x))

        prediction = -1
        predictionSurety = -1
        for i in range(self.numLabels):
            if x[i, 0] > predictionSurety:
                prediction = i
                predictionSurety = x[i, 0]

        return prediction

    def getCostAndGradient(self, lamda):
        J = 0
        thetaGrad = []
        for layer in range(len(self.topology)):
            if layer == 0:
                continue
            thetaGrad.append(np.zeros((self.topology[layer], self.topology[layer - 1] + 1)))

        m = self.X.shape[0]
        for example in range(m):
            x = self.X[example].copy()
            x = x.reshape((x.shape[0], 1))
            y = np.zeros(self.numLabels)
            y[self.Y[example]] = 1
            y = y.reshape((y.shape[0], 1))

            a = []
            z = []
            delta = []
            for layer in range(len(self.topology)):
                if layer == 0:
                    a.append(np.concatenate(([[1]], x)))
                    z.append(np.concatenate(([[1]], x)))
                    delta.append(0)
                    continue
                z.append(np.matmul(self.theta[layer - 1], a[layer - 1]))
                a.append(z[layer].copy())
                for i in range(self.topology[layer]):
                    a[layer][i, 0] = self.sigmoid(a[layer][i, 0])
                if layer != len(self.topology) - 1:
                    a[layer] = np.concatenate(([[1]], a[layer]))
                    z[layer] = np.concatenate(([[1]], z[layer]))
                delta.append(0)

            for layer in range(len(self.topology) - 1, 0, -1):
                if layer == len(self.topology) - 1:
                    delta[layer] = a[layer] - y
                    thetaGrad[layer - 1] += np.matmul(delta[layer], a[layer - 1].transpose())
                    continue
                sigDerZ = z[layer].copy()
                for i in range(self.topology[layer] + 1):
                    sigDerZ[i] = self.sigmoidDerivative(sigDerZ[i])
                if layer >= len(self.topology) - 2:
                    delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1]) * sigDerZ
                else:
                    delta[layer] = np.matmul(self.theta[layer].transpose(), delta[layer + 1][1:, :]) * sigDerZ
                thetaGrad[layer - 1] += np.matmul(delta[layer][1:, :], a[layer - 1].transpose())

            J += np.sum(-(1 - y) * np.log(1 - a[len(self.topology) - 1])) - np.sum(y * np.log(a[len(self.topology) - 1]))

        J /= m
        for layer in range(len(self.topology) - 1):
            thetaGrad[layer] *= (1 / m)

        for i in range(len(self.topology) - 1):
            for j in range(self.topology[i + 1]):
                for k in range(1, self.topology[i]):
                    J += (lamda / (2 * m)) * self.theta[i][j, k] ** 2
                    thetaGrad[i][j, k] += (lamda / m) * self.theta[i][j, k]

        return (J, thetaGrad)

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoidDerivative(self, x):
        sig = self.sigmoid(x)
        return sig * (1 - sig)
true
true
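The record above implements a from-scratch feed-forward network with sigmoid activations, batch gradient descent, L2 regularization and an optional numerical gradient check. Note that when gradientChecking is enabled, the inner loops reuse the name i and restore theta through a shallow copy, so that branch is best treated as a diagnostic. A minimal, editor-added usage sketch follows; the module name neural_network and the XOR data are illustrative assumptions, not part of the record:

import numpy as np
from neural_network import NeuralNetwork  # hypothetical module holding the class above

# XOR toy problem: 2 inputs, 4 hidden units, 2 output labels.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
Y = np.array([0, 1, 1, 0])

net = NeuralNetwork(topology=[2, 4, 2], epsilon=0.12, numLabels=2)
net.gradientDescent(iters=1000, alpha=0.5, lamda=0.0, X=X, Y=Y)
print([net.predict(x) for x in X])  # ideally [0, 1, 1, 0]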
f703ae182509e8225f54eb3a2bec3dbf5e9423a8
282
py
Python
chapter09/exercise02.py
YordanIH/Intro_to_CS_w_Python
eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a
[ "MIT" ]
null
null
null
chapter09/exercise02.py
YordanIH/Intro_to_CS_w_Python
eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a
[ "MIT" ]
null
null
null
chapter09/exercise02.py
YordanIH/Intro_to_CS_w_Python
eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a
[ "MIT" ]
null
null
null
"""“Write a for loop to print all the values in the half_lives list from ​Operations on Lists​, all on a single line. half_lives refers to [87.74, 24110.0, 6537.0, 14.4, 376000.0].""" half_lives = [87.74, 24110.0, 6537.0, 14.4, 376000.0] for i in half_lives: print(i , end=' ')
56.4
183
0.677305
half_lives = [87.74, 24110.0, 6537.0, 14.4, 376000.0]
for i in half_lives:
    print(i, end=' ')
true
true
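The exercise above prints the list items space-separated via end=' '. An equivalent editor-added one-liner using str.join (illustrative, not from the record):

half_lives = [87.74, 24110.0, 6537.0, 14.4, 376000.0]
print(' '.join(str(h) for h in half_lives))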
f703ae2bb9e374037093786ce006ceb2ba2185c3
3,238
py
Python
swagger_to/bin/swagger_style.py
abingham/swagger-to
a1ef9f46561d39809da0e6ab356427a247815d92
[ "MIT" ]
38
2018-08-06T15:11:10.000Z
2022-02-13T22:43:00.000Z
swagger_to/bin/swagger_style.py
abingham/swagger-to
a1ef9f46561d39809da0e6ab356427a247815d92
[ "MIT" ]
42
2018-08-07T08:25:07.000Z
2021-11-28T19:32:48.000Z
swagger_to/bin/swagger_style.py
abingham/swagger-to
a1ef9f46561d39809da0e6ab356427a247815d92
[ "MIT" ]
16
2019-02-26T12:39:43.000Z
2022-01-29T06:38:41.000Z
#!/usr/bin/env python3
"""Read a correct swagger file and check whether it conforms to a style guide."""
import argparse
import pathlib
from typing import List

import sys

import swagger_to.intermediate
import swagger_to.style
import swagger_to.swagger


def main() -> int:
    """Execute the main routine."""
    parser = argparse.ArgumentParser("Reads a correct swagger file and checks that it conforms to the style guide.")
    parser.add_argument("--swagger_path", help="path to the swagger file", required=True)
    parser.add_argument("--verbose", help="if set, prints as much information as possible.", action="store_true")
    parser.add_argument(
        "--with_line_number",
        help="if set, prints the errors with the corresponding file name and line number.",
        action="store_true")
    args = parser.parse_args()

    assert isinstance(args.swagger_path, str)
    assert isinstance(args.verbose, bool)
    assert isinstance(args.with_line_number, bool)

    swagger_path = pathlib.Path(args.swagger_path)

    if not swagger_path.exists():
        print("File not found error: Swagger file does not exist: {}".format(swagger_path))
        return 2

    swagger, errs = swagger_to.swagger.parse_yaml_file(path=swagger_path)
    if errs:
        print("Value error: Failed to parse Swagger file {}:\n{}".format(swagger_path, "\n".join(errs)))
        return 2

    intermediate_typedefs = swagger_to.intermediate.to_typedefs(swagger=swagger)
    intermediate_params = swagger_to.intermediate.to_parameters(swagger=swagger, typedefs=intermediate_typedefs)

    endpoints = swagger_to.intermediate.to_endpoints(
        swagger=swagger, typedefs=intermediate_typedefs, params=intermediate_params)

    result = swagger_to.style.perform(swagger=swagger, typedefs=intermediate_typedefs, endpoints=endpoints)

    if result:
        complaints = '\n'.join(
            format_complaints(
                complaints=result,
                swagger_path=str(swagger_path),
                verbose=args.verbose,
                with_line_number=args.with_line_number))
        print("Style checks failed: \n{}".format(complaints))
        return 1

    print("Style checks succeeded.")
    return 0


def format_complaints(complaints: List[swagger_to.style.Complaint], swagger_path: str, verbose: bool,
                      with_line_number: bool) -> List[str]:
    """
    Convert a list of complaints into a well-formatted list of error messages.

    :param complaints:
    :param swagger_path:
    :param verbose:
    :param with_line_number:
    :return:
    """
    if with_line_number:
        complaints.sort(key=lambda complaint: complaint.line)

    complaints_str = []  # type: List[str]
    for complaint in complaints:
        complaint_str = ''
        if with_line_number:
            complaint_str += "{}:{} ".format(swagger_path, complaint.line)
        else:
            complaint_str += "{}: ".format(complaint.where)
        complaint_str += "{} ".format(complaint.message)
        if verbose:
            complaint_str += "\"{}\"".format(complaint.what.replace('\n', ' '))
        complaints_str.append(complaint_str)

    return complaints_str


if __name__ == "__main__":
    sys.exit(main())
33.381443
116
0.679432
import argparse
import pathlib
from typing import List

import sys

import swagger_to.intermediate
import swagger_to.style
import swagger_to.swagger


def main() -> int:
    parser = argparse.ArgumentParser("Reads a correct swagger file and checks that it conforms to the style guide.")
    parser.add_argument("--swagger_path", help="path to the swagger file", required=True)
    parser.add_argument("--verbose", help="if set, prints as much information as possible.", action="store_true")
    parser.add_argument(
        "--with_line_number",
        help="if set, prints the errors with the corresponding file name and line number.",
        action="store_true")
    args = parser.parse_args()

    assert isinstance(args.swagger_path, str)
    assert isinstance(args.verbose, bool)
    assert isinstance(args.with_line_number, bool)

    swagger_path = pathlib.Path(args.swagger_path)

    if not swagger_path.exists():
        print("File not found error: Swagger file does not exist: {}".format(swagger_path))
        return 2

    swagger, errs = swagger_to.swagger.parse_yaml_file(path=swagger_path)
    if errs:
        print("Value error: Failed to parse Swagger file {}:\n{}".format(swagger_path, "\n".join(errs)))
        return 2

    intermediate_typedefs = swagger_to.intermediate.to_typedefs(swagger=swagger)
    intermediate_params = swagger_to.intermediate.to_parameters(swagger=swagger, typedefs=intermediate_typedefs)

    endpoints = swagger_to.intermediate.to_endpoints(
        swagger=swagger, typedefs=intermediate_typedefs, params=intermediate_params)

    result = swagger_to.style.perform(swagger=swagger, typedefs=intermediate_typedefs, endpoints=endpoints)

    if result:
        complaints = '\n'.join(
            format_complaints(
                complaints=result,
                swagger_path=str(swagger_path),
                verbose=args.verbose,
                with_line_number=args.with_line_number))
        print("Style checks failed: \n{}".format(complaints))
        return 1

    print("Style checks succeeded.")
    return 0


def format_complaints(complaints: List[swagger_to.style.Complaint], swagger_path: str, verbose: bool,
                      with_line_number: bool) -> List[str]:
    if with_line_number:
        complaints.sort(key=lambda complaint: complaint.line)

    complaints_str = []
    for complaint in complaints:
        complaint_str = ''
        if with_line_number:
            complaint_str += "{}:{} ".format(swagger_path, complaint.line)
        else:
            complaint_str += "{}: ".format(complaint.where)
        complaint_str += "{} ".format(complaint.message)
        if verbose:
            complaint_str += "\"{}\"".format(complaint.what.replace('\n', ' '))
        complaints_str.append(complaint_str)

    return complaints_str


if __name__ == "__main__":
    sys.exit(main())
true
true
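The script above wires swagger_to's parser, intermediate representation and style checks together behind a CLI. A hedged, editor-added sketch of the same pipeline called programmatically (the swagger.yaml path is illustrative; all functions and the Complaint fields appear verbatim in the record):

import pathlib

import swagger_to.intermediate
import swagger_to.style
import swagger_to.swagger

swagger, errs = swagger_to.swagger.parse_yaml_file(path=pathlib.Path("swagger.yaml"))
assert not errs, errs
typedefs = swagger_to.intermediate.to_typedefs(swagger=swagger)
params = swagger_to.intermediate.to_parameters(swagger=swagger, typedefs=typedefs)
endpoints = swagger_to.intermediate.to_endpoints(swagger=swagger, typedefs=typedefs, params=params)
complaints = swagger_to.style.perform(swagger=swagger, typedefs=typedefs, endpoints=endpoints)
for c in complaints:
    print(c.where, c.message)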
f703ae464ea3f820d910eceb27441786cfb8c1dd
12,829
py
Python
sdk/python/v1beta1/kubeflow/katib/models/v1beta1_experiment_spec.py
oneconvergence/katib
16e0574647ace79ccedd248d072c77139feab5e5
[ "Apache-2.0" ]
1,177
2018-04-23T08:45:19.000Z
2022-03-23T19:09:13.000Z
sdk/python/v1beta1/kubeflow/katib/models/v1beta1_experiment_spec.py
oneconvergence/katib
16e0574647ace79ccedd248d072c77139feab5e5
[ "Apache-2.0" ]
1,791
2018-04-20T00:10:17.000Z
2022-03-31T18:18:36.000Z
sdk/python/v1beta1/kubeflow/katib/models/v1beta1_experiment_spec.py
oneconvergence/katib
16e0574647ace79ccedd248d072c77139feab5e5
[ "Apache-2.0" ]
349
2018-04-20T01:03:28.000Z
2022-03-30T16:11:35.000Z
# coding: utf-8

"""
    Katib

    Swagger description for Katib  # noqa: E501

    The version of the OpenAPI document: v1beta1-0.1
    Generated by: https://openapi-generator.tech
"""

import pprint
import re  # noqa: F401

import six

from kubeflow.katib.configuration import Configuration


class V1beta1ExperimentSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'algorithm': 'V1beta1AlgorithmSpec',
        'early_stopping': 'V1beta1EarlyStoppingSpec',
        'max_failed_trial_count': 'int',
        'max_trial_count': 'int',
        'metrics_collector_spec': 'V1beta1MetricsCollectorSpec',
        'nas_config': 'V1beta1NasConfig',
        'objective': 'V1beta1ObjectiveSpec',
        'parallel_trial_count': 'int',
        'parameters': 'list[V1beta1ParameterSpec]',
        'resume_policy': 'str',
        'trial_template': 'V1beta1TrialTemplate'
    }

    attribute_map = {
        'algorithm': 'algorithm',
        'early_stopping': 'earlyStopping',
        'max_failed_trial_count': 'maxFailedTrialCount',
        'max_trial_count': 'maxTrialCount',
        'metrics_collector_spec': 'metricsCollectorSpec',
        'nas_config': 'nasConfig',
        'objective': 'objective',
        'parallel_trial_count': 'parallelTrialCount',
        'parameters': 'parameters',
        'resume_policy': 'resumePolicy',
        'trial_template': 'trialTemplate'
    }

    def __init__(self, algorithm=None, early_stopping=None, max_failed_trial_count=None, max_trial_count=None, metrics_collector_spec=None, nas_config=None, objective=None, parallel_trial_count=None, parameters=None, resume_policy=None, trial_template=None, local_vars_configuration=None):  # noqa: E501
        """V1beta1ExperimentSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._algorithm = None
        self._early_stopping = None
        self._max_failed_trial_count = None
        self._max_trial_count = None
        self._metrics_collector_spec = None
        self._nas_config = None
        self._objective = None
        self._parallel_trial_count = None
        self._parameters = None
        self._resume_policy = None
        self._trial_template = None
        self.discriminator = None

        if algorithm is not None:
            self.algorithm = algorithm
        if early_stopping is not None:
            self.early_stopping = early_stopping
        if max_failed_trial_count is not None:
            self.max_failed_trial_count = max_failed_trial_count
        if max_trial_count is not None:
            self.max_trial_count = max_trial_count
        if metrics_collector_spec is not None:
            self.metrics_collector_spec = metrics_collector_spec
        if nas_config is not None:
            self.nas_config = nas_config
        if objective is not None:
            self.objective = objective
        if parallel_trial_count is not None:
            self.parallel_trial_count = parallel_trial_count
        if parameters is not None:
            self.parameters = parameters
        if resume_policy is not None:
            self.resume_policy = resume_policy
        if trial_template is not None:
            self.trial_template = trial_template

    @property
    def algorithm(self):
        """Gets the algorithm of this V1beta1ExperimentSpec.  # noqa: E501

        :return: The algorithm of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: V1beta1AlgorithmSpec
        """
        return self._algorithm

    @algorithm.setter
    def algorithm(self, algorithm):
        """Sets the algorithm of this V1beta1ExperimentSpec.

        :param algorithm: The algorithm of this V1beta1ExperimentSpec.  # noqa: E501
        :type: V1beta1AlgorithmSpec
        """
        self._algorithm = algorithm

    @property
    def early_stopping(self):
        """Gets the early_stopping of this V1beta1ExperimentSpec.  # noqa: E501

        :return: The early_stopping of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: V1beta1EarlyStoppingSpec
        """
        return self._early_stopping

    @early_stopping.setter
    def early_stopping(self, early_stopping):
        """Sets the early_stopping of this V1beta1ExperimentSpec.

        :param early_stopping: The early_stopping of this V1beta1ExperimentSpec.  # noqa: E501
        :type: V1beta1EarlyStoppingSpec
        """
        self._early_stopping = early_stopping

    @property
    def max_failed_trial_count(self):
        """Gets the max_failed_trial_count of this V1beta1ExperimentSpec.  # noqa: E501

        Max failed trials to mark experiment as failed.  # noqa: E501

        :return: The max_failed_trial_count of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: int
        """
        return self._max_failed_trial_count

    @max_failed_trial_count.setter
    def max_failed_trial_count(self, max_failed_trial_count):
        """Sets the max_failed_trial_count of this V1beta1ExperimentSpec.

        Max failed trials to mark experiment as failed.  # noqa: E501

        :param max_failed_trial_count: The max_failed_trial_count of this V1beta1ExperimentSpec.  # noqa: E501
        :type: int
        """
        self._max_failed_trial_count = max_failed_trial_count

    @property
    def max_trial_count(self):
        """Gets the max_trial_count of this V1beta1ExperimentSpec.  # noqa: E501

        Max completed trials to mark experiment as succeeded  # noqa: E501

        :return: The max_trial_count of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: int
        """
        return self._max_trial_count

    @max_trial_count.setter
    def max_trial_count(self, max_trial_count):
        """Sets the max_trial_count of this V1beta1ExperimentSpec.

        Max completed trials to mark experiment as succeeded  # noqa: E501

        :param max_trial_count: The max_trial_count of this V1beta1ExperimentSpec.  # noqa: E501
        :type: int
        """
        self._max_trial_count = max_trial_count

    @property
    def metrics_collector_spec(self):
        """Gets the metrics_collector_spec of this V1beta1ExperimentSpec.  # noqa: E501

        :return: The metrics_collector_spec of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: V1beta1MetricsCollectorSpec
        """
        return self._metrics_collector_spec

    @metrics_collector_spec.setter
    def metrics_collector_spec(self, metrics_collector_spec):
        """Sets the metrics_collector_spec of this V1beta1ExperimentSpec.

        :param metrics_collector_spec: The metrics_collector_spec of this V1beta1ExperimentSpec.  # noqa: E501
        :type: V1beta1MetricsCollectorSpec
        """
        self._metrics_collector_spec = metrics_collector_spec

    @property
    def nas_config(self):
        """Gets the nas_config of this V1beta1ExperimentSpec.  # noqa: E501

        :return: The nas_config of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: V1beta1NasConfig
        """
        return self._nas_config

    @nas_config.setter
    def nas_config(self, nas_config):
        """Sets the nas_config of this V1beta1ExperimentSpec.

        :param nas_config: The nas_config of this V1beta1ExperimentSpec.  # noqa: E501
        :type: V1beta1NasConfig
        """
        self._nas_config = nas_config

    @property
    def objective(self):
        """Gets the objective of this V1beta1ExperimentSpec.  # noqa: E501

        :return: The objective of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: V1beta1ObjectiveSpec
        """
        return self._objective

    @objective.setter
    def objective(self, objective):
        """Sets the objective of this V1beta1ExperimentSpec.

        :param objective: The objective of this V1beta1ExperimentSpec.  # noqa: E501
        :type: V1beta1ObjectiveSpec
        """
        self._objective = objective

    @property
    def parallel_trial_count(self):
        """Gets the parallel_trial_count of this V1beta1ExperimentSpec.  # noqa: E501

        How many trials can be processed in parallel. Defaults to 3  # noqa: E501

        :return: The parallel_trial_count of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: int
        """
        return self._parallel_trial_count

    @parallel_trial_count.setter
    def parallel_trial_count(self, parallel_trial_count):
        """Sets the parallel_trial_count of this V1beta1ExperimentSpec.

        How many trials can be processed in parallel. Defaults to 3  # noqa: E501

        :param parallel_trial_count: The parallel_trial_count of this V1beta1ExperimentSpec.  # noqa: E501
        :type: int
        """
        self._parallel_trial_count = parallel_trial_count

    @property
    def parameters(self):
        """Gets the parameters of this V1beta1ExperimentSpec.  # noqa: E501

        List of hyperparameter configurations.  # noqa: E501

        :return: The parameters of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: list[V1beta1ParameterSpec]
        """
        return self._parameters

    @parameters.setter
    def parameters(self, parameters):
        """Sets the parameters of this V1beta1ExperimentSpec.

        List of hyperparameter configurations.  # noqa: E501

        :param parameters: The parameters of this V1beta1ExperimentSpec.  # noqa: E501
        :type: list[V1beta1ParameterSpec]
        """
        self._parameters = parameters

    @property
    def resume_policy(self):
        """Gets the resume_policy of this V1beta1ExperimentSpec.  # noqa: E501

        Describes resuming policy which usually take effect after experiment terminated.  # noqa: E501

        :return: The resume_policy of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: str
        """
        return self._resume_policy

    @resume_policy.setter
    def resume_policy(self, resume_policy):
        """Sets the resume_policy of this V1beta1ExperimentSpec.

        Describes resuming policy which usually take effect after experiment terminated.  # noqa: E501

        :param resume_policy: The resume_policy of this V1beta1ExperimentSpec.  # noqa: E501
        :type: str
        """
        self._resume_policy = resume_policy

    @property
    def trial_template(self):
        """Gets the trial_template of this V1beta1ExperimentSpec.  # noqa: E501

        :return: The trial_template of this V1beta1ExperimentSpec.  # noqa: E501
        :rtype: V1beta1TrialTemplate
        """
        return self._trial_template

    @trial_template.setter
    def trial_template(self, trial_template):
        """Sets the trial_template of this V1beta1ExperimentSpec.

        :param trial_template: The trial_template of this V1beta1ExperimentSpec.  # noqa: E501
        :type: V1beta1TrialTemplate
        """
        self._trial_template = trial_template

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1beta1ExperimentSpec):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1beta1ExperimentSpec):
            return True

        return self.to_dict() != other.to_dict()
32.810742
303
0.654377
import pprint import re import six from kubeflow.katib.configuration import Configuration class V1beta1ExperimentSpec(object): openapi_types = { 'algorithm': 'V1beta1AlgorithmSpec', 'early_stopping': 'V1beta1EarlyStoppingSpec', 'max_failed_trial_count': 'int', 'max_trial_count': 'int', 'metrics_collector_spec': 'V1beta1MetricsCollectorSpec', 'nas_config': 'V1beta1NasConfig', 'objective': 'V1beta1ObjectiveSpec', 'parallel_trial_count': 'int', 'parameters': 'list[V1beta1ParameterSpec]', 'resume_policy': 'str', 'trial_template': 'V1beta1TrialTemplate' } attribute_map = { 'algorithm': 'algorithm', 'early_stopping': 'earlyStopping', 'max_failed_trial_count': 'maxFailedTrialCount', 'max_trial_count': 'maxTrialCount', 'metrics_collector_spec': 'metricsCollectorSpec', 'nas_config': 'nasConfig', 'objective': 'objective', 'parallel_trial_count': 'parallelTrialCount', 'parameters': 'parameters', 'resume_policy': 'resumePolicy', 'trial_template': 'trialTemplate' } def __init__(self, algorithm=None, early_stopping=None, max_failed_trial_count=None, max_trial_count=None, metrics_collector_spec=None, nas_config=None, objective=None, parallel_trial_count=None, parameters=None, resume_policy=None, trial_template=None, local_vars_configuration=None): if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._algorithm = None self._early_stopping = None self._max_failed_trial_count = None self._max_trial_count = None self._metrics_collector_spec = None self._nas_config = None self._objective = None self._parallel_trial_count = None self._parameters = None self._resume_policy = None self._trial_template = None self.discriminator = None if algorithm is not None: self.algorithm = algorithm if early_stopping is not None: self.early_stopping = early_stopping if max_failed_trial_count is not None: self.max_failed_trial_count = max_failed_trial_count if max_trial_count is not None: self.max_trial_count = max_trial_count if metrics_collector_spec is not None: self.metrics_collector_spec = metrics_collector_spec if nas_config is not None: self.nas_config = nas_config if objective is not None: self.objective = objective if parallel_trial_count is not None: self.parallel_trial_count = parallel_trial_count if parameters is not None: self.parameters = parameters if resume_policy is not None: self.resume_policy = resume_policy if trial_template is not None: self.trial_template = trial_template @property def algorithm(self): return self._algorithm @algorithm.setter def algorithm(self, algorithm): self._algorithm = algorithm @property def early_stopping(self): return self._early_stopping @early_stopping.setter def early_stopping(self, early_stopping): self._early_stopping = early_stopping @property def max_failed_trial_count(self): return self._max_failed_trial_count @max_failed_trial_count.setter def max_failed_trial_count(self, max_failed_trial_count): self._max_failed_trial_count = max_failed_trial_count @property def max_trial_count(self): return self._max_trial_count @max_trial_count.setter def max_trial_count(self, max_trial_count): self._max_trial_count = max_trial_count @property def metrics_collector_spec(self): return self._metrics_collector_spec @metrics_collector_spec.setter def metrics_collector_spec(self, metrics_collector_spec): self._metrics_collector_spec = metrics_collector_spec @property def nas_config(self): return self._nas_config @nas_config.setter def nas_config(self, nas_config): self._nas_config = nas_config 
@property def objective(self): return self._objective @objective.setter def objective(self, objective): self._objective = objective @property def parallel_trial_count(self): return self._parallel_trial_count @parallel_trial_count.setter def parallel_trial_count(self, parallel_trial_count): self._parallel_trial_count = parallel_trial_count @property def parameters(self): return self._parameters @parameters.setter def parameters(self, parameters): self._parameters = parameters @property def resume_policy(self): return self._resume_policy @resume_policy.setter def resume_policy(self, resume_policy): self._resume_policy = resume_policy @property def trial_template(self): return self._trial_template @trial_template.setter def trial_template(self, trial_template): self._trial_template = trial_template def to_dict(self): result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, V1beta1ExperimentSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): if not isinstance(other, V1beta1ExperimentSpec): return True return self.to_dict() != other.to_dict()
true
true
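The generated model above is a plain data holder whose to_dict recursively serializes nested models. A minimal editor-added sketch; the import path follows the repo layout in the metadata and is an assumption:

from kubeflow.katib.models.v1beta1_experiment_spec import V1beta1ExperimentSpec  # assumed import path

spec = V1beta1ExperimentSpec(max_trial_count=12, parallel_trial_count=3, resume_policy="LongRunning")
assert spec.to_dict()["max_trial_count"] == 12  # keys use the python attribute names
assert spec == V1beta1ExperimentSpec(max_trial_count=12, parallel_trial_count=3, resume_policy="LongRunning")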
f703ae4c4ee6db57ecd36e86072f030656e6cc07
6,325
py
Python
image-segmentation/data_generators/kitti/kitti_dataset.py
swcho84/image-segmentation
ef9b9b3d832e9efe6f43522cc5ca0e17279d6608
[ "MIT" ]
64
2019-03-09T08:55:11.000Z
2022-01-27T07:08:02.000Z
image-segmentation/data_generators/kitti/kitti_dataset.py
swcho84/image-segmentation
ef9b9b3d832e9efe6f43522cc5ca0e17279d6608
[ "MIT" ]
2
2019-11-07T11:49:13.000Z
2020-01-16T14:39:03.000Z
image-segmentation/data_generators/kitti/kitti_dataset.py
swcho84/image-segmentation
ef9b9b3d832e9efe6f43522cc5ca0e17279d6608
[ "MIT" ]
21
2019-03-09T08:56:35.000Z
2022-03-02T12:24:43.000Z
from collections import namedtuple
import os
import json

import numpy as np
from tqdm import tqdm

from data_generators.utils import load_image_rgb

# Copied from: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
#
# Cityscapes labels
#

#--------------------------------------------------------------------------------
# Definitions
#--------------------------------------------------------------------------------

# a label and all meta information
Label = namedtuple( 'Label' , [

    'name'        , # The identifier of this label, e.g. 'car', 'person', ... .
                    # We use them to uniquely name a class

    'id'          , # An integer ID that is associated with this label.
                    # The IDs are used to represent the label in ground truth images
                    # An ID of -1 means that this label does not have an ID and thus
                    # is ignored when creating ground truth images (e.g. license plate).
                    # Do not modify these IDs, since exactly these IDs are expected by the
                    # evaluation server.

    'trainId'     , # Feel free to modify these IDs as suitable for your method. Then create
                    # ground truth images with train IDs, using the tools provided in the
                    # 'preparation' folder. However, make sure to validate or submit results
                    # to our evaluation server using the regular IDs above!
                    # For trainIds, multiple labels might have the same ID. Then, these labels
                    # are mapped to the same class in the ground truth images. For the inverse
                    # mapping, we use the label that is defined first in the list below.
                    # For example, mapping all void-type classes to the same ID in training,
                    # might make sense for some approaches.
                    # Max value is 255!

    'category'    , # The name of the category that this label belongs to

    'categoryId'  , # The ID of this category. Used to create ground truth images
                    # on category level.

    'hasInstances', # Whether this label distinguishes between single instances or not

    'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
                    # during evaluations or not

    'color'       , # The color of this label
    ] )


def label2dict(label):
    return {
        'name': label.name,
        'id': label.id,
        'trainId': label.trainId,
        'category': label.category,
        'catId': label.categoryId,
        'hasInstances': label.hasInstances,
        'ignoreInEval': label.ignoreInEval,
        'color': label.color
    }


def save_labels(labels, fpath):
    l = []
    for label in labels:
        l.append(label2dict(label))
    fp = open(fpath, 'w')
    json.dump(l, fp)
    fp.close()


def load_labels(fpath):
    fp = open(fpath, 'r')
    l = json.load(fp)
    fp.close()
    labels = []
    for item in l:
        labels.append(
            Label(
                item['name'],
                item['id'],
                item['trainId'],
                item['category'],
                item['catId'],
                item['hasInstances'],
                item['ignoreInEval'],
                tuple(item['color']))
        )
    return labels


class KittiDataset:
    def __init__(self):
        self.image_ids = []

    def load_kitti(self, dataset_dir, subset, tag='simple'):
        'Initialization'
        assert subset in ['train', 'val'], 'subset must be either train or val but {} is given'.format(subset)

        self.labels = load_labels(os.path.join(dataset_dir, 'annotations', 'semantic_{}.json'.format(tag)))

        # trainId to colors
        self.trainId2colors = {label.trainId: [] for label in self.labels}
        for label in self.labels:
            self.trainId2colors[label.trainId].append(label.color)

        # trainId to name
        self.trainId2name = {label.trainId: label.name for label in self.labels}

        # number of valid trainIds + background class
        self.num_classes = max([label.trainId for label in self.labels if label.trainId >= 0 and label.trainId < 255]) + 2

        self.class_names = [self.trainId2name[i] for i in range(self.num_classes - 1)]

        self.image_dir = os.path.join(dataset_dir, subset, 'images')
        self.label_dir = os.path.join(dataset_dir, subset, 'semantic_rgb')
        assert os.path.exists(self.image_dir), 'No such directory: {}'.format(self.image_dir)
        assert os.path.exists(self.label_dir), 'No such directory: {}'.format(self.label_dir)

        self.image_files = sorted([x for x in os.listdir(self.image_dir) if x.lower().endswith('.png') or x.lower().endswith('.jpg')])
        self.label_files = sorted([x for x in os.listdir(self.label_dir) if x.lower().endswith('.png')])
        assert len(self.image_files) == len(self.label_files), \
            'image - label size mismatch! There are {} image files and {} label files'.format(len(self.image_files), len(self.label_files))

        self.num_images = len(self.image_files)
        self.image_ids = np.arange(self.num_images)

    def check_sanity(self):
        for i in tqdm(self.image_ids):
            assert self.image_files[i][:-4] == self.label_files[i][:-4],\
                'image - label filename mismatch: {} - {}'.format(self.image_files[i], self.label_files[i])
            img = load_image_rgb(os.path.join(self.image_dir, self.image_files[i]))
            msk = load_image_rgb(os.path.join(self.label_dir, self.label_files[i]))
            assert img.shape == msk.shape,\
                'img.shape: {}, msk.shape: {}'.format(img.shape, msk.shape)

    def load_image(self, image_id):
        return load_image_rgb(os.path.join(self.image_dir, self.image_files[image_id]))

    def load_mask(self, image_id):
        rgb_mask = load_image_rgb(os.path.join(self.label_dir, self.label_files[image_id]))
        mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1], self.num_classes - 1))
        for cls in range(self.num_classes - 1):
            colors = self.trainId2colors[cls]
            cls_mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1]))
            for color in colors:
                cls_mask = np.logical_or(cls_mask, (rgb_mask == color).all(axis=2))
            mask[:,:,cls] = cls_mask
        return mask
43.027211
139
0.606482
from collections import namedtuple import os import json import numpy as np from tqdm import tqdm from data_generators.utils import load_image_rgb Label = namedtuple( 'Label' , [ 'name' , 'id' , 'trainId' , 'category' , 'categoryId' , 'hasInstances', 'ignoreInEval', 'color' , ] ) def label2dict(label): return { 'name': label.name, 'id': label.id, 'trainId': label.trainId, 'category': label.category, 'catId': label.categoryId, 'hasInstances': label.hasInstances, 'ignoreInEval': label.ignoreInEval, 'color': label.color } def save_labels(labels, fpath): l = [] for label in labels: l.append(label2dict(label)) fp = open(fpath, 'w') json.dump(l, fp) fp.close() def load_labels(fpath): fp = open(fpath, 'r') l = json.load(fp) fp.close() labels = [] for item in l: labels.append( Label( item['name'], item['id'], item['trainId'], item['category'], item['catId'], item['hasInstances'], item['ignoreInEval'], tuple(item['color'])) ) return labels class KittiDataset: def __init__(self): self.image_ids = [] def load_kitti(self, dataset_dir, subset, tag='simple'): assert subset in ['train', 'val'], 'subset must be either train or val but {} is given'.format(subset) self.labels = load_labels(os.path.join(dataset_dir, 'annotations', 'semantic_{}.json'.format(tag))) self.trainId2colors = {label.trainId: [] for label in self.labels} for label in self.labels: self.trainId2colors[label.trainId].append(label.color) self.trainId2name = {label.trainId: label.name for label in self.labels} self.num_classes = max([label.trainId for label in self.labels if label.trainId >= 0 and label.trainId < 255]) + 2 self.class_names = [self.trainId2name[i] for i in range(self.num_classes - 1)] self.image_dir = os.path.join(dataset_dir, subset, 'images') self.label_dir = os.path.join(dataset_dir, subset, 'semantic_rgb') assert os.path.exists(self.image_dir), 'No such directory: {}'.format(self.image_dir) assert os.path.exists(self.label_dir), 'No such directory: {}'.format(self.label_dir) self.image_files = sorted([x for x in os.listdir(self.image_dir) if x.lower().endswith('.png') or x.lower().endswith('.jpg')]) self.label_files = sorted([x for x in os.listdir(self.label_dir) if x.lower().endswith('.png')]) assert len(self.image_files) == len(self.label_files), \ 'image - label size mismatch! There are {} image files and {} label files'.format(len(self.image_files), len(self.label_files)) self.num_images = len(self.image_files) self.image_ids = np.arange(self.num_images) def check_sanity(self): for i in tqdm(self.image_ids): assert self.image_files[i][:-4] == self.label_files[i][:-4],\ 'image - label filename mismatch: {} - {}'.format(self.image_files[i], self.label_files[i]) img = load_image_rgb(os.path.join(self.image_dir, self.image_files[i])) msk = load_image_rgb(os.path.join(self.label_dir, self.label_files[i])) assert img.shape == msk.shape,\ 'img.shape: {}, msk.shape: {}'.format(img.shape, msk.shape) def load_image(self, image_id): return load_image_rgb(os.path.join(self.image_dir, self.image_files[image_id])) def load_mask(self, image_id): rgb_mask = load_image_rgb(os.path.join(self.label_dir, self.label_files[image_id])) mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1], self.num_classes - 1)) for cls in range(self.num_classes - 1): colors = self.trainId2colors[cls] cls_mask = np.zeros((rgb_mask.shape[0], rgb_mask.shape[1])) for color in colors: cls_mask = np.logical_or(cls_mask, (rgb_mask == color).all(axis=2)) mask[:,:,cls] = cls_mask return mask
true
true
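KittiDataset above decodes RGB semantic masks into per-class binary planes via its trainId-to-color table. A minimal editor-added usage sketch; the module name and dataset path are illustrative assumptions:

from kitti_dataset import KittiDataset  # hypothetical module holding the class above

ds = KittiDataset()
ds.load_kitti('/data/kitti_semantics', subset='train', tag='simple')  # illustrative path
ds.check_sanity()
img = ds.load_image(ds.image_ids[0])   # (H, W, 3) RGB image
mask = ds.load_mask(ds.image_ids[0])   # (H, W, num_classes - 1), one plane per trainId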
f703af1b52c4cc4a7f8d2dbe1ec2c5715036e113
5,921
py
Python
pymc4/distributions/tensorflow/continuous.py
byblian/pymc4
5de890ed7f22de878eb48c92d3e9b8fe87c25e61
[ "Apache-2.0" ]
null
null
null
pymc4/distributions/tensorflow/continuous.py
byblian/pymc4
5de890ed7f22de878eb48c92d3e9b8fe87c25e61
[ "Apache-2.0" ]
null
null
null
pymc4/distributions/tensorflow/continuous.py
byblian/pymc4
5de890ed7f22de878eb48c92d3e9b8fe87c25e61
[ "Apache-2.0" ]
null
null
null
"""PyMC4 continuous random variables for tensorflow.""" import tensorflow_probability as tfp from pymc4.distributions import abstract from pymc4.distributions.tensorflow.distribution import BackendDistribution tfd = tfp.distributions __all__ = [ "Beta", "Cauchy", "ChiSquared", "Exponential", "Gamma", "Gumbel", "HalfCauchy", "HalfNormal", "InverseGamma", "InverseGaussian", "Kumaraswamy", "Laplace", "LogNormal", "Logistic", "LogitNormal", "Normal", "Pareto", "StudentT", "Triangular", "Uniform", "VonMises", ] class Normal(BackendDistribution, abstract.Normal): __doc__ = r"""{} Developer Notes --------------- Parameter mappings to TensorFlow Probability are as follows: - mu: loc - sigma: scale """.format( abstract.Normal.__doc__ ) def _init_backend(self): mu, sigma = self.conditions["mu"], self.conditions["sigma"] self._backend_distribution = tfd.Normal(loc=mu, scale=sigma) class HalfNormal(BackendDistribution, abstract.HalfNormal): __doc__ = r"""{} Developer Notes --------------- Parameter mappings to TensorFlow Probability are as follows: - sigma: scale """.format( abstract.HalfNormal.__doc__ ) def _init_backend(self): sigma = self.conditions["sigma"] self._backend_distribution = tfd.HalfNormal(scale=sigma) class Beta(BackendDistribution, abstract.Beta): def _init_backend(self): alpha, beta = self.conditions["alpha"], self.conditions["beta"] self._backend_distribution = tfd.Beta(concentration0=alpha, concentration1=beta) class Cauchy(BackendDistribution, abstract.Cauchy): def _init_backend(self): alpha, beta = self.conditions["alpha"], self.conditions["beta"] self._backend_distribution = tfd.Cauchy(loc=alpha, scale=beta) class ChiSquared(BackendDistribution, abstract.ChiSquared): def _init_backend(self): nu = self.conditions["nu"] self._backend_distribution = tfd.Chi2(df=nu) class Exponential(BackendDistribution, abstract.Exponential): def _init_backend(self): lam = self.conditions["lam"] self._backend_distribution = tfd.Exponential(rate=lam) class Gamma(BackendDistribution, abstract.Gamma): def _init_backend(self): alpha, beta = self.conditions["alpha"], self.conditions["beta"] self._backend_distribution = tfd.Gamma(concentration=alpha, rate=beta) class Gumbel(BackendDistribution, abstract.Gumbel): def _init_backend(self): mu, beta = self.conditions["mu"], self.conditions["beta"] self._backend_distribution = tfd.Gumbel(loc=mu, scale=beta) class HalfCauchy(BackendDistribution, abstract.HalfCauchy): def _init_backend(self): beta = self.conditions["beta"] self._backend_distribution = tfd.HalfCauchy(loc=0, scale=beta) class InverseGamma(BackendDistribution, abstract.InverseGamma): def _init_backend(self): alpha, beta = self.conditions["alpha"], self.conditions["beta"] self._backend_distribution = tfd.InverseGamma(concentration=alpha, scale=beta) class InverseGaussian(BackendDistribution, abstract.InverseGaussian): def _init_backend(self): mu, lam = self.conditions["mu"], self.conditions["lam"] self._backend_distribution = tfd.InverseGaussian(loc=mu, concentration=lam) class Kumaraswamy(BackendDistribution, abstract.Kumaraswamy): def _init_backend(self): a, b = self.conditions["a"], self.conditions["b"] self._backend_distribution = tfd.Kumaraswamy(concentration0=a, concentration1=b) class Laplace(BackendDistribution, abstract.Laplace): def _init_backend(self): mu, b = self.conditions["mu"], self.conditions["b"] self._backend_distribution = tfd.Laplace(loc=mu, scale=b) class Logistic(BackendDistribution, abstract.Logistic): def _init_backend(self): mu, s = self.conditions["mu"], 
self.conditions["s"] self._backend_distribution = tfd.Logistic(loc=mu, scale=s) class LogitNormal(BackendDistribution, abstract.LogitNormal): def _init_backend(self): mu, sigma = self.conditions["mu"], self.conditions["sigma"] self._backend_distribution = tfd.TransformedDistribution( distribution=tfd.Normal(loc=mu, scale=sigma), bijector=tfp.bijectors.Sigmoid(), name="LogitNormal", ) class LogNormal(BackendDistribution, abstract.LogNormal): def _init_backend(self): mu, sigma = self.conditions["mu"], self.conditions["sigma"] self._backend_distribution = tfd.LogNormal(loc=mu, scale=sigma) class Pareto(BackendDistribution, abstract.Pareto): def _init_backend(self): alpha, m = self.conditions["alpha"], self.conditions["m"] self._backend_distribution = tfd.Pareto(concentration=alpha, scale=m) class StudentT(BackendDistribution, abstract.StudentT): def _init_backend(self): nu, mu, sigma = self.conditions["nu"], self.conditions["mu"], self.conditions["sigma"] self._backend_distribution = tfd.StudentT(df=nu, loc=mu, scale=sigma) class Triangular(BackendDistribution, abstract.Triangular): def _init_backend(self): lower, upper, c = self.conditions["lower"], self.conditions["upper"], self.conditions["c"] self._backend_distribution = tfd.Triangular(low=lower, high=upper, peak=c) class Uniform(BackendDistribution, abstract.Uniform): def _init_backend(self): lower, upper = self.conditions["lower"], self.conditions["upper"] self._backend_distribution = tfd.Uniform(low=lower, high=upper) class VonMises(BackendDistribution, abstract.VonMises): def _init_backend(self): mu, kappa = self.conditions["mu"], self.conditions["kappa"] self._backend_distribution = tfd.VonMises(loc=mu, concentration=kappa)
32.005405
98
0.694308
import tensorflow_probability as tfp from pymc4.distributions import abstract from pymc4.distributions.tensorflow.distribution import BackendDistribution tfd = tfp.distributions __all__ = [ "Beta", "Cauchy", "ChiSquared", "Exponential", "Gamma", "Gumbel", "HalfCauchy", "HalfNormal", "InverseGamma", "InverseGaussian", "Kumaraswamy", "Laplace", "LogNormal", "Logistic", "LogitNormal", "Normal", "Pareto", "StudentT", "Triangular", "Uniform", "VonMises", ] class Normal(BackendDistribution, abstract.Normal): __doc__ = r"""{} Developer Notes --------------- Parameter mappings to TensorFlow Probability are as follows: - mu: loc - sigma: scale """.format( abstract.Normal.__doc__ ) def _init_backend(self): mu, sigma = self.conditions["mu"], self.conditions["sigma"] self._backend_distribution = tfd.Normal(loc=mu, scale=sigma) class HalfNormal(BackendDistribution, abstract.HalfNormal): __doc__ = r"""{} Developer Notes --------------- Parameter mappings to TensorFlow Probability are as follows: - sigma: scale """.format( abstract.HalfNormal.__doc__ ) def _init_backend(self): sigma = self.conditions["sigma"] self._backend_distribution = tfd.HalfNormal(scale=sigma) class Beta(BackendDistribution, abstract.Beta): def _init_backend(self): alpha, beta = self.conditions["alpha"], self.conditions["beta"] self._backend_distribution = tfd.Beta(concentration0=alpha, concentration1=beta) class Cauchy(BackendDistribution, abstract.Cauchy): def _init_backend(self): alpha, beta = self.conditions["alpha"], self.conditions["beta"] self._backend_distribution = tfd.Cauchy(loc=alpha, scale=beta) class ChiSquared(BackendDistribution, abstract.ChiSquared): def _init_backend(self): nu = self.conditions["nu"] self._backend_distribution = tfd.Chi2(df=nu) class Exponential(BackendDistribution, abstract.Exponential): def _init_backend(self): lam = self.conditions["lam"] self._backend_distribution = tfd.Exponential(rate=lam) class Gamma(BackendDistribution, abstract.Gamma): def _init_backend(self): alpha, beta = self.conditions["alpha"], self.conditions["beta"] self._backend_distribution = tfd.Gamma(concentration=alpha, rate=beta) class Gumbel(BackendDistribution, abstract.Gumbel): def _init_backend(self): mu, beta = self.conditions["mu"], self.conditions["beta"] self._backend_distribution = tfd.Gumbel(loc=mu, scale=beta) class HalfCauchy(BackendDistribution, abstract.HalfCauchy): def _init_backend(self): beta = self.conditions["beta"] self._backend_distribution = tfd.HalfCauchy(loc=0, scale=beta) class InverseGamma(BackendDistribution, abstract.InverseGamma): def _init_backend(self): alpha, beta = self.conditions["alpha"], self.conditions["beta"] self._backend_distribution = tfd.InverseGamma(concentration=alpha, scale=beta) class InverseGaussian(BackendDistribution, abstract.InverseGaussian): def _init_backend(self): mu, lam = self.conditions["mu"], self.conditions["lam"] self._backend_distribution = tfd.InverseGaussian(loc=mu, concentration=lam) class Kumaraswamy(BackendDistribution, abstract.Kumaraswamy): def _init_backend(self): a, b = self.conditions["a"], self.conditions["b"] self._backend_distribution = tfd.Kumaraswamy(concentration0=a, concentration1=b) class Laplace(BackendDistribution, abstract.Laplace): def _init_backend(self): mu, b = self.conditions["mu"], self.conditions["b"] self._backend_distribution = tfd.Laplace(loc=mu, scale=b) class Logistic(BackendDistribution, abstract.Logistic): def _init_backend(self): mu, s = self.conditions["mu"], self.conditions["s"] self._backend_distribution = tfd.Logistic(loc=mu, 
scale=s) class LogitNormal(BackendDistribution, abstract.LogitNormal): def _init_backend(self): mu, sigma = self.conditions["mu"], self.conditions["sigma"] self._backend_distribution = tfd.TransformedDistribution( distribution=tfd.Normal(loc=mu, scale=sigma), bijector=tfp.bijectors.Sigmoid(), name="LogitNormal", ) class LogNormal(BackendDistribution, abstract.LogNormal): def _init_backend(self): mu, sigma = self.conditions["mu"], self.conditions["sigma"] self._backend_distribution = tfd.LogNormal(loc=mu, scale=sigma) class Pareto(BackendDistribution, abstract.Pareto): def _init_backend(self): alpha, m = self.conditions["alpha"], self.conditions["m"] self._backend_distribution = tfd.Pareto(concentration=alpha, scale=m) class StudentT(BackendDistribution, abstract.StudentT): def _init_backend(self): nu, mu, sigma = self.conditions["nu"], self.conditions["mu"], self.conditions["sigma"] self._backend_distribution = tfd.StudentT(df=nu, loc=mu, scale=sigma) class Triangular(BackendDistribution, abstract.Triangular): def _init_backend(self): lower, upper, c = self.conditions["lower"], self.conditions["upper"], self.conditions["c"] self._backend_distribution = tfd.Triangular(low=lower, high=upper, peak=c) class Uniform(BackendDistribution, abstract.Uniform): def _init_backend(self): lower, upper = self.conditions["lower"], self.conditions["upper"] self._backend_distribution = tfd.Uniform(low=lower, high=upper) class VonMises(BackendDistribution, abstract.VonMises): def _init_backend(self): mu, kappa = self.conditions["mu"], self.conditions["kappa"] self._backend_distribution = tfd.VonMises(loc=mu, concentration=kappa)
true
true
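Each wrapper above simply renames PyMC4's conditioning parameters to the corresponding TensorFlow Probability constructor arguments. A small editor-added illustration of the Normal mapping, using TFP directly (values are arbitrary):

import tensorflow_probability as tfp

tfd = tfp.distributions

# PyMC4's (mu, sigma) become TFP's (loc, scale), as in Normal._init_backend above.
dist = tfd.Normal(loc=0.0, scale=1.0)
samples = dist.sample(5)        # draw five samples
logp = dist.log_prob(samples)   # evaluate the log-density at those samples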
f703afa79c2f13ad674c5444dff05b92c244c586
5,047
py
Python
src/datadog/azext_datadog/vendored_sdks/datadog/models/__init__.py
YingXue/azure-cli-extensions
30086b7fe22ed591daaae9019920db6c16aef9de
[ "MIT" ]
1
2020-09-16T03:47:44.000Z
2020-09-16T03:47:44.000Z
src/datadog/azext_datadog/vendored_sdks/datadog/models/__init__.py
YingXue/azure-cli-extensions
30086b7fe22ed591daaae9019920db6c16aef9de
[ "MIT" ]
null
null
null
src/datadog/azext_datadog/vendored_sdks/datadog/models/__init__.py
YingXue/azure-cli-extensions
30086b7fe22ed591daaae9019920db6c16aef9de
[ "MIT" ]
1
2019-05-02T00:55:30.000Z
2019-05-02T00:55:30.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

try:
    from ._models_py3 import DatadogApiKey
    from ._models_py3 import DatadogApiKeyListResponse
    from ._models_py3 import DatadogHost
    from ._models_py3 import DatadogHostListResponse
    from ._models_py3 import DatadogHostMetadata
    from ._models_py3 import DatadogInstallMethod
    from ._models_py3 import DatadogLogsAgent
    from ._models_py3 import DatadogMonitorResource
    from ._models_py3 import DatadogMonitorResourceListResponse
    from ._models_py3 import DatadogMonitorResourceUpdateParameters
    from ._models_py3 import DatadogOrganizationProperties
    from ._models_py3 import DatadogSetPasswordLink
    from ._models_py3 import DatadogSingleSignOnProperties
    from ._models_py3 import DatadogSingleSignOnResource
    from ._models_py3 import DatadogSingleSignOnResourceListResponse
    from ._models_py3 import ErrorResponseBody
    from ._models_py3 import FilteringTag
    from ._models_py3 import LinkedResource
    from ._models_py3 import LinkedResourceListResponse
    from ._models_py3 import MonitoredResource
    from ._models_py3 import MonitoredResourceListResponse
    from ._models_py3 import MonitoringTagRules
    from ._models_py3 import MonitoringTagRulesListResponse
    from ._models_py3 import OperationDisplay
    from ._models_py3 import OperationListResult
    from ._models_py3 import OperationResult
    from ._models_py3 import ResourceProviderDefaultErrorResponse
    from ._models_py3 import UserInfo
except (SyntaxError, ImportError):
    from ._models import DatadogApiKey  # type: ignore
    from ._models import DatadogApiKeyListResponse  # type: ignore
    from ._models import DatadogHost  # type: ignore
    from ._models import DatadogHostListResponse  # type: ignore
    from ._models import DatadogHostMetadata  # type: ignore
    from ._models import DatadogInstallMethod  # type: ignore
    from ._models import DatadogLogsAgent  # type: ignore
    from ._models import DatadogMonitorResource  # type: ignore
    from ._models import DatadogMonitorResourceListResponse  # type: ignore
    from ._models import DatadogMonitorResourceUpdateParameters  # type: ignore
    from ._models import DatadogOrganizationProperties  # type: ignore
    from ._models import DatadogSetPasswordLink  # type: ignore
    from ._models import DatadogSingleSignOnProperties  # type: ignore
    from ._models import DatadogSingleSignOnResource  # type: ignore
    from ._models import DatadogSingleSignOnResourceListResponse  # type: ignore
    from ._models import ErrorResponseBody  # type: ignore
    from ._models import FilteringTag  # type: ignore
    from ._models import LinkedResource  # type: ignore
    from ._models import LinkedResourceListResponse  # type: ignore
    from ._models import MonitoredResource  # type: ignore
    from ._models import MonitoredResourceListResponse  # type: ignore
    from ._models import MonitoringTagRules  # type: ignore
    from ._models import MonitoringTagRulesListResponse  # type: ignore
    from ._models import OperationDisplay  # type: ignore
    from ._models import OperationListResult  # type: ignore
    from ._models import OperationResult  # type: ignore
    from ._models import ResourceProviderDefaultErrorResponse  # type: ignore
    from ._models import UserInfo  # type: ignore

from ._microsoft_datadog_client_enums import (
    LiftrResourceCategories,
    ManagedIdentityTypes,
    MarketplaceSubscriptionStatus,
    MonitoringStatus,
    ProvisioningState,
    SingleSignOnStates,
    TagAction,
)

__all__ = [
    'DatadogApiKey',
    'DatadogApiKeyListResponse',
    'DatadogHost',
    'DatadogHostListResponse',
    'DatadogHostMetadata',
    'DatadogInstallMethod',
    'DatadogLogsAgent',
    'DatadogMonitorResource',
    'DatadogMonitorResourceListResponse',
    'DatadogMonitorResourceUpdateParameters',
    'DatadogOrganizationProperties',
    'DatadogSetPasswordLink',
    'DatadogSingleSignOnProperties',
    'DatadogSingleSignOnResource',
    'DatadogSingleSignOnResourceListResponse',
    'ErrorResponseBody',
    'FilteringTag',
    'LinkedResource',
    'LinkedResourceListResponse',
    'MonitoredResource',
    'MonitoredResourceListResponse',
    'MonitoringTagRules',
    'MonitoringTagRulesListResponse',
    'OperationDisplay',
    'OperationListResult',
    'OperationResult',
    'ResourceProviderDefaultErrorResponse',
    'UserInfo',
    'LiftrResourceCategories',
    'ManagedIdentityTypes',
    'MarketplaceSubscriptionStatus',
    'MonitoringStatus',
    'ProvisioningState',
    'SingleSignOnStates',
    'TagAction',
]
43.886957
94
0.753913
try:
    from ._models_py3 import DatadogApiKey
    from ._models_py3 import DatadogApiKeyListResponse
    from ._models_py3 import DatadogHost
    from ._models_py3 import DatadogHostListResponse
    from ._models_py3 import DatadogHostMetadata
    from ._models_py3 import DatadogInstallMethod
    from ._models_py3 import DatadogLogsAgent
    from ._models_py3 import DatadogMonitorResource
    from ._models_py3 import DatadogMonitorResourceListResponse
    from ._models_py3 import DatadogMonitorResourceUpdateParameters
    from ._models_py3 import DatadogOrganizationProperties
    from ._models_py3 import DatadogSetPasswordLink
    from ._models_py3 import DatadogSingleSignOnProperties
    from ._models_py3 import DatadogSingleSignOnResource
    from ._models_py3 import DatadogSingleSignOnResourceListResponse
    from ._models_py3 import ErrorResponseBody
    from ._models_py3 import FilteringTag
    from ._models_py3 import LinkedResource
    from ._models_py3 import LinkedResourceListResponse
    from ._models_py3 import MonitoredResource
    from ._models_py3 import MonitoredResourceListResponse
    from ._models_py3 import MonitoringTagRules
    from ._models_py3 import MonitoringTagRulesListResponse
    from ._models_py3 import OperationDisplay
    from ._models_py3 import OperationListResult
    from ._models_py3 import OperationResult
    from ._models_py3 import ResourceProviderDefaultErrorResponse
    from ._models_py3 import UserInfo
except (SyntaxError, ImportError):
    from ._models import DatadogApiKey
    from ._models import DatadogApiKeyListResponse
    from ._models import DatadogHost
    from ._models import DatadogHostListResponse
    from ._models import DatadogHostMetadata
    from ._models import DatadogInstallMethod
    from ._models import DatadogLogsAgent
    from ._models import DatadogMonitorResource
    from ._models import DatadogMonitorResourceListResponse
    from ._models import DatadogMonitorResourceUpdateParameters
    from ._models import DatadogOrganizationProperties
    from ._models import DatadogSetPasswordLink
    from ._models import DatadogSingleSignOnProperties
    from ._models import DatadogSingleSignOnResource
    from ._models import DatadogSingleSignOnResourceListResponse
    from ._models import ErrorResponseBody
    from ._models import FilteringTag
    from ._models import LinkedResource
    from ._models import LinkedResourceListResponse
    from ._models import MonitoredResource
    from ._models import MonitoredResourceListResponse
    from ._models import MonitoringTagRules
    from ._models import MonitoringTagRulesListResponse
    from ._models import OperationDisplay
    from ._models import OperationListResult
    from ._models import OperationResult
    from ._models import ResourceProviderDefaultErrorResponse
    from ._models import UserInfo

from ._microsoft_datadog_client_enums import (
    LiftrResourceCategories,
    ManagedIdentityTypes,
    MarketplaceSubscriptionStatus,
    MonitoringStatus,
    ProvisioningState,
    SingleSignOnStates,
    TagAction,
)

__all__ = [
    'DatadogApiKey',
    'DatadogApiKeyListResponse',
    'DatadogHost',
    'DatadogHostListResponse',
    'DatadogHostMetadata',
    'DatadogInstallMethod',
    'DatadogLogsAgent',
    'DatadogMonitorResource',
    'DatadogMonitorResourceListResponse',
    'DatadogMonitorResourceUpdateParameters',
    'DatadogOrganizationProperties',
    'DatadogSetPasswordLink',
    'DatadogSingleSignOnProperties',
    'DatadogSingleSignOnResource',
    'DatadogSingleSignOnResourceListResponse',
    'ErrorResponseBody',
    'FilteringTag',
    'LinkedResource',
    'LinkedResourceListResponse',
    'MonitoredResource',
    'MonitoredResourceListResponse',
    'MonitoringTagRules',
    'MonitoringTagRulesListResponse',
    'OperationDisplay',
    'OperationListResult',
    'OperationResult',
    'ResourceProviderDefaultErrorResponse',
    'UserInfo',
    'LiftrResourceCategories',
    'ManagedIdentityTypes',
    'MarketplaceSubscriptionStatus',
    'MonitoringStatus',
    'ProvisioningState',
    'SingleSignOnStates',
    'TagAction',
]
true
true
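The module above uses the common Azure SDK codegen pattern of preferring Python 3 typed models and falling back to untyped ones on older interpreters. The pattern, reduced to a single model as an editor-added sketch (names as in the record):

try:
    from ._models_py3 import DatadogApiKey        # typed, Python 3 only
except (SyntaxError, ImportError):
    from ._models import DatadogApiKey            # untyped fallback for older interpreters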
f703aff308996d0e92ba59c7db66cb2f74eb84ca
4,079
py
Python
vertica_python/vertica/messages/frontend_messages/startup.py
alonme/vertica-python
208685ce6285bde1edab6d18500ef0887d36bf91
[ "Apache-2.0" ]
183
2015-01-20T14:57:22.000Z
2018-08-09T21:13:19.000Z
vertica_python/vertica/messages/frontend_messages/startup.py
alonme/vertica-python
208685ce6285bde1edab6d18500ef0887d36bf91
[ "Apache-2.0" ]
139
2015-01-09T18:37:53.000Z
2018-08-13T07:09:26.000Z
vertica_python/vertica/messages/frontend_messages/startup.py
alonme/vertica-python
208685ce6285bde1edab6d18500ef0887d36bf91
[ "Apache-2.0" ]
110
2015-03-02T15:46:11.000Z
2018-07-27T15:50:29.000Z
# Copyright (c) 2018-2022 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""
Startup message

To begin a session, the frontend opens a connection to the backend and sends a
Startup message.
"""

from __future__ import print_function, division, absolute_import

import platform
import os
from struct import pack

# noinspection PyUnresolvedReferences,PyCompatibility
import vertica_python
from ..message import BulkFrontendMessage


class Startup(BulkFrontendMessage):
    message_id = None

    def __init__(self, user, database, session_label, os_user_name):
        BulkFrontendMessage.__init__(self)

        try:
            os_platform = platform.platform()
        except Exception as e:
            os_platform = ''
            print("WARN: Cannot get the OS info: {}".format(str(e)))

        try:
            pid = str(os.getpid())
        except Exception as e:
            pid = '0'
            print("WARN: Cannot get the process ID: {}".format(str(e)))

        self.parameters = {
            b'user': user,
            b'database': database,
            b'client_label': session_label,
            b'client_type': 'vertica-python',
            b'client_version': vertica_python.__version__,
            b'client_os': os_platform,
            b'client_os_user_name': os_user_name,
            b'client_pid': pid,
        }

    def read_bytes(self):
        # The fixed protocol version is followed by pairs of parameter name and value strings.
        # A zero byte is required as a terminator after the last name/value pair.
        # Parameters can appear in any order.
        fixed_protocol_version = 3 << 16 | 5
        bytes_ = pack('!I', fixed_protocol_version)

        # The frontend sends a requested protocol version to the backend.
        # Old servers (protocol < 3.7) ignore this value and use the fixed protocol version.
        # New servers (protocol >= 3.7) would try to find the common protocol
        # version in use for both client and server, and send back a ParameterStatus
        # message (key='protocol_version', value=<effective protocol version>)
        bytes_ += pack('!16sxIx', b'protocol_version', vertica_python.PROTOCOL_VERSION)

        for k in self.parameters:
            v = self.parameters[k].encode('utf-8')
            bytes_ += pack('!{}sx{}sx'.format(len(k), len(v)), k, v)

        bytes_ += pack('x')
        return bytes_
39.221154
94
0.694778
from __future__ import print_function, division, absolute_import import platform import os from struct import pack import vertica_python from ..message import BulkFrontendMessage class Startup(BulkFrontendMessage): message_id = None def __init__(self, user, database, session_label, os_user_name): BulkFrontendMessage.__init__(self) try: os_platform = platform.platform() except Exception as e: os_platform = '' print("WARN: Cannot get the OS info: {}".format(str(e))) try: pid = str(os.getpid()) except Exception as e: pid = '0' print("WARN: Cannot get the process ID: {}".format(str(e))) self.parameters = { b'user': user, b'database': database, b'client_label': session_label, b'client_type': 'vertica-python', b'client_version': vertica_python.__version__, b'client_os': os_platform, b'client_os_user_name': os_user_name, b'client_pid': pid, } def read_bytes(self): fixed_protocol_version = 3 << 16 | 5 bytes_ = pack('!I', fixed_protocol_version) bytes_ += pack('!16sxIx', b'protocol_version', vertica_python.PROTOCOL_VERSION) for k in self.parameters: v = self.parameters[k].encode('utf-8') bytes_ += pack('!{}sx{}sx'.format(len(k), len(v)), k, v) bytes_ += pack('x') return bytes_
true
true
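The comments in read_bytes() above spell out the startup-message framing: a fixed protocol version as a big-endian unsigned int, one special 'protocol_version' pair whose value is a 4-byte integer, ordinary NUL-terminated name/value string pairs, and a single zero byte as terminator. A minimal sketch of a decoder for that framing, assuming Python 3 bytes semantics (decode_startup_payload is a hypothetical helper, not part of vertica-python):

import struct

def decode_startup_payload(payload):
    # First four bytes: the fixed protocol version (big-endian unsigned int).
    (fixed_version,) = struct.unpack_from('!I', payload, 0)
    offset = 4
    params = {}
    # NUL-terminated name/value pairs follow; a lone NUL byte ends the list.
    while payload[offset] != 0:
        key_end = payload.index(b'\x00', offset)
        key = payload[offset:key_end]
        offset = key_end + 1
        if key == b'protocol_version':
            # This pair's value is a 4-byte integer followed by its own NUL.
            (value,) = struct.unpack_from('!I', payload, offset)
            offset += 5
        else:
            val_end = payload.index(b'\x00', offset)
            value = payload[offset:val_end].decode('utf-8')
            offset = val_end + 1
        params[key.decode('utf-8')] = value
    return fixed_version, params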
f703b03b42284b2d7a546b98872dcbf83ea9965f
7,279
py
Python
virt/ansible-latest/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_router_setting.py
lakhlaifi/RedHat-Ansible
27c5077cced9d416081fcd5d69ea44bca0317fa4
[ "Apache-2.0" ]
1
2020-03-29T18:41:01.000Z
2020-03-29T18:41:01.000Z
ansible/ansible/modules/network/fortios/fortios_router_setting.py
SergeyCherepanov/ansible
875711cd2fd6b783c812241c2ed7a954bf6f670f
[ "MIT" ]
7
2020-09-07T17:27:56.000Z
2022-03-02T06:25:46.000Z
ansible/ansible/modules/network/fortios/fortios_router_setting.py
SergeyCherepanov/ansible
875711cd2fd6b783c812241c2ed7a954bf6f670f
[ "MIT" ]
1
2020-03-22T01:04:48.000Z
2020-03-22T01:04:48.000Z
#!/usr/bin/python from __future__ import (absolute_import, division, print_function) # Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # the lib use python logging can get it if the following is set in your # Ansible config. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_router_setting short_description: Configure router settings in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS by allowing the user to set and modify router feature and setting category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.2 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate ip address. required: true username: description: - FortiOS or FortiGate username. required: true password: description: - FortiOS or FortiGate password. default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol type: bool default: true router_setting: description: - Configure router settings. default: null suboptions: hostname: description: - Hostname for this virtual domain router. show-filter: description: - Prefix-list as filter for showing routes. Source router.prefix-list.name. ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" tasks: - name: Configure router settings. 
fortios_router_setting: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" router_setting: hostname: "myhostname" show-filter: "<your_own_value> (source router.prefix-list.name)" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule fos = None def login(data): host = data['host'] username = data['username'] password = data['password'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password) def filter_router_setting_data(json): option_list = ['hostname', 'show-filter'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def flatten_multilists_attributes(data): multilist_attrs = [] for attr in multilist_attrs: try: path = "data['" + "']['".join(elem for elem in attr) + "']" current_val = eval(path) flattened_val = ' '.join(elem for elem in current_val) exec(path + '= flattened_val') except BaseException: pass return data def router_setting(data, fos): vdom = data['vdom'] router_setting_data = data['router_setting'] flattened_data = flatten_multilists_attributes(router_setting_data) filtered_data = filter_router_setting_data(flattened_data) return fos.set('router', 'setting', data=filtered_data, vdom=vdom) def fortios_router(data, fos): login(data) if data['router_setting']: resp = router_setting(data, fos) fos.logout() return not resp['status'] == "success", resp['status'] == "success", resp def main(): fields = { "host": {"required": True, "type": "str"}, "username": {"required": True, "type": "str"}, "password": {"required": False, "type": "str", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "router_setting": { "required": False, "type": "dict", "options": { "hostname": {"required": False, "type": "str"}, "show-filter": {"required": False, "type": "str"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") global fos fos = FortiOSAPI() is_error, has_changed, result = fortios_router(module.params, fos) if not is_error: module.exit_json(changed=has_changed, meta=result) 
else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
27.467925
97
0.637862
from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_router_setting short_description: Configure router settings in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS by allowing the user to set and modify router feature and setting category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.2 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate ip address. required: true username: description: - FortiOS or FortiGate username. required: true password: description: - FortiOS or FortiGate password. default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol type: bool default: true router_setting: description: - Configure router settings. default: null suboptions: hostname: description: - Hostname for this virtual domain router. show-filter: description: - Prefix-list as filter for showing routes. Source router.prefix-list.name. ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" tasks: - name: Configure router settings. fortios_router_setting: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" router_setting: hostname: "myhostname" show-filter: "<your_own_value> (source router.prefix-list.name)" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule fos = None def login(data): host = data['host'] username = data['username'] password = data['password'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password) def filter_router_setting_data(json): option_list = ['hostname', 'show-filter'] dictionary = {} for attribute in 
option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def flatten_multilists_attributes(data): multilist_attrs = [] for attr in multilist_attrs: try: path = "data['" + "']['".join(elem for elem in attr) + "']" current_val = eval(path) flattened_val = ' '.join(elem for elem in current_val) exec(path + '= flattened_val') except BaseException: pass return data def router_setting(data, fos): vdom = data['vdom'] router_setting_data = data['router_setting'] flattened_data = flatten_multilists_attributes(router_setting_data) filtered_data = filter_router_setting_data(flattened_data) return fos.set('router', 'setting', data=filtered_data, vdom=vdom) def fortios_router(data, fos): login(data) if data['router_setting']: resp = router_setting(data, fos) fos.logout() return not resp['status'] == "success", resp['status'] == "success", resp def main(): fields = { "host": {"required": True, "type": "str"}, "username": {"required": True, "type": "str"}, "password": {"required": False, "type": "str", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": {"required": False, "type": "bool", "default": True}, "router_setting": { "required": False, "type": "dict", "options": { "hostname": {"required": False, "type": "str"}, "show-filter": {"required": False, "type": "str"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") global fos fos = FortiOSAPI() is_error, has_changed, result = fortios_router(module.params, fos) if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
true
true
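flatten_multilists_attributes in the module above builds attribute paths as code strings and runs them through eval/exec. A functionally equivalent sketch (the _safe helper name is ours, not Fortinet's) that walks the nested dict directly and avoids dynamic code execution:

from functools import reduce

def flatten_multilists_attributes_safe(data, multilist_attrs):
    # Each attr is a list of keys describing a path into the nested dict,
    # e.g. ['a', 'b'] for data['a']['b']; the leaf list is joined by spaces.
    for attr in multilist_attrs:
        try:
            parent = reduce(lambda d, k: d[k], attr[:-1], data)
            parent[attr[-1]] = ' '.join(elem for elem in parent[attr[-1]])
        except (KeyError, TypeError):
            pass  # mirror the original's silent skip on missing paths
    return data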
f703b06ebc1168ba15e4a98c0a1b74b76c56faf5
1,243
py
Python
app/file/config/__init__.py
SystemLight/T-fastapi
a1d50f8bdb403939b4d38cbf8113951ae54a0b17
[ "MIT" ]
null
null
null
app/file/config/__init__.py
SystemLight/T-fastapi
a1d50f8bdb403939b4d38cbf8113951ae54a0b17
[ "MIT" ]
null
null
null
app/file/config/__init__.py
SystemLight/T-fastapi
a1d50f8bdb403939b4d38cbf8113951ae54a0b17
[ "MIT" ]
null
null
null
import os import posixpath from enum import Enum from fastapi import Path, HTTPException from utils import security class UploadPath(str, Enum): default = "default" UPLOAD_PATH_DICT = { UploadPath.default: "default/" } def get_upload(upload_key: UploadPath = Path(..., description="Upload file chunk location")): """ Get the file upload directory :param upload_key: :return: """ root_path = posixpath.abspath(UPLOAD_PATH_DICT[upload_key]) def func(folder): path = security.safe_join(root_path, folder) os.makedirs(path, exist_ok=True) return path return func class DownloadPath(str, Enum): default = "default" DOWNLOAD_PATH_DICT = { DownloadPath.default: "default/" } def get_download(download_key: DownloadPath = Path(..., description="Download file chunk location")): """ Get the download file path :param download_key: :return: """ root_path = posixpath.abspath(DOWNLOAD_PATH_DICT[download_key]) def func(folder): path = security.safe_join(root_path, folder) if not posixpath.exists(path): raise HTTPException(404, "The access file does not exist") for filename in os.listdir(path): return posixpath.join(path, filename), filename return func
18.833333
80
0.665326
import os import posixpath from enum import Enum from fastapi import Path, HTTPException from utils import security class UploadPath(str, Enum): default = "default" UPLOAD_PATH_DICT = { UploadPath.default: "default/" } def get_upload(upload_key: UploadPath = Path(..., description="Upload file chunk location")): root_path = posixpath.abspath(UPLOAD_PATH_DICT[upload_key]) def func(folder): path = security.safe_join(root_path, folder) os.makedirs(path, exist_ok=True) return path return func class DownloadPath(str, Enum): default = "default" DOWNLOAD_PATH_DICT = { DownloadPath.default: "default/" } def get_download(download_key: DownloadPath = Path(..., description="Download file chunk location")): root_path = posixpath.abspath(DOWNLOAD_PATH_DICT[download_key]) def func(folder): path = security.safe_join(root_path, folder) if not posixpath.exists(path): raise HTTPException(404, "The access file does not exist") for filename in os.listdir(path): return posixpath.join(path, filename), filename return func
true
true
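get_upload and get_download above are dependency factories: FastAPI resolves the enum path parameter, calls the factory, and injects the returned closure. A hedged sketch of how a route could consume get_upload (the route path and the 'folder' query parameter are illustrative, not part of the original project):

from fastapi import Depends, FastAPI
from app.file.config import get_upload  # import path inferred from the record's repo layout

app = FastAPI()

@app.get("/upload-dir/{upload_key}")
def upload_dir(folder: str, make_dir=Depends(get_upload)):
    # FastAPI resolves upload_key from the URL, calls get_upload, and
    # injects func; calling the closure creates and returns the directory.
    return {"path": make_dir(folder)}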
f703b19dc731fc9829fbf4343624e5debf96ab76
2,254
py
Python
var/spack/repos/builtin/packages/repeatmodeler/package.py
kkauder/spack
6ae8d5c380c1f42094b05d38be26b03650aafb39
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
2
2019-02-10T13:47:48.000Z
2019-04-17T13:05:17.000Z
var/spack/repos/builtin/packages/repeatmodeler/package.py
kkauder/spack
6ae8d5c380c1f42094b05d38be26b03650aafb39
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
32
2020-12-15T17:29:20.000Z
2022-03-21T15:08:31.000Z
var/spack/repos/builtin/packages/repeatmodeler/package.py
kkauder/spack
6ae8d5c380c1f42094b05d38be26b03650aafb39
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
2
2021-04-07T18:27:09.000Z
2022-03-31T22:52:38.000Z
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Repeatmodeler(Package): """RepeatModeler is a de-novo repeat family identification and modeling package.""" homepage = "http://www.repeatmasker.org/RepeatModeler/" url = "http://www.repeatmasker.org/RepeatModeler/RepeatModeler-open-1.0.11.tar.gz" version('1.0.11', sha256='7ff0d588b40f9ad5ce78876f3ab8d2332a20f5128f6357413f741bb7fa172193') depends_on('perl', type=('build', 'run')) depends_on('perl-json', type=('build', 'run')) depends_on('perl-uri', type=('build', 'run')) depends_on('perl-libwww-perl', type=('build', 'run')) depends_on('repeatmasker', type='run') depends_on('recon+repeatmasker', type='run') depends_on('repeatscout', type='run') depends_on('trf', type='run') depends_on('nseg', type='run') depends_on('ncbi-rmblastn', type='run') def install(self, spec, prefix): # like repeatmasker, another interactive installer # questions: # 1. <enter to continue> # 2. <perl path, default is OK> # 3. <source path, default is OK> # 4. RepeatMasker bin path # 5. RECON bin path # 6. RepeatScout bin path # 7. Nseg bin path # 8. trf bin path # 9. Add a search engine: # 1. RMBlast -> Path, Default? (Y/N) # 2. WUBlast/ABBlast -> Path, Default? (Y/N) # 3. Done config_answers = [ '', '', '', spec['repeatmasker'].prefix.bin, spec['recon'].prefix.bin, spec['repeatscout'].prefix.bin, spec['nseg'].prefix.bin, spec['trf'].prefix.bin, '1', spec['ncbi-rmblastn'].prefix.bin, 'Y', '3', ] config_filename = 'spack-config.in' with open(config_filename, 'w') as f: f.write('\n'.join(config_answers)) with open(config_filename, 'r') as f: perl = which('perl') perl('configure', input=f) install_tree('.', prefix.bin)
33.641791
96
0.582076
from spack import * class Repeatmodeler(Package): homepage = "http://www.repeatmasker.org/RepeatModeler/" url = "http://www.repeatmasker.org/RepeatModeler/RepeatModeler-open-1.0.11.tar.gz" version('1.0.11', sha256='7ff0d588b40f9ad5ce78876f3ab8d2332a20f5128f6357413f741bb7fa172193') depends_on('perl', type=('build', 'run')) depends_on('perl-json', type=('build', 'run')) depends_on('perl-uri', type=('build', 'run')) depends_on('perl-libwww-perl', type=('build', 'run')) depends_on('repeatmasker', type='run') depends_on('recon+repeatmasker', type='run') depends_on('repeatscout', type='run') depends_on('trf', type='run') depends_on('nseg', type='run') depends_on('ncbi-rmblastn', type='run') def install(self, spec, prefix): config_answers = [ '', '', '', spec['repeatmasker'].prefix.bin, spec['recon'].prefix.bin, spec['repeatscout'].prefix.bin, spec['nseg'].prefix.bin, spec['trf'].prefix.bin, '1', spec['ncbi-rmblastn'].prefix.bin, 'Y', '3', ] config_filename = 'spack-config.in' with open(config_filename, 'w') as f: f.write('\n'.join(config_answers)) with open(config_filename, 'r') as f: perl = which('perl') perl('configure', input=f) install_tree('.', prefix.bin)
true
true
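install() above scripts RepeatModeler's interactive configure prompts by writing the answers, one per line, to a file and feeding that file to perl on stdin. The same pattern outside Spack, sketched with the standard library (the answer values and the configure invocation are placeholders):

import subprocess

# Each empty string is an <enter> accepting a prompt's default; a real run
# would substitute the bin paths collected in config_answers above.
answers = '\n'.join(['', '', '', '/path/to/RepeatMasker/bin']) + '\n'
subprocess.run(['perl', 'configure'], input=answers.encode(), check=True)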
f703b1b06e104a60a7e9c722a084a851230c80a8
421
py
Python
4° Período/Programação de Computadores/lista 1/CONCEITOS DE LÓGICA DE PROGRAMAÇÃO/Conceitos básicos/Exercício 1) H.py
sullyvan15/UVV
2390cc2881792d036db1d8b098fe366f47cd98c3
[ "MIT" ]
null
null
null
4° Período/Programação de Computadores/lista 1/CONCEITOS DE LÓGICA DE PROGRAMAÇÃO/Conceitos básicos/Exercício 1) H.py
sullyvan15/UVV
2390cc2881792d036db1d8b098fe366f47cd98c3
[ "MIT" ]
1
2020-10-07T23:33:21.000Z
2020-10-08T01:15:11.000Z
4° Período/Programação de Computadores/lista 1/CONCEITOS DE LÓGICA DE PROGRAMAÇÃO/Conceitos básicos/Exercício 1) H.py
sullyvan15/Universidade-Vila-Velha
2390cc2881792d036db1d8b098fe366f47cd98c3
[ "MIT" ]
null
null
null
import math pi = math.pi raio = float(input('What is the radius of the sphere?: ')) volume_esf = 4/3*pi*math.pow(raio, 3) litro = 1 lata = litro*5 precolata = 50.00 totaltinta = volume_esf * lata totalpreco = totaltinta * precolata print(f'The volume of the sphere is {volume_esf: .2f}') print(f'The amount of paint needed is {totaltinta} liters of paint') print(f'The total to pay is: R$ {totalpreco: .2f}') # hey, I'm not sure about this one
19.136364
73
0.700713
import math pi = math.pi raio = float(input('What is the radius of the sphere?: ')) volume_esf = 4/3*pi*math.pow(raio, 3) litro = 1 lata = litro*5 precolata = 50.00 totaltinta = volume_esf * lata totalpreco = totaltinta * precolata print(f'The volume of the sphere is {volume_esf: .2f}') print(f'The amount of paint needed is {totaltinta} liters of paint') print(f'The total to pay is: R$ {totalpreco: .2f}')
true
true
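A worked check of the script's own arithmetic, using an illustrative radius of 3:

import math

volume_esf = 4 / 3 * math.pi * math.pow(3, 3)  # 36*pi, approx. 113.10
totaltinta = volume_esf * 5                    # approx. 565.49 liters of paint
totalpreco = totaltinta * 50.00                # approx. R$ 28274.33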
f703b74ac5d58a4ebdf04bf28c7ab476758aa9d5
9,599
py
Python
tests/test_notify_slack.py
linz/geospatial-data-lake
e92fb2fc0e050da471e3ecbc1c0331c551612092
[ "MIT" ]
5
2021-02-04T01:07:20.000Z
2021-04-16T09:35:21.000Z
tests/test_notify_slack.py
linz/geospatial-data-lake
e92fb2fc0e050da471e3ecbc1c0331c551612092
[ "MIT" ]
252
2020-10-26T10:34:43.000Z
2021-05-17T04:21:08.000Z
tests/test_notify_slack.py
linz/geospatial-data-lake
e92fb2fc0e050da471e3ecbc1c0331c551612092
[ "MIT" ]
1
2021-05-05T04:23:32.000Z
2021-05-05T04:23:32.000Z
from datetime import datetime, timezone from http import HTTPStatus from json import dumps, load from logging import getLogger from os import environ from unittest.mock import MagicMock, patch from mypy_boto3_events import EventBridgeClient from mypy_boto3_lambda import LambdaClient from mypy_boto3_sns.type_defs import MessageAttributeValueTypeDef from pytest import mark from pytest_subtests import SubTests from backend.api_keys import EVENT_KEY from backend.api_responses import STATUS_CODE_KEY from backend.aws_message_attributes import DATA_TYPE_STRING from backend.notify_status_update.task import ( EVENT_DETAIL_KEY, MESSAGE_ATTRIBUTE_DATASET_KEY, MESSAGE_ATTRIBUTE_STATUS_KEY, SLACK_URL_ENV_NAME, STEP_FUNCTION_ARN_KEY, STEP_FUNCTION_STARTDATE_KEY, STEP_FUNCTION_STOPDATE_KEY, WEBHOOK_MESSAGE_BLOCKS_KEY, lambda_handler, publish_sns_message, ) from backend.resources import ResourceName from backend.step_function import Outcome from backend.step_function_keys import ( ASSET_UPLOAD_KEY, DATASET_ID_KEY, DATASET_PREFIX_KEY, ERRORS_KEY, INPUT_KEY, JOB_STATUS_FAILED, JOB_STATUS_RUNNING, JOB_STATUS_SUCCEEDED, METADATA_UPLOAD_KEY, NEW_VERSION_S3_LOCATION, OUTPUT_KEY, STATUS_KEY, STEP_FUNCTION_KEY, UPDATE_DATASET_KEY, UPLOAD_STATUS_KEY, VALIDATION_KEY, VERSION_ID_KEY, ) from .aws_utils import any_arn_formatted_string, any_lambda_context, any_s3_url from .general_generators import any_https_url from .stac_generators import any_dataset_id, any_dataset_prefix, any_dataset_version_id STEP_FUNCTION_START_MILLISECOND_TIMESTAMP = round( datetime( 2001, 2, 3, hour=4, minute=5, second=6, microsecond=789876, tzinfo=timezone.utc ).timestamp() * 1000 ) STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP = STEP_FUNCTION_START_MILLISECOND_TIMESTAMP + 10 @patch("backend.notify_status_update.task.WebhookClient.send") @patch("backend.notify_status_update.task.get_import_status_given_arn") def should_notify_slack_with_finished_details_when_url_set( step_func_status_mock: MagicMock, webhook_client_mock: MagicMock ) -> None: # Given webhook_client_mock.return_value.status_code = HTTPStatus.OK step_func_status_mock.return_value = { STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_SUCCEEDED}, VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, } mock_slack_url = any_https_url() with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch( "backend.notify_status_update.task.publish_sns_message" ): # When notify_status_update_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_SUCCEEDED, STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(), INPUT_KEY: dumps( { DATASET_ID_KEY: any_dataset_id(), DATASET_PREFIX_KEY: any_dataset_prefix(), VERSION_ID_KEY: any_dataset_version_id(), } ), OUTPUT_KEY: dumps( { UPLOAD_STATUS_KEY: { VALIDATION_KEY: "", ASSET_UPLOAD_KEY: "", METADATA_UPLOAD_KEY: "", }, UPDATE_DATASET_KEY: {NEW_VERSION_S3_LOCATION: any_s3_url()}, } ), STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP, STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP, } } lambda_handler(notify_status_update_input, any_lambda_context()) # Then assert there is 15 slack_sdk message 'blocks' sent to webhook url webhook_client_mock.assert_called_once() assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 15 @patch("backend.notify_status_update.task.WebhookClient.send") def 
should_not_notify_slack_when_step_function_running(webhook_client_mock: MagicMock) -> None: # Given webhook_client_mock.return_value.status_code = HTTPStatus.OK mock_slack_url = any_https_url() with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch( "backend.notify_status_update.task.publish_sns_message" ): # When notify_status_update_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_RUNNING, STEP_FUNCTION_STOPDATE_KEY: None, } } lambda_handler(notify_status_update_input, any_lambda_context()) # Then webhook_client_mock.assert_not_called() @patch("backend.notify_status_update.task.WebhookClient.send") @patch("backend.notify_status_update.task.get_import_status_given_arn") def should_notify_slack_when_step_function_failed( step_func_status_mock: MagicMock, webhook_client_mock: MagicMock ) -> None: # Given webhook_client_mock.return_value.status_code = HTTPStatus.OK mock_slack_url = any_https_url() step_func_status_mock.return_value = { STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_FAILED}, VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, } with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch( "backend.notify_status_update.task.publish_sns_message" ): # When notify_status_update_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_FAILED, STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(), INPUT_KEY: dumps( { DATASET_ID_KEY: any_dataset_id(), DATASET_PREFIX_KEY: any_dataset_prefix(), VERSION_ID_KEY: any_dataset_version_id(), } ), STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP, STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP, }, OUTPUT_KEY: None, } lambda_handler(notify_status_update_input, any_lambda_context()) # Then assert there is 13 slack_sdk message 'blocks' sent to webhook url webhook_client_mock.assert_called_once() assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 13 @patch("backend.notify_status_update.task.WebhookClient.send") def should_log_and_not_post_to_slack_when_url_not_set( webhook_client_mock: MagicMock, subtests: SubTests ) -> None: # Given logger = getLogger("backend.notify_status_update.task") with patch("backend.notify_status_update.task.publish_sns_message"), patch.object( logger, "debug" ) as logger_mock: # When lambda_handler({}, any_lambda_context()) # Then with subtests.test("no slack message"): assert not webhook_client_mock.called with subtests.test("log created"): expected_log = dumps({EVENT_KEY: {}}) logger_mock.assert_any_call(expected_log) @patch("backend.notify_status_update.task.get_param") def should_publish_sns_message(get_param_mock: MagicMock) -> None: # Given get_param_mock.return_value = topic_arn = any_arn_formatted_string() dataset_prefix = any_dataset_prefix() publish_sns_message_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_SUCCEEDED, INPUT_KEY: dumps( { DATASET_PREFIX_KEY: dataset_prefix, } ), } } expected_sns_call = { "TopicArn": topic_arn, "Message": dumps(publish_sns_message_input), "MessageAttributes": { MESSAGE_ATTRIBUTE_DATASET_KEY: MessageAttributeValueTypeDef( DataType=DATA_TYPE_STRING, StringValue=dataset_prefix ), MESSAGE_ATTRIBUTE_STATUS_KEY: MessageAttributeValueTypeDef( DataType=DATA_TYPE_STRING, StringValue=JOB_STATUS_SUCCEEDED ), }, } # When with patch("backend.notify_status_update.task.SNS_CLIENT.publish") as sns_client_mock: 
publish_sns_message(publish_sns_message_input) # Then assert sns_client_mock.call_args[1] == expected_sns_call @mark.infrastructure def should_launch_notify_slack_endpoint_lambda_function( lambda_client: LambdaClient, events_client: EventBridgeClient ) -> None: notify_status_lambda_arn = events_client.list_targets_by_rule( Rule=ResourceName.CLOUDWATCH_RULE_NAME.value )["Targets"][0]["Arn"] # When body = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_FAILED, INPUT_KEY: dumps( { DATASET_ID_KEY: any_dataset_id(), DATASET_PREFIX_KEY: any_dataset_prefix(), } ), }, OUTPUT_KEY: None, } resp = load( lambda_client.invoke( FunctionName=notify_status_lambda_arn, Payload=dumps(body).encode(), )["Payload"] ) assert resp.get(STATUS_CODE_KEY) == HTTPStatus.OK, resp
34.405018
95
0.679446
from datetime import datetime, timezone from http import HTTPStatus from json import dumps, load from logging import getLogger from os import environ from unittest.mock import MagicMock, patch from mypy_boto3_events import EventBridgeClient from mypy_boto3_lambda import LambdaClient from mypy_boto3_sns.type_defs import MessageAttributeValueTypeDef from pytest import mark from pytest_subtests import SubTests from backend.api_keys import EVENT_KEY from backend.api_responses import STATUS_CODE_KEY from backend.aws_message_attributes import DATA_TYPE_STRING from backend.notify_status_update.task import ( EVENT_DETAIL_KEY, MESSAGE_ATTRIBUTE_DATASET_KEY, MESSAGE_ATTRIBUTE_STATUS_KEY, SLACK_URL_ENV_NAME, STEP_FUNCTION_ARN_KEY, STEP_FUNCTION_STARTDATE_KEY, STEP_FUNCTION_STOPDATE_KEY, WEBHOOK_MESSAGE_BLOCKS_KEY, lambda_handler, publish_sns_message, ) from backend.resources import ResourceName from backend.step_function import Outcome from backend.step_function_keys import ( ASSET_UPLOAD_KEY, DATASET_ID_KEY, DATASET_PREFIX_KEY, ERRORS_KEY, INPUT_KEY, JOB_STATUS_FAILED, JOB_STATUS_RUNNING, JOB_STATUS_SUCCEEDED, METADATA_UPLOAD_KEY, NEW_VERSION_S3_LOCATION, OUTPUT_KEY, STATUS_KEY, STEP_FUNCTION_KEY, UPDATE_DATASET_KEY, UPLOAD_STATUS_KEY, VALIDATION_KEY, VERSION_ID_KEY, ) from .aws_utils import any_arn_formatted_string, any_lambda_context, any_s3_url from .general_generators import any_https_url from .stac_generators import any_dataset_id, any_dataset_prefix, any_dataset_version_id STEP_FUNCTION_START_MILLISECOND_TIMESTAMP = round( datetime( 2001, 2, 3, hour=4, minute=5, second=6, microsecond=789876, tzinfo=timezone.utc ).timestamp() * 1000 ) STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP = STEP_FUNCTION_START_MILLISECOND_TIMESTAMP + 10 @patch("backend.notify_status_update.task.WebhookClient.send") @patch("backend.notify_status_update.task.get_import_status_given_arn") def should_notify_slack_with_finished_details_when_url_set( step_func_status_mock: MagicMock, webhook_client_mock: MagicMock ) -> None: webhook_client_mock.return_value.status_code = HTTPStatus.OK step_func_status_mock.return_value = { STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_SUCCEEDED}, VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, } mock_slack_url = any_https_url() with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch( "backend.notify_status_update.task.publish_sns_message" ): notify_status_update_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_SUCCEEDED, STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(), INPUT_KEY: dumps( { DATASET_ID_KEY: any_dataset_id(), DATASET_PREFIX_KEY: any_dataset_prefix(), VERSION_ID_KEY: any_dataset_version_id(), } ), OUTPUT_KEY: dumps( { UPLOAD_STATUS_KEY: { VALIDATION_KEY: "", ASSET_UPLOAD_KEY: "", METADATA_UPLOAD_KEY: "", }, UPDATE_DATASET_KEY: {NEW_VERSION_S3_LOCATION: any_s3_url()}, } ), STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP, STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP, } } lambda_handler(notify_status_update_input, any_lambda_context()) webhook_client_mock.assert_called_once() assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 15 @patch("backend.notify_status_update.task.WebhookClient.send") def should_not_notify_slack_when_step_function_running(webhook_client_mock: MagicMock) -> None: webhook_client_mock.return_value.status_code = 
HTTPStatus.OK mock_slack_url = any_https_url() with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch( "backend.notify_status_update.task.publish_sns_message" ): notify_status_update_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_RUNNING, STEP_FUNCTION_STOPDATE_KEY: None, } } lambda_handler(notify_status_update_input, any_lambda_context()) webhook_client_mock.assert_not_called() @patch("backend.notify_status_update.task.WebhookClient.send") @patch("backend.notify_status_update.task.get_import_status_given_arn") def should_notify_slack_when_step_function_failed( step_func_status_mock: MagicMock, webhook_client_mock: MagicMock ) -> None: webhook_client_mock.return_value.status_code = HTTPStatus.OK mock_slack_url = any_https_url() step_func_status_mock.return_value = { STEP_FUNCTION_KEY: {STATUS_KEY: JOB_STATUS_FAILED}, VALIDATION_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, METADATA_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, ASSET_UPLOAD_KEY: {STATUS_KEY: Outcome.SKIPPED.value, ERRORS_KEY: []}, } with patch.dict(environ, {SLACK_URL_ENV_NAME: mock_slack_url}), patch( "backend.notify_status_update.task.publish_sns_message" ): notify_status_update_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_FAILED, STEP_FUNCTION_ARN_KEY: any_arn_formatted_string(), INPUT_KEY: dumps( { DATASET_ID_KEY: any_dataset_id(), DATASET_PREFIX_KEY: any_dataset_prefix(), VERSION_ID_KEY: any_dataset_version_id(), } ), STEP_FUNCTION_STARTDATE_KEY: STEP_FUNCTION_START_MILLISECOND_TIMESTAMP, STEP_FUNCTION_STOPDATE_KEY: STEP_FUNCTION_STOP_MILLISECOND_TIMESTAMP, }, OUTPUT_KEY: None, } lambda_handler(notify_status_update_input, any_lambda_context()) webhook_client_mock.assert_called_once() assert len(webhook_client_mock.call_args[1][WEBHOOK_MESSAGE_BLOCKS_KEY]) == 13 @patch("backend.notify_status_update.task.WebhookClient.send") def should_log_and_not_post_to_slack_when_url_not_set( webhook_client_mock: MagicMock, subtests: SubTests ) -> None: logger = getLogger("backend.notify_status_update.task") with patch("backend.notify_status_update.task.publish_sns_message"), patch.object( logger, "debug" ) as logger_mock: lambda_handler({}, any_lambda_context()) with subtests.test("no slack message"): assert not webhook_client_mock.called with subtests.test("log created"): expected_log = dumps({EVENT_KEY: {}}) logger_mock.assert_any_call(expected_log) @patch("backend.notify_status_update.task.get_param") def should_publish_sns_message(get_param_mock: MagicMock) -> None: get_param_mock.return_value = topic_arn = any_arn_formatted_string() dataset_prefix = any_dataset_prefix() publish_sns_message_input = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_SUCCEEDED, INPUT_KEY: dumps( { DATASET_PREFIX_KEY: dataset_prefix, } ), } } expected_sns_call = { "TopicArn": topic_arn, "Message": dumps(publish_sns_message_input), "MessageAttributes": { MESSAGE_ATTRIBUTE_DATASET_KEY: MessageAttributeValueTypeDef( DataType=DATA_TYPE_STRING, StringValue=dataset_prefix ), MESSAGE_ATTRIBUTE_STATUS_KEY: MessageAttributeValueTypeDef( DataType=DATA_TYPE_STRING, StringValue=JOB_STATUS_SUCCEEDED ), }, } with patch("backend.notify_status_update.task.SNS_CLIENT.publish") as sns_client_mock: publish_sns_message(publish_sns_message_input) assert sns_client_mock.call_args[1] == expected_sns_call @mark.infrastructure def should_launch_notify_slack_endpoint_lambda_function( lambda_client: LambdaClient, events_client: EventBridgeClient ) -> None: notify_status_lambda_arn = events_client.list_targets_by_rule( 
Rule=ResourceName.CLOUDWATCH_RULE_NAME.value )["Targets"][0]["Arn"] body = { EVENT_DETAIL_KEY: { STATUS_KEY: JOB_STATUS_FAILED, INPUT_KEY: dumps( { DATASET_ID_KEY: any_dataset_id(), DATASET_PREFIX_KEY: any_dataset_prefix(), } ), }, OUTPUT_KEY: None, } resp = load( lambda_client.invoke( FunctionName=notify_status_lambda_arn, Payload=dumps(body).encode(), )["Payload"] ) assert resp.get(STATUS_CODE_KEY) == HTTPStatus.OK, resp
true
true
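The tests above lean on two idioms: patch.dict to scope an environment variable to a with-block, and a MagicMock whose return_value carries whatever attributes the code under test reads. A standalone illustration, assuming DEMO_SLACK_URL is not otherwise set:

from os import environ
from unittest.mock import MagicMock, patch

with patch.dict(environ, {"DEMO_SLACK_URL": "https://example.invalid/hook"}):
    assert environ["DEMO_SLACK_URL"] == "https://example.invalid/hook"
assert "DEMO_SLACK_URL" not in environ  # patch.dict restored the mapping

# return_value stands in for the response object WebhookClient.send yields.
send_mock = MagicMock()
send_mock.return_value.status_code = 200
assert send_mock().status_code == 200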
f703bac82a9f2a7e37f961023a6241cdb63ca46d
458
py
Python
lib/twl.py
riandakarizal/ITeung
2d3fc7e4974c9a9b67ff61f2a77a528988b55820
[ "MIT" ]
null
null
null
lib/twl.py
riandakarizal/ITeung
2d3fc7e4974c9a9b67ff61f2a77a528988b55820
[ "MIT" ]
37
2020-03-22T23:21:14.000Z
2020-09-16T15:07:06.000Z
lib/twl.py
riandakarizal/ITeung
2d3fc7e4974c9a9b67ff61f2a77a528988b55820
[ "MIT" ]
1
2020-09-08T11:31:30.000Z
2020-09-08T11:31:30.000Z
# -*- coding: utf-8 -*- """ Created on Sat Mar 14 08:55:39 2020 @author: rolly """ import config from twilio.rest import Client #https://api.whatsapp.com/send?phone=14155238886&text=join%20actual-nor&source=&data= def sendMsg(num,msg): client = Client(config.account_sid, config.auth_token) message = client.messages.create( to="whatsapp:+"+num, from_="whatsapp:+14155238886", body=msg) print(message.sid)
24.105263
85
0.657205
import config from twilio.rest import Client def sendMsg(num,msg): client = Client(config.account_sid, config.auth_token) message = client.messages.create( to="whatsapp:+"+num, from_="whatsapp:+14155238886", body=msg) print(message.sid)
true
true
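A hypothetical call to the helper above; the recipient is a placeholder in E.164 form without the leading '+' (sendMsg prepends "whatsapp:+"), and config.account_sid / config.auth_token must hold real Twilio sandbox credentials:

sendMsg('14155551234', 'Hello from the sandbox')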
f703bb46c6369210b5bbac4b3c528d3ee60d82b6
1,416
py
Python
aws_lambda_powertools/utilities/parser/envelopes/sns.py
n2N8Z/aws-lambda-powertools-python
0cb5d506f534ac76b42f2d5959d93c7b2bb4d8e9
[ "MIT-0" ]
null
null
null
aws_lambda_powertools/utilities/parser/envelopes/sns.py
n2N8Z/aws-lambda-powertools-python
0cb5d506f534ac76b42f2d5959d93c7b2bb4d8e9
[ "MIT-0" ]
null
null
null
aws_lambda_powertools/utilities/parser/envelopes/sns.py
n2N8Z/aws-lambda-powertools-python
0cb5d506f534ac76b42f2d5959d93c7b2bb4d8e9
[ "MIT-0" ]
null
null
null
import logging from typing import Any, Dict, List, Optional, Union from ..models import SnsModel from ..types import Model from .base import BaseEnvelope logger = logging.getLogger(__name__) class SnsEnvelope(BaseEnvelope): """SNS Envelope to extract array of Records The record's body parameter is a string, though it can also be a JSON encoded string. Regardless of its type it'll be parsed into a BaseModel object. Note: Records will be parsed the same way so if model is str, all items in the list will be parsed as str and not as JSON (and vice versa) """ def parse(self, data: Optional[Union[Dict[str, Any], Any]], model: Model) -> List[Optional[Model]]: """Parses records found with model provided Parameters ---------- data : Dict Lambda event to be parsed model : Model Data model provided to parse after extracting data using envelope Returns ------- List List of records parsed with model provided """ logger.debug(f"Parsing incoming data with SNS model {SnsModel}") parsed_envelope = SnsModel.parse_obj(data) output = [] logger.debug(f"Parsing SNS records in `body` with {model}") for record in parsed_envelope.Records: output.append(self._parse(data=record.Sns.Message, model=model)) return output
32.930233
103
0.655367
import logging from typing import Any, Dict, List, Optional, Union from ..models import SnsModel from ..types import Model from .base import BaseEnvelope logger = logging.getLogger(__name__) class SnsEnvelope(BaseEnvelope): def parse(self, data: Optional[Union[Dict[str, Any], Any]], model: Model) -> List[Optional[Model]]: logger.debug(f"Parsing incoming data with SNS model {SnsModel}") parsed_envelope = SnsModel.parse_obj(data) output = [] logger.debug(f"Parsing SNS records in `body` with {model}") for record in parsed_envelope.Records: output.append(self._parse(data=record.Sns.Message, model=model)) return output
true
true
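The envelope above delegates per-record work to BaseEnvelope._parse, which boils down to validating each record.Sns.Message string against the supplied model. That core step, illustrated directly with pydantic (Order is a made-up payload model):

from pydantic import BaseModel

class Order(BaseModel):
    id: int
    note: str

body = '{"id": 7, "note": "restock"}'  # what a record.Sns.Message might hold
order = Order.parse_raw(body)          # pydantic v1 API, as used in this library's era
assert order.id == 7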
f703bc9d08a5ff497c1674f4e927bd52424324e7
6,522
py
Python
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/vmware/vmware_datacenter.py
otus-devops-2019-02/yyashkin_infra
0cd0c003884155ac922e3e301305ac202de7028c
[ "MIT" ]
1
2020-03-29T18:41:01.000Z
2020-03-29T18:41:01.000Z
ansible/ansible/modules/cloud/vmware/vmware_datacenter.py
SergeyCherepanov/ansible
875711cd2fd6b783c812241c2ed7a954bf6f670f
[ "MIT" ]
7
2020-09-07T17:27:56.000Z
2022-03-02T06:25:46.000Z
ansible/ansible/modules/cloud/vmware/vmware_datacenter.py
SergeyCherepanov/ansible
875711cd2fd6b783c812241c2ed7a954bf6f670f
[ "MIT" ]
1
2020-03-22T01:04:48.000Z
2020-03-22T01:04:48.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2015, Joseph Callen <jcallen () csc.com> # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: vmware_datacenter short_description: Manage VMware vSphere Datacenters description: - This module can be used to manage (create, delete) VMware vSphere Datacenters. version_added: 2.0 author: - Joseph Callen (@jcpowermac) - Kamil Szczygiel (@kamsz) notes: - Tested on vSphere 6.0, 6.5 requirements: - "python >= 2.6" - PyVmomi options: datacenter_name: description: - The name of the datacenter the cluster will be created in. required: True state: description: - If the datacenter should be present or absent. choices: [ present, absent ] default: present extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' - name: Create Datacenter vmware_datacenter: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter_name: '{{ datacenter_name }}' state: present delegate_to: localhost - name: Delete Datacenter vmware_datacenter: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter_name: '{{ datacenter_name }}' state: absent delegate_to: localhost register: datacenter_delete_result ''' RETURN = """# """ try: from pyVmomi import vim, vmodl except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import PyVmomi, find_datacenter_by_name, vmware_argument_spec, wait_for_task from ansible.module_utils._text import to_native class VmwareDatacenterManager(PyVmomi): def __init__(self, module): super(VmwareDatacenterManager, self).__init__(module) self.datacenter_name = self.params.get('datacenter_name') self.datacenter_obj = self.get_datacenter() def ensure(self): state = self.module.params.get('state') if state == 'present': self.create_datacenter() if state == 'absent': self.destroy_datacenter() def get_datacenter(self): try: datacenter_obj = find_datacenter_by_name(self.content, self.datacenter_name) return datacenter_obj except (vmodl.MethodFault, vmodl.RuntimeFault) as runtime_fault: self.module.fail_json(msg="Failed to get datacenter '%s'" " due to : %s" % (self.datacenter_name, to_native(runtime_fault.msg))) except Exception as generic_exc: self.module.fail_json(msg="Failed to get datacenter" " '%s' due to generic error: %s" % (self.datacenter_name, to_native(generic_exc))) def create_datacenter(self): folder = self.content.rootFolder changed = False try: if not self.datacenter_obj and not self.module.check_mode: changed = True folder.CreateDatacenter(name=self.datacenter_name) self.module.exit_json(changed=changed) except vim.fault.DuplicateName as duplicate_name: self.module.exit_json(changed=changed) except vim.fault.InvalidName as invalid_name: self.module.fail_json(msg="Specified datacenter name '%s' is an" " invalid name : %s" % (self.datacenter_name, to_native(invalid_name.msg))) except vmodl.fault.NotSupported as not_supported: # This should never happen self.module.fail_json(msg="Trying to create a datacenter '%s' on" " an incorrect folder object : %s" % (self.datacenter_name, to_native(not_supported.msg))) except (vmodl.RuntimeFault, vmodl.MethodFault) as 
runtime_fault: self.module.fail_json(msg="Failed to create a datacenter" " '%s' due to : %s" % (self.datacenter_name, to_native(runtime_fault.msg))) except Exception as generic_exc: self.module.fail_json(msg="Failed to create a datacenter" " '%s' due to generic error: %s" % (self.datacenter_name, to_native(generic_exc))) def destroy_datacenter(self): results = dict(changed=False) try: if self.datacenter_obj and not self.module.check_mode: task = self.datacenter_obj.Destroy_Task() changed, result = wait_for_task(task) results['changed'] = changed results['result'] = result self.module.exit_json(**results) except (vim.fault.VimFault, vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault: self.module.fail_json(msg="Failed to delete a datacenter" " '%s' due to : %s" % (self.datacenter_name, to_native(runtime_fault.msg))) except Exception as generic_exc: self.module.fail_json(msg="Failed to delete a datacenter" " '%s' due to generic error: %s" % (self.datacenter_name, to_native(generic_exc))) def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( datacenter_name=dict(required=True, type='str'), state=dict(default='present', choices=['present', 'absent'], type='str') ) ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) vmware_datacenter_mgr = VmwareDatacenterManager(module) vmware_datacenter_mgr.ensure() if __name__ == '__main__': main()
38.364706
109
0.58939
from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: vmware_datacenter short_description: Manage VMware vSphere Datacenters description: - This module can be used to manage (create, delete) VMware vSphere Datacenters. version_added: 2.0 author: - Joseph Callen (@jcpowermac) - Kamil Szczygiel (@kamsz) notes: - Tested on vSphere 6.0, 6.5 requirements: - "python >= 2.6" - PyVmomi options: datacenter_name: description: - The name of the datacenter the cluster will be created in. required: True state: description: - If the datacenter should be present or absent. choices: [ present, absent ] default: present extends_documentation_fragment: vmware.documentation ''' EXAMPLES = ''' - name: Create Datacenter vmware_datacenter: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter_name: '{{ datacenter_name }}' state: present delegate_to: localhost - name: Delete Datacenter vmware_datacenter: hostname: '{{ vcenter_hostname }}' username: '{{ vcenter_username }}' password: '{{ vcenter_password }}' datacenter_name: '{{ datacenter_name }}' state: absent delegate_to: localhost register: datacenter_delete_result ''' RETURN = """# """ try: from pyVmomi import vim, vmodl except ImportError: pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.vmware import PyVmomi, find_datacenter_by_name, vmware_argument_spec, wait_for_task from ansible.module_utils._text import to_native class VmwareDatacenterManager(PyVmomi): def __init__(self, module): super(VmwareDatacenterManager, self).__init__(module) self.datacenter_name = self.params.get('datacenter_name') self.datacenter_obj = self.get_datacenter() def ensure(self): state = self.module.params.get('state') if state == 'present': self.create_datacenter() if state == 'absent': self.destroy_datacenter() def get_datacenter(self): try: datacenter_obj = find_datacenter_by_name(self.content, self.datacenter_name) return datacenter_obj except (vmodl.MethodFault, vmodl.RuntimeFault) as runtime_fault: self.module.fail_json(msg="Failed to get datacenter '%s'" " due to : %s" % (self.datacenter_name, to_native(runtime_fault.msg))) except Exception as generic_exc: self.module.fail_json(msg="Failed to get datacenter" " '%s' due to generic error: %s" % (self.datacenter_name, to_native(generic_exc))) def create_datacenter(self): folder = self.content.rootFolder changed = False try: if not self.datacenter_obj and not self.module.check_mode: changed = True folder.CreateDatacenter(name=self.datacenter_name) self.module.exit_json(changed=changed) except vim.fault.DuplicateName as duplicate_name: self.module.exit_json(changed=changed) except vim.fault.InvalidName as invalid_name: self.module.fail_json(msg="Specified datacenter name '%s' is an" " invalid name : %s" % (self.datacenter_name, to_native(invalid_name.msg))) except vmodl.fault.NotSupported as not_supported: self.module.fail_json(msg="Trying to create a datacenter '%s' on" " an incorrect folder object : %s" % (self.datacenter_name, to_native(not_supported.msg))) except (vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault: self.module.fail_json(msg="Failed to create a datacenter" " '%s' due to : %s" % (self.datacenter_name, to_native(runtime_fault.msg))) except Exception as generic_exc: self.module.fail_json(msg="Failed to create a datacenter" " '%s' due to generic error: 
%s" % (self.datacenter_name, to_native(generic_exc))) def destroy_datacenter(self): results = dict(changed=False) try: if self.datacenter_obj and not self.module.check_mode: task = self.datacenter_obj.Destroy_Task() changed, result = wait_for_task(task) results['changed'] = changed results['result'] = result self.module.exit_json(**results) except (vim.fault.VimFault, vmodl.RuntimeFault, vmodl.MethodFault) as runtime_fault: self.module.fail_json(msg="Failed to delete a datacenter" " '%s' due to : %s" % (self.datacenter_name, to_native(runtime_fault.msg))) except Exception as generic_exc: self.module.fail_json(msg="Failed to delete a datacenter" " '%s' due to generic error: %s" % (self.datacenter_name, to_native(generic_exc))) def main(): argument_spec = vmware_argument_spec() argument_spec.update( dict( datacenter_name=dict(required=True, type='str'), state=dict(default='present', choices=['present', 'absent'], type='str') ) ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) vmware_datacenter_mgr = VmwareDatacenterManager(module) vmware_datacenter_mgr.ensure() if __name__ == '__main__': main()
true
true
f703bdae29f09a662a7398ff7215e7e7a58112a9
1,142
py
Python
quick_reports_demo/blog/migrations/0001_initial.py
brsbilgic/django-quick-reports
d69a532711d23f3640874c1c5fcacdbe328c6805
[ "MIT" ]
7
2015-07-12T14:45:18.000Z
2020-12-25T04:38:22.000Z
quick_reports_demo/blog/migrations/0001_initial.py
brsbilgic/django_jooy_reporting
d69a532711d23f3640874c1c5fcacdbe328c6805
[ "MIT" ]
1
2021-03-19T21:49:50.000Z
2021-03-19T21:49:50.000Z
quick_reports_demo/blog/migrations/0001_initial.py
brsbilgic/django-quick-reports
d69a532711d23f3640874c1c5fcacdbe328c6805
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=190)), ('body', models.TextField()), ('slug', models.SlugField(max_length=190)), ('status', models.IntegerField(default=0, choices=[(0, b'Draft'), (1, b'Published')])), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('author', models.ForeignKey(related_name=b'blog_article_author', to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ['-created_at'], }, bases=(models.Model,), ), ]
34.606061
114
0.590193
from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=190)), ('body', models.TextField()), ('slug', models.SlugField(max_length=190)), ('status', models.IntegerField(default=0, choices=[(0, b'Draft'), (1, b'Published')])), ('created_at', models.DateTimeField(default=django.utils.timezone.now)), ('author', models.ForeignKey(related_name=b'blog_article_author', to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ['-created_at'], }, bases=(models.Model,), ), ]
true
true
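A sketch of the models.py declaration a migration like the one above is typically generated from, reconstructed from its field list (Django 1.x-era syntax, hence ForeignKey without on_delete):

from django.conf import settings
from django.db import models
from django.utils import timezone

class Article(models.Model):
    STATUS_CHOICES = ((0, 'Draft'), (1, 'Published'))

    title = models.CharField(max_length=190)
    body = models.TextField()
    slug = models.SlugField(max_length=190)
    status = models.IntegerField(default=0, choices=STATUS_CHOICES)
    created_at = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey(settings.AUTH_USER_MODEL,
                               related_name='blog_article_author')

    class Meta:
        ordering = ['-created_at']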
f703be8eb9c4bf6ae4bb900027692d9f03afd640
4,804
py
Python
third_party/py/gflags/gflags/flags_modules_for_testing/module_bar.py
sevki/bazel
b18915752a69fbbc6ed94e1710198167593565fc
[ "Apache-2.0" ]
218
2015-04-01T07:12:40.000Z
2021-12-28T05:02:06.000Z
third_party/py/gflags/gflags/flags_modules_for_testing/module_bar.py
sevki/bazel
b18915752a69fbbc6ed94e1710198167593565fc
[ "Apache-2.0" ]
67
2022-01-12T18:22:13.000Z
2022-01-12T18:24:28.000Z
third_party/py/gflags/gflags/flags_modules_for_testing/module_bar.py
sevki/bazel
b18915752a69fbbc6ed94e1710198167593565fc
[ "Apache-2.0" ]
72
2015-04-01T07:12:41.000Z
2021-10-21T00:36:27.000Z
#!/usr/bin/env python

# Copyright 2014 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Auxiliary module for testing gflags.py.

The purpose of this module is to define a few flags.  We want to make
sure the unit tests for gflags.py involve more than one module.
"""

__author__ = 'salcianu@google.com (Alex Salcianu)'

import gflags
from gflags import _helpers

FLAGS = gflags.FLAGS


def DefineFlags(flag_values=FLAGS):
  """Defines some flags.

  Args:
    flag_values: The FlagValues object we want to register the flags
      with.
  """
  # The 'tmod_bar_' prefix (short for 'test_module_bar') ensures there
  # is no name clash with the existing flags.
  gflags.DEFINE_boolean('tmod_bar_x', True, 'Boolean flag.',
                        flag_values=flag_values)
  gflags.DEFINE_string('tmod_bar_y', 'default', 'String flag.',
                       flag_values=flag_values)
  gflags.DEFINE_boolean('tmod_bar_z', False,
                        'Another boolean flag from module bar.',
                        flag_values=flag_values)
  gflags.DEFINE_integer('tmod_bar_t', 4, 'Sample int flag.',
                        flag_values=flag_values)
  gflags.DEFINE_integer('tmod_bar_u', 5, 'Sample int flag.',
                        flag_values=flag_values)
  gflags.DEFINE_integer('tmod_bar_v', 6, 'Sample int flag.',
                        flag_values=flag_values)


def RemoveOneFlag(flag_name, flag_values=FLAGS):
  """Removes the definition of one flag from gflags.FLAGS.

  Note: if the flag is not defined in gflags.FLAGS, this function does
  not do anything (in particular, it does not raise any exception).

  Motivation: We use this function for cleanup *after* a test: if there
  was a failure during a test and not all flags were declared, we do
  not want the cleanup code to crash.

  Args:
    flag_name: A string, the name of the flag to delete.
    flag_values: The FlagValues object we remove the flag from.
  """
  if flag_name in flag_values.FlagDict():
    flag_values.__delattr__(flag_name)


def NamesOfDefinedFlags():
  """Returns: List of names of the flags declared in this module."""
  return ['tmod_bar_x', 'tmod_bar_y', 'tmod_bar_z',
          'tmod_bar_t', 'tmod_bar_u', 'tmod_bar_v']


def RemoveFlags(flag_values=FLAGS):
  """Deletes the flag definitions done by the above DefineFlags().

  Args:
    flag_values: The FlagValues object we remove the flags from.
  """
  for flag_name in NamesOfDefinedFlags():
    RemoveOneFlag(flag_name, flag_values=flag_values)


def GetModuleName():
  """Uses GetCallingModule() to return the name of this module.

  For checking that _GetCallingModule works as expected.

  Returns:
    A string, the name of this module.
  """
  return _helpers.GetCallingModule()


def ExecuteCode(code, global_dict):
  """Executes some code in a given global environment.

  For testing of _GetCallingModule.

  Args:
    code: A string, the code to be executed.
    global_dict: A dictionary, the global environment that code should
      be executed in.
  """
  # Indeed, using exec generates a lint warning.  But some user code
  # actually uses exec, and we have to test for it ...
  exec(code, global_dict)  # pylint: disable=exec-used


def DisclaimKeyFlags():
  """Disclaims flags declared in this module."""
  gflags.DISCLAIM_key_flags()
35.323529
72
0.720025
__author__ = 'salcianu@google.com (Alex Salcianu)'

import gflags
from gflags import _helpers

FLAGS = gflags.FLAGS


def DefineFlags(flag_values=FLAGS):
  gflags.DEFINE_boolean('tmod_bar_x', True, 'Boolean flag.',
                        flag_values=flag_values)
  gflags.DEFINE_string('tmod_bar_y', 'default', 'String flag.',
                       flag_values=flag_values)
  gflags.DEFINE_boolean('tmod_bar_z', False,
                        'Another boolean flag from module bar.',
                        flag_values=flag_values)
  gflags.DEFINE_integer('tmod_bar_t', 4, 'Sample int flag.',
                        flag_values=flag_values)
  gflags.DEFINE_integer('tmod_bar_u', 5, 'Sample int flag.',
                        flag_values=flag_values)
  gflags.DEFINE_integer('tmod_bar_v', 6, 'Sample int flag.',
                        flag_values=flag_values)


def RemoveOneFlag(flag_name, flag_values=FLAGS):
  if flag_name in flag_values.FlagDict():
    flag_values.__delattr__(flag_name)


def NamesOfDefinedFlags():
  return ['tmod_bar_x', 'tmod_bar_y', 'tmod_bar_z',
          'tmod_bar_t', 'tmod_bar_u', 'tmod_bar_v']


def RemoveFlags(flag_values=FLAGS):
  for flag_name in NamesOfDefinedFlags():
    RemoveOneFlag(flag_name, flag_values=flag_values)


def GetModuleName():
  return _helpers.GetCallingModule()


def ExecuteCode(code, global_dict):
  exec(code, global_dict)


def DisclaimKeyFlags():
  gflags.DISCLAIM_key_flags()
true
true
f703bef3cc58393e1a48a25c34a53fe8a2773f4a
603
py
Python
pluto/control/modes/live_mode.py
chalant/pluto
e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc
[ "Apache-2.0" ]
null
null
null
pluto/control/modes/live_mode.py
chalant/pluto
e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc
[ "Apache-2.0" ]
null
null
null
pluto/control/modes/live_mode.py
chalant/pluto
e7bfd35a2c1fc0e0753bd2f840b0a4385b5124fc
[ "Apache-2.0" ]
null
null
null
from pluto.control.modes import mode
from pluto.control.modes.processes import process_manager

from protos import broker_pb2_grpc


class LiveControlMode(mode.ControlCommandHandler):
    def __init__(self, server, framework_url, process_factory):
        super(LiveControlMode, self).__init__(framework_url, process_factory)
        broker_pb2_grpc.add_BrokerServicer_to_server(self._broker, server)

    def _create_process_manager(self):
        return process_manager.LiveProcessManager()

    def _accept_loop(self, loop):
        # todo: only accept LiveLoop type or subtypes
        return False
33.5
77
0.772803
from pluto.control.modes import mode
from pluto.control.modes.processes import process_manager

from protos import broker_pb2_grpc


class LiveControlMode(mode.ControlCommandHandler):
    def __init__(self, server, framework_url, process_factory):
        super(LiveControlMode, self).__init__(framework_url, process_factory)
        broker_pb2_grpc.add_BrokerServicer_to_server(self._broker, server)

    def _create_process_manager(self):
        return process_manager.LiveProcessManager()

    def _accept_loop(self, loop):
        return False
true
true
f703c0dd2d885c53936772b4024a7a0e280f7489
1,998
py
Python
docs/conf.py
Hiteshsuhas/err-stackstorm
7579350ac50d9324b64a73b86d57e094270cb275
[ "Apache-2.0" ]
15
2016-09-19T12:06:12.000Z
2021-11-30T12:04:44.000Z
docs/conf.py
Hiteshsuhas/err-stackstorm
7579350ac50d9324b64a73b86d57e094270cb275
[ "Apache-2.0" ]
22
2017-06-19T18:13:54.000Z
2021-05-28T09:25:01.000Z
docs/conf.py
Hiteshsuhas/err-stackstorm
7579350ac50d9324b64a73b86d57e094270cb275
[ "Apache-2.0" ]
7
2017-06-19T17:03:59.000Z
2021-09-27T11:06:31.000Z
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- Project information -----------------------------------------------------

project = 'err-stackstorm'
copyright = '2019, err-stackstorm contributors'
author = 'err-stackstorm contributors'

# The full version, including alpha/beta/rc tags
release = '2.1.4'


# -- General configuration ---------------------------------------------------

master_doc = "index"

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
34.448276
79
0.667167
project = 'err-stackstorm'
copyright = '2019, err-stackstorm contributors'
author = 'err-stackstorm contributors'

release = '2.1.4'

master_doc = "index"

extensions = [
]

templates_path = ['_templates']

exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

html_theme = "sphinx_rtd_theme"

html_static_path = ['_static']
true
true
f703c31023fc04137e38d2d20e70db35d5885d77
19,181
py
Python
tests/devices/test_default_qubit_jax.py
pearcandy/pennylane
dfa35989cd0798496e41999a197bcf0eb26185df
[ "Apache-2.0" ]
null
null
null
tests/devices/test_default_qubit_jax.py
pearcandy/pennylane
dfa35989cd0798496e41999a197bcf0eb26185df
[ "Apache-2.0" ]
null
null
null
tests/devices/test_default_qubit_jax.py
pearcandy/pennylane
dfa35989cd0798496e41999a197bcf0eb26185df
[ "Apache-2.0" ]
null
null
null
import pytest

jax = pytest.importorskip("jax", minversion="0.2")
jnp = jax.numpy
import numpy as np
import pennylane as qml
from pennylane.devices.default_qubit_jax import DefaultQubitJax

pytestmark = pytest.mark.usefixtures("tape_mode")


class TestQNodeIntegration:
    """Integration tests for default.qubit.jax. This test ensures it integrates
    properly with the PennyLane UI, in particular the new QNode."""

    def test_defines_correct_capabilities(self):
        """Test that the device defines the right capabilities"""
        dev = qml.device("default.qubit.jax", wires=1)
        cap = dev.capabilities()
        capabilities = {
            "model": "qubit",
            "supports_finite_shots": True,
            "supports_tensor_observables": True,
            "returns_probs": True,
            "returns_state": True,
            "supports_reversible_diff": False,
            "supports_inverse_operations": True,
            "supports_analytic_computation": True,
            "passthru_interface": "jax",
        }
        assert cap == capabilities

    def test_defines_correct_capabilities_directly_from_class(self):
        """Test that the device defines the right capabilities"""
        dev = DefaultQubitJax(wires=1)
        cap = dev.capabilities()
        assert cap["supports_reversible_diff"] == False
        assert cap["passthru_interface"] == "jax"

    def test_load_device(self):
        """Test that the plugin device loads correctly"""
        dev = qml.device("default.qubit.jax", wires=2)
        assert dev.num_wires == 2
        assert dev.shots == 1000
        assert dev.analytic
        assert dev.short_name == "default.qubit.jax"
        assert dev.capabilities()["passthru_interface"] == "jax"

    def test_qubit_circuit(self, tol):
        """Test that the device provides the correct
        result for a simple circuit."""
        p = jnp.array(0.543)

        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax")
        def circuit(x):
            qml.RX(x, wires=0)
            return qml.expval(qml.PauliY(0))

        expected = -jnp.sin(p)
        if not qml.tape_mode_active():
            assert isinstance(circuit, qml.qnodes.PassthruQNode)
        assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)

    def test_qubit_circuit_with_jit(self, tol):
        """Test that the device provides the correct
        result for a simple circuit under a jax.jit."""
        p = jnp.array(0.543)

        dev = qml.device("default.qubit.jax", wires=1)

        @jax.jit
        @qml.qnode(dev, interface="jax")
        def circuit(x):
            qml.RX(x, wires=0)
            return qml.expval(qml.PauliY(0))

        expected = -jnp.sin(p)
        # Do not test isinstance here since the @jax.jit changes the function
        # type.
        # Just test that it works and spits out the right value.
        assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)

    def test_correct_state(self, tol):
        """Test that the device state is correct after applying a
        quantum function on the device"""
        dev = qml.device("default.qubit.jax", wires=2)

        state = dev.state
        expected = jnp.array([1, 0, 0, 0])
        assert jnp.allclose(state, expected, atol=tol, rtol=0)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.Hadamard(wires=0)
            qml.RZ(jnp.pi / 4, wires=0)
            return qml.expval(qml.PauliZ(0))

        circuit()
        state = dev.state

        amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)

        expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])
        assert jnp.allclose(state, expected, atol=tol, rtol=0)

    def test_correct_state_returned(self, tol):
        """Test that the device state is correct after applying a
        quantum function on the device"""
        if not qml.tape_mode_active():
            pytest.skip("Only supported in tape mode")
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.Hadamard(wires=0)
            qml.RZ(jnp.pi / 4, wires=0)
            return qml.state()

        state = circuit()

        amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)

        expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])
        assert jnp.allclose(state, expected, atol=tol, rtol=0)

    def test_sampling_with_jit(self):
        """Test that sampling works with a jax.jit"""

        @jax.jit
        def circuit(key):
            dev = qml.device("default.qubit.jax", wires=1, prng_key=key)

            @qml.qnode(dev, interface="jax", diff_method="backprop")
            def inner_circuit():
                qml.Hadamard(0)
                return qml.sample(qml.PauliZ(wires=0))

            return inner_circuit()

        a = circuit(jax.random.PRNGKey(0))
        b = circuit(jax.random.PRNGKey(0))
        c = circuit(jax.random.PRNGKey(1))
        np.testing.assert_array_equal(a, b)
        assert not np.all(a == c)

    def test_sampling_op_by_op(self):
        """Test that op-by-op sampling works as a new user would expect"""
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.Hadamard(0)
            return qml.sample(qml.PauliZ(wires=0))

        a = circuit()
        b = circuit()
        assert not np.all(a == b)

    def test_gates_dont_crash(self):
        """Test for gates that weren't covered by other tests.
        """
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.CRZ(0.0, wires=[0, 1])
            qml.CRot(1.0, 0.0, 0.0, wires=[0, 1])
            qml.CRY(0.0, wires=[0, 1])
            return qml.sample(qml.PauliZ(wires=0))

        circuit()  # Just don't crash.

    def test_diagonal_doesnt_crash(self):
        """Test that diagonal gates can be used."""
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.DiagonalQubitUnitary(np.array([1.0, 1.0]), wires=0)
            return qml.sample(qml.PauliZ(wires=0))

        circuit()  # Just don't crash.


class TestPassthruIntegration:
    """Tests for integration with the PassthruQNode"""

    @pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
    def test_jacobian_variable_multiply(self, tol, jacobian_transform):
        """Test that jacobian of a QNode with an attached default.qubit.jax device
        gives the correct result in the case of parameters multiplied by scalars"""
        x = 0.43316321
        y = 0.2162158
        z = 0.75110998
        weights = jnp.array([x, y, z])

        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax")
        def circuit(p):
            qml.RX(3 * p[0], wires=0)
            qml.RY(p[1], wires=0)
            qml.RX(p[2] / 2, wires=0)
            return qml.expval(qml.PauliZ(0))

        if not qml.tape_mode_active():
            assert isinstance(circuit, qml.qnodes.PassthruQNode)

        res = circuit(weights)

        expected = jnp.cos(3 * x) * jnp.cos(y) * jnp.cos(z / 2) - jnp.sin(3 * x) * jnp.sin(z / 2)
        assert jnp.allclose(res, expected, atol=tol, rtol=0)

        grad_fn = jacobian_transform(circuit, 0)
        res = grad_fn(jnp.array(weights))

        expected = jnp.array(
            [
                -3 * (jnp.sin(3 * x) * jnp.cos(y) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.sin(z / 2)),
                -jnp.cos(3 * x) * jnp.sin(y) * jnp.cos(z / 2),
                -0.5 * (jnp.sin(3 * x) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.cos(y) * jnp.sin(z / 2)),
            ]
        )

        assert jnp.allclose(res, expected, atol=tol, rtol=0)

    @pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
    def test_jacobian_repeated(self, tol, jacobian_transform):
        """Test that jacobian of a QNode with an attached default.qubit.jax device
        gives the correct result in the case of repeated parameters"""
        x = 0.43316321
        y = 0.2162158
        z = 0.75110998
        p = jnp.array([x, y, z])
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax")
        def circuit(x):
            qml.RX(x[1], wires=0)
            qml.Rot(x[0], x[1], x[2], wires=0)
            return qml.expval(qml.PauliZ(0))

        res = circuit(p)

        expected = jnp.cos(y) ** 2 - jnp.sin(x) * jnp.sin(y) ** 2
        assert jnp.allclose(res, expected, atol=tol, rtol=0)

        grad_fn = jacobian_transform(circuit, 0)
        res = grad_fn(p)

        expected = jnp.array(
            [-jnp.cos(x) * jnp.sin(y) ** 2, -2 * (jnp.sin(x) + 1) * jnp.sin(y) * jnp.cos(y), 0]
        )
        assert jnp.allclose(res, expected, atol=tol, rtol=0)

    def test_state_differentiability(self, tol):
        """Test that the device state can be differentiated"""
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(a):
            qml.RY(a, wires=0)
            return qml.expval(qml.PauliZ(0))

        a = jnp.array(0.54)

        def cost(a):
            """A function of the device quantum state, as a function
            of input QNode parameters."""
            circuit(a)
            res = jnp.abs(dev.state) ** 2
            return res[1] - res[0]

        grad = jax.grad(cost)(a)
        expected = jnp.sin(a)
        assert jnp.allclose(grad, expected, atol=tol, rtol=0)

    def test_prob_differentiability(self, tol):
        """Test that the device probability can be differentiated"""
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(a, b):
            qml.RX(a, wires=0)
            qml.RY(b, wires=1)
            qml.CNOT(wires=[0, 1])
            return qml.probs(wires=[1])

        a = jnp.array(0.54)
        b = jnp.array(0.12)

        def cost(a, b):
            prob_wire_1 = circuit(a, b).squeeze()
            return prob_wire_1[1] - prob_wire_1[0]

        res = cost(a, b)
        expected = -jnp.cos(a) * jnp.cos(b)
        assert jnp.allclose(res, expected, atol=tol, rtol=0)

        grad = jax.jit(jax.grad(cost, argnums=(0, 1)))(a, b)
        expected = [jnp.sin(a) * jnp.cos(b), jnp.cos(a) * jnp.sin(b)]
        assert jnp.allclose(grad, expected, atol=tol, rtol=0)

    def test_backprop_gradient(self, tol):
        """Tests that the gradient of the qnode is correct"""
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(a, b):
            qml.RX(a, wires=0)
            qml.CRX(b, wires=[0, 1])
            return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

        a = jnp.array(-0.234)
        b = jnp.array(0.654)

        res = circuit(a, b)
        expected_cost = 0.5 * (jnp.cos(a) * jnp.cos(b) + jnp.cos(a) - jnp.cos(b) + 1)
        assert jnp.allclose(res, expected_cost, atol=tol, rtol=0)
        res = jax.grad(lambda x, y: circuit(x, y).reshape(()), argnums=(0, 1))(a, b)
        expected_grad = jnp.array(
            [-0.5 * jnp.sin(a) * (jnp.cos(b) + 1), 0.5 * jnp.sin(b) * (1 - jnp.cos(a))]
        )
        assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)

    @pytest.mark.parametrize("operation", [qml.U3, qml.U3.decomposition])
    @pytest.mark.parametrize("diff_method", ["backprop"])
    def test_jax_interface_gradient(self, operation, diff_method, tol):
        """Tests that the gradient of an arbitrary U3 gate is correct
        using the Jax interface, using a variety of differentiation methods."""
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, diff_method=diff_method, interface="jax")
        def circuit(x, weights, w=None):
            """In this example, a mixture of scalar
            arguments, array arguments, and keyword arguments are used."""
            qml.QubitStateVector(1j * jnp.array([1, -1]) / jnp.sqrt(2), wires=w)
            operation(x, weights[0], weights[1], wires=w)
            return qml.expval(qml.PauliX(w))

        # Check that the correct QNode type is being used.
        if not qml.tape_mode_active():
            if diff_method == "backprop":
                assert isinstance(circuit, qml.qnodes.PassthruQNode)
                assert not hasattr(circuit, "jacobian")
            else:
                assert not isinstance(circuit, qml.qnodes.PassthruQNode)
                assert hasattr(circuit, "jacobian")

        def cost(params):
            """Perform some classical processing"""
            return (circuit(params[0], params[1:], w=0) ** 2).reshape(())

        theta = 0.543
        phi = -0.234
        lam = 0.654

        params = jnp.array([theta, phi, lam])

        res = cost(params)
        expected_cost = (
            jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi)
        ) ** 2
        assert jnp.allclose(res, expected_cost, atol=tol, rtol=0)

        res = jax.grad(cost)(params)
        expected_grad = (
            jnp.array(
                [
                    jnp.sin(theta) * jnp.cos(lam) * jnp.cos(phi),
                    jnp.cos(theta) * jnp.cos(lam) * jnp.sin(phi) + jnp.sin(lam) * jnp.cos(phi),
                    jnp.cos(theta) * jnp.sin(lam) * jnp.cos(phi) + jnp.cos(lam) * jnp.sin(phi),
                ]
            )
            * 2
            * (jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi))
        )
        assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)

    @pytest.mark.parametrize("interface", ["autograd", "tf", "torch"])
    def test_error_backprop_wrong_interface(self, interface, tol):
        """Tests that an error is raised if diff_method='backprop' but not using
        the Jax interface"""
        dev = qml.device("default.qubit.jax", wires=1)

        def circuit(x, w=None):
            qml.RZ(x, wires=w)
            return qml.expval(qml.PauliX(w))

        error_type = qml.QuantumFunctionError if qml.tape_mode_active() else ValueError
        with pytest.raises(
            error_type,
            match="default.qubit.jax only supports diff_method='backprop' when using the jax interface",
        ):
            qml.qnode(dev, diff_method="backprop", interface=interface)(circuit)


class TestHighLevelIntegration:
    """Tests for integration with higher level components of PennyLane."""

    def test_template_integration(self):
        """Test that a PassthruQNode using default.qubit.jax works with templates."""
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(weights):
            qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])
            return qml.expval(qml.PauliZ(0))

        weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))

        grad = jax.grad(lambda a: circuit(a).reshape(()))(weights)
        assert grad.shape == weights.shape

    def test_qnode_collection_integration(self):
        """Test that a PassthruQNode using default.qubit.jax works with QNodeCollections."""
        dev = qml.device("default.qubit.jax", wires=2)

        def ansatz(weights, **kwargs):
            qml.RX(weights[0], wires=0)
            qml.RY(weights[1], wires=1)
            qml.CNOT(wires=[0, 1])

        obs_list = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)]
        qnodes = qml.map(ansatz, obs_list, dev, interface="jax")

        if not qml.tape_mode_active():
            assert qnodes.interface == "jax"

        weights = jnp.array([0.1, 0.2])

        def cost(weights):
            return jnp.sum(jnp.array(qnodes(weights)))

        grad = jax.grad(cost)(weights)
        assert grad.shape == weights.shape

    def test_non_backprop_error(self):
        """Test that an error is raised in tape mode if the diff method is not backprop"""
        if not qml.tape_mode_active():
            pytest.skip("Test only applies in tape mode")

        dev = qml.device("default.qubit.jax", wires=2)

        def circuit(weights):
            qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])
            return qml.expval(qml.PauliZ(0))

        qnode = qml.QNode(circuit, dev, interface="jax", diff_method="parameter-shift")
        weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))

        with pytest.raises(qml.QuantumFunctionError, match="The JAX interface can only be used with"):
            qnode(weights)


class TestOps:
    """Unit tests for operations supported by the default.qubit.jax device"""

    @pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
    def test_multirz_jacobian(self, jacobian_transform):
        """Test that the patched numpy functions are used for the MultiRZ
        operation and the jacobian can be computed."""
        wires = 4
        dev = qml.device("default.qubit.jax", wires=wires)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(param):
            qml.MultiRZ(param, wires=[0, 1])
            return qml.probs(wires=list(range(wires)))

        param = 0.3
        res = jacobian_transform(circuit)(param)
        assert jnp.allclose(res, jnp.zeros(wires ** 2))

    def test_full_subsystem(self, mocker):
        """Test applying a state vector to the full subsystem"""
        dev = DefaultQubitJax(wires=["a", "b", "c"])
        state = jnp.array([1, 0, 0, 0, 1, 0, 1, 1]) / 2.0
        state_wires = qml.wires.Wires(["a", "b", "c"])

        spy = mocker.spy(dev, "_scatter")
        dev._apply_state_vector(state=state, device_wires=state_wires)

        assert jnp.all(dev._state.flatten() == state)
        spy.assert_not_called()

    def test_partial_subsystem(self, mocker):
        """Test applying a state vector to a subset of wires of the full subsystem"""
        dev = DefaultQubitJax(wires=["a", "b", "c"])
        state = jnp.array([1, 0, 1, 0]) / jnp.sqrt(2.0)
        state_wires = qml.wires.Wires(["a", "c"])

        spy = mocker.spy(dev, "_scatter")
        dev._apply_state_vector(state=state, device_wires=state_wires)
        res = jnp.sum(dev._state, axis=(1,)).flatten()

        assert jnp.all(res == state)
        spy.assert_called()
38.671371
105
0.57265
import pytest

jax = pytest.importorskip("jax", minversion="0.2")
jnp = jax.numpy
import numpy as np
import pennylane as qml
from pennylane.devices.default_qubit_jax import DefaultQubitJax

pytestmark = pytest.mark.usefixtures("tape_mode")


class TestQNodeIntegration:

    def test_defines_correct_capabilities(self):
        dev = qml.device("default.qubit.jax", wires=1)
        cap = dev.capabilities()
        capabilities = {
            "model": "qubit",
            "supports_finite_shots": True,
            "supports_tensor_observables": True,
            "returns_probs": True,
            "returns_state": True,
            "supports_reversible_diff": False,
            "supports_inverse_operations": True,
            "supports_analytic_computation": True,
            "passthru_interface": "jax",
        }
        assert cap == capabilities

    def test_defines_correct_capabilities_directly_from_class(self):
        dev = DefaultQubitJax(wires=1)
        cap = dev.capabilities()
        assert cap["supports_reversible_diff"] == False
        assert cap["passthru_interface"] == "jax"

    def test_load_device(self):
        dev = qml.device("default.qubit.jax", wires=2)
        assert dev.num_wires == 2
        assert dev.shots == 1000
        assert dev.analytic
        assert dev.short_name == "default.qubit.jax"
        assert dev.capabilities()["passthru_interface"] == "jax"

    def test_qubit_circuit(self, tol):
        p = jnp.array(0.543)

        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax")
        def circuit(x):
            qml.RX(x, wires=0)
            return qml.expval(qml.PauliY(0))

        expected = -jnp.sin(p)
        if not qml.tape_mode_active():
            assert isinstance(circuit, qml.qnodes.PassthruQNode)
        assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)

    def test_qubit_circuit_with_jit(self, tol):
        p = jnp.array(0.543)

        dev = qml.device("default.qubit.jax", wires=1)

        @jax.jit
        @qml.qnode(dev, interface="jax")
        def circuit(x):
            qml.RX(x, wires=0)
            return qml.expval(qml.PauliY(0))

        expected = -jnp.sin(p)
        assert jnp.isclose(circuit(p), expected, atol=tol, rtol=0)

    def test_correct_state(self, tol):
        dev = qml.device("default.qubit.jax", wires=2)

        state = dev.state
        expected = jnp.array([1, 0, 0, 0])
        assert jnp.allclose(state, expected, atol=tol, rtol=0)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.Hadamard(wires=0)
            qml.RZ(jnp.pi / 4, wires=0)
            return qml.expval(qml.PauliZ(0))

        circuit()
        state = dev.state

        amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)

        expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])
        assert jnp.allclose(state, expected, atol=tol, rtol=0)

    def test_correct_state_returned(self, tol):
        if not qml.tape_mode_active():
            pytest.skip("Only supported in tape mode")
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.Hadamard(wires=0)
            qml.RZ(jnp.pi / 4, wires=0)
            return qml.state()

        state = circuit()

        amplitude = jnp.exp(-1j * jnp.pi / 8) / jnp.sqrt(2)

        expected = jnp.array([amplitude, 0, jnp.conj(amplitude), 0])
        assert jnp.allclose(state, expected, atol=tol, rtol=0)

    def test_sampling_with_jit(self):
        @jax.jit
        def circuit(key):
            dev = qml.device("default.qubit.jax", wires=1, prng_key=key)

            @qml.qnode(dev, interface="jax", diff_method="backprop")
            def inner_circuit():
                qml.Hadamard(0)
                return qml.sample(qml.PauliZ(wires=0))

            return inner_circuit()

        a = circuit(jax.random.PRNGKey(0))
        b = circuit(jax.random.PRNGKey(0))
        c = circuit(jax.random.PRNGKey(1))
        np.testing.assert_array_equal(a, b)
        assert not np.all(a == c)

    def test_sampling_op_by_op(self):
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.Hadamard(0)
            return qml.sample(qml.PauliZ(wires=0))

        a = circuit()
        b = circuit()
        assert not np.all(a == b)

    def test_gates_dont_crash(self):
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.CRZ(0.0, wires=[0, 1])
            qml.CRot(1.0, 0.0, 0.0, wires=[0, 1])
            qml.CRY(0.0, wires=[0, 1])
            return qml.sample(qml.PauliZ(wires=0))

        circuit()

    def test_diagonal_doesnt_crash(self):
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax", diff_method="backprop")
        def circuit():
            qml.DiagonalQubitUnitary(np.array([1.0, 1.0]), wires=0)
            return qml.sample(qml.PauliZ(wires=0))

        circuit()  # Just don't crash.


class TestPassthruIntegration:

    @pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
    def test_jacobian_variable_multiply(self, tol, jacobian_transform):
        x = 0.43316321
        y = 0.2162158
        z = 0.75110998
        weights = jnp.array([x, y, z])

        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax")
        def circuit(p):
            qml.RX(3 * p[0], wires=0)
            qml.RY(p[1], wires=0)
            qml.RX(p[2] / 2, wires=0)
            return qml.expval(qml.PauliZ(0))

        if not qml.tape_mode_active():
            assert isinstance(circuit, qml.qnodes.PassthruQNode)

        res = circuit(weights)

        expected = jnp.cos(3 * x) * jnp.cos(y) * jnp.cos(z / 2) - jnp.sin(3 * x) * jnp.sin(z / 2)
        assert jnp.allclose(res, expected, atol=tol, rtol=0)

        grad_fn = jacobian_transform(circuit, 0)
        res = grad_fn(jnp.array(weights))

        expected = jnp.array(
            [
                -3 * (jnp.sin(3 * x) * jnp.cos(y) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.sin(z / 2)),
                -jnp.cos(3 * x) * jnp.sin(y) * jnp.cos(z / 2),
                -0.5 * (jnp.sin(3 * x) * jnp.cos(z / 2) + jnp.cos(3 * x) * jnp.cos(y) * jnp.sin(z / 2)),
            ]
        )

        assert jnp.allclose(res, expected, atol=tol, rtol=0)

    @pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
    def test_jacobian_repeated(self, tol, jacobian_transform):
        x = 0.43316321
        y = 0.2162158
        z = 0.75110998
        p = jnp.array([x, y, z])
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, interface="jax")
        def circuit(x):
            qml.RX(x[1], wires=0)
            qml.Rot(x[0], x[1], x[2], wires=0)
            return qml.expval(qml.PauliZ(0))

        res = circuit(p)

        expected = jnp.cos(y) ** 2 - jnp.sin(x) * jnp.sin(y) ** 2
        assert jnp.allclose(res, expected, atol=tol, rtol=0)

        grad_fn = jacobian_transform(circuit, 0)
        res = grad_fn(p)

        expected = jnp.array(
            [-jnp.cos(x) * jnp.sin(y) ** 2, -2 * (jnp.sin(x) + 1) * jnp.sin(y) * jnp.cos(y), 0]
        )
        assert jnp.allclose(res, expected, atol=tol, rtol=0)

    def test_state_differentiability(self, tol):
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(a):
            qml.RY(a, wires=0)
            return qml.expval(qml.PauliZ(0))

        a = jnp.array(0.54)

        def cost(a):
            circuit(a)
            res = jnp.abs(dev.state) ** 2
            return res[1] - res[0]

        grad = jax.grad(cost)(a)
        expected = jnp.sin(a)
        assert jnp.allclose(grad, expected, atol=tol, rtol=0)

    def test_prob_differentiability(self, tol):
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(a, b):
            qml.RX(a, wires=0)
            qml.RY(b, wires=1)
            qml.CNOT(wires=[0, 1])
            return qml.probs(wires=[1])

        a = jnp.array(0.54)
        b = jnp.array(0.12)

        def cost(a, b):
            prob_wire_1 = circuit(a, b).squeeze()
            return prob_wire_1[1] - prob_wire_1[0]

        res = cost(a, b)
        expected = -jnp.cos(a) * jnp.cos(b)
        assert jnp.allclose(res, expected, atol=tol, rtol=0)

        grad = jax.jit(jax.grad(cost, argnums=(0, 1)))(a, b)
        expected = [jnp.sin(a) * jnp.cos(b), jnp.cos(a) * jnp.sin(b)]
        assert jnp.allclose(grad, expected, atol=tol, rtol=0)

    def test_backprop_gradient(self, tol):
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(a, b):
            qml.RX(a, wires=0)
            qml.CRX(b, wires=[0, 1])
            return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))

        a = jnp.array(-0.234)
        b = jnp.array(0.654)

        res = circuit(a, b)
        expected_cost = 0.5 * (jnp.cos(a) * jnp.cos(b) + jnp.cos(a) - jnp.cos(b) + 1)
        assert jnp.allclose(res, expected_cost, atol=tol, rtol=0)
        res = jax.grad(lambda x, y: circuit(x, y).reshape(()), argnums=(0, 1))(a, b)
        expected_grad = jnp.array(
            [-0.5 * jnp.sin(a) * (jnp.cos(b) + 1), 0.5 * jnp.sin(b) * (1 - jnp.cos(a))]
        )
        assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)

    @pytest.mark.parametrize("operation", [qml.U3, qml.U3.decomposition])
    @pytest.mark.parametrize("diff_method", ["backprop"])
    def test_jax_interface_gradient(self, operation, diff_method, tol):
        dev = qml.device("default.qubit.jax", wires=1)

        @qml.qnode(dev, diff_method=diff_method, interface="jax")
        def circuit(x, weights, w=None):
            qml.QubitStateVector(1j * jnp.array([1, -1]) / jnp.sqrt(2), wires=w)
            operation(x, weights[0], weights[1], wires=w)
            return qml.expval(qml.PauliX(w))

        if not qml.tape_mode_active():
            if diff_method == "backprop":
                assert isinstance(circuit, qml.qnodes.PassthruQNode)
                assert not hasattr(circuit, "jacobian")
            else:
                assert not isinstance(circuit, qml.qnodes.PassthruQNode)
                assert hasattr(circuit, "jacobian")

        def cost(params):
            return (circuit(params[0], params[1:], w=0) ** 2).reshape(())

        theta = 0.543
        phi = -0.234
        lam = 0.654

        params = jnp.array([theta, phi, lam])

        res = cost(params)
        expected_cost = (
            jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi)
        ) ** 2
        assert jnp.allclose(res, expected_cost, atol=tol, rtol=0)

        res = jax.grad(cost)(params)
        expected_grad = (
            jnp.array(
                [
                    jnp.sin(theta) * jnp.cos(lam) * jnp.cos(phi),
                    jnp.cos(theta) * jnp.cos(lam) * jnp.sin(phi) + jnp.sin(lam) * jnp.cos(phi),
                    jnp.cos(theta) * jnp.sin(lam) * jnp.cos(phi) + jnp.cos(lam) * jnp.sin(phi),
                ]
            )
            * 2
            * (jnp.sin(lam) * jnp.sin(phi) - jnp.cos(theta) * jnp.cos(lam) * jnp.cos(phi))
        )
        assert jnp.allclose(res, expected_grad, atol=tol, rtol=0)

    @pytest.mark.parametrize("interface", ["autograd", "tf", "torch"])
    def test_error_backprop_wrong_interface(self, interface, tol):
        dev = qml.device("default.qubit.jax", wires=1)

        def circuit(x, w=None):
            qml.RZ(x, wires=w)
            return qml.expval(qml.PauliX(w))

        error_type = qml.QuantumFunctionError if qml.tape_mode_active() else ValueError
        with pytest.raises(
            error_type,
            match="default.qubit.jax only supports diff_method='backprop' when using the jax interface",
        ):
            qml.qnode(dev, diff_method="backprop", interface=interface)(circuit)


class TestHighLevelIntegration:

    def test_template_integration(self):
        dev = qml.device("default.qubit.jax", wires=2)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(weights):
            qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])
            return qml.expval(qml.PauliZ(0))

        weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))

        grad = jax.grad(lambda a: circuit(a).reshape(()))(weights)
        assert grad.shape == weights.shape

    def test_qnode_collection_integration(self):
        dev = qml.device("default.qubit.jax", wires=2)

        def ansatz(weights, **kwargs):
            qml.RX(weights[0], wires=0)
            qml.RY(weights[1], wires=1)
            qml.CNOT(wires=[0, 1])

        obs_list = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)]
        qnodes = qml.map(ansatz, obs_list, dev, interface="jax")

        if not qml.tape_mode_active():
            assert qnodes.interface == "jax"

        weights = jnp.array([0.1, 0.2])

        def cost(weights):
            return jnp.sum(jnp.array(qnodes(weights)))

        grad = jax.grad(cost)(weights)
        assert grad.shape == weights.shape

    def test_non_backprop_error(self):
        if not qml.tape_mode_active():
            pytest.skip("Test only applies in tape mode")

        dev = qml.device("default.qubit.jax", wires=2)

        def circuit(weights):
            qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1])
            return qml.expval(qml.PauliZ(0))

        qnode = qml.QNode(circuit, dev, interface="jax", diff_method="parameter-shift")
        weights = jnp.array(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))

        with pytest.raises(qml.QuantumFunctionError, match="The JAX interface can only be used with"):
            qnode(weights)


class TestOps:

    @pytest.mark.parametrize("jacobian_transform", [jax.jacfwd, jax.jacrev])
    def test_multirz_jacobian(self, jacobian_transform):
        wires = 4
        dev = qml.device("default.qubit.jax", wires=wires)

        @qml.qnode(dev, diff_method="backprop", interface="jax")
        def circuit(param):
            qml.MultiRZ(param, wires=[0, 1])
            return qml.probs(wires=list(range(wires)))

        param = 0.3
        res = jacobian_transform(circuit)(param)
        assert jnp.allclose(res, jnp.zeros(wires ** 2))

    def test_full_subsystem(self, mocker):
        dev = DefaultQubitJax(wires=["a", "b", "c"])
        state = jnp.array([1, 0, 0, 0, 1, 0, 1, 1]) / 2.0
        state_wires = qml.wires.Wires(["a", "b", "c"])

        spy = mocker.spy(dev, "_scatter")
        dev._apply_state_vector(state=state, device_wires=state_wires)

        assert jnp.all(dev._state.flatten() == state)
        spy.assert_not_called()

    def test_partial_subsystem(self, mocker):
        dev = DefaultQubitJax(wires=["a", "b", "c"])
        state = jnp.array([1, 0, 1, 0]) / jnp.sqrt(2.0)
        state_wires = qml.wires.Wires(["a", "c"])

        spy = mocker.spy(dev, "_scatter")
        dev._apply_state_vector(state=state, device_wires=state_wires)
        res = jnp.sum(dev._state, axis=(1,)).flatten()

        assert jnp.all(res == state)
        spy.assert_called()
true
true
f703c319ac98938777a90feb59bfeb1ef81ebb42
2,199
py
Python
src/pytest_mock_resources/fixture/database/relational/redshift/__init__.py
schireson/pytest-mock-resources
a09fc18eeeac06c5589854ce200fa45f64c81cb5
[ "MIT" ]
49
2020-01-24T21:08:43.000Z
2022-03-31T23:55:21.000Z
src/pytest_mock_resources/fixture/database/relational/redshift/__init__.py
schireson/pytest-mock-resources
a09fc18eeeac06c5589854ce200fa45f64c81cb5
[ "MIT" ]
29
2020-03-11T19:07:50.000Z
2022-03-30T16:49:06.000Z
src/pytest_mock_resources/fixture/database/relational/redshift/__init__.py
schireson/pytest-mock-resources
a09fc18eeeac06c5589854ce200fa45f64c81cb5
[ "MIT" ]
10
2020-01-23T19:04:09.000Z
2022-02-22T19:57:54.000Z
import pytest

from pytest_mock_resources.fixture.database.generic import assign_fixture_credentials
from pytest_mock_resources.fixture.database.relational.generic import EngineManager
from pytest_mock_resources.fixture.database.relational.postgresql import (
    _create_clean_database,
    get_sqlalchemy_engine,
)
from pytest_mock_resources.patch.redshift import psycopg2, sqlalchemy


def create_redshift_fixture(*ordered_actions, scope="function", tables=None, session=None):
    """Produce a Redshift fixture.

    Any number of fixture functions can be created. Under the hood they will
    all share the same database server.

    Arguments:
        ordered_actions: Any number of ordered actions to be run on test setup.
        scope: Passthrough pytest's fixture scope.
        tables: Subsets the tables created by `ordered_actions`. This is generally
            most useful when a model-base was specified in `ordered_actions`.
        session: Whether to return a session instead of an engine directly. This can
            either be a bool or a callable capable of producing a session.
    """
    from pytest_mock_resources.fixture.database.relational.redshift.udf import REDSHIFT_UDFS

    ordered_actions = ordered_actions + (REDSHIFT_UDFS,)

    @pytest.fixture(scope=scope)
    def _(_redshift_container, pmr_postgres_config):
        database_name = _create_clean_database(pmr_postgres_config)
        engine = get_sqlalchemy_engine(pmr_postgres_config, database_name)

        assign_fixture_credentials(
            engine,
            drivername="postgresql+psycopg2",
            host=pmr_postgres_config.host,
            port=pmr_postgres_config.port,
            database=database_name,
            username=pmr_postgres_config.username,
            password=pmr_postgres_config.password,
        )

        engine = sqlalchemy.substitute_execute_with_custom_execute(engine)
        engine_manager = EngineManager(
            engine, ordered_actions, tables=tables, default_schema="public"
        )

        with psycopg2.patch_connect(pmr_postgres_config):
            for engine in engine_manager.manage(session=session):
                yield engine

    return _
39.267857
95
0.729877
import pytest

from pytest_mock_resources.fixture.database.generic import assign_fixture_credentials
from pytest_mock_resources.fixture.database.relational.generic import EngineManager
from pytest_mock_resources.fixture.database.relational.postgresql import (
    _create_clean_database,
    get_sqlalchemy_engine,
)
from pytest_mock_resources.patch.redshift import psycopg2, sqlalchemy


def create_redshift_fixture(*ordered_actions, scope="function", tables=None, session=None):
    from pytest_mock_resources.fixture.database.relational.redshift.udf import REDSHIFT_UDFS

    ordered_actions = ordered_actions + (REDSHIFT_UDFS,)

    @pytest.fixture(scope=scope)
    def _(_redshift_container, pmr_postgres_config):
        database_name = _create_clean_database(pmr_postgres_config)
        engine = get_sqlalchemy_engine(pmr_postgres_config, database_name)

        assign_fixture_credentials(
            engine,
            drivername="postgresql+psycopg2",
            host=pmr_postgres_config.host,
            port=pmr_postgres_config.port,
            database=database_name,
            username=pmr_postgres_config.username,
            password=pmr_postgres_config.password,
        )

        engine = sqlalchemy.substitute_execute_with_custom_execute(engine)
        engine_manager = EngineManager(
            engine, ordered_actions, tables=tables, default_schema="public"
        )

        with psycopg2.patch_connect(pmr_postgres_config):
            for engine in engine_manager.manage(session=session):
                yield engine

    return _
true
true
f703c343b85bbcb32c648a5f8668d2512894a13a
2,586
py
Python
IMLearn/learners/regressors/polynomial_fitting.py
RotemBadash/IML.HUJI
2b20d074c159123f61b321a7e84312ab82400949
[ "MIT" ]
null
null
null
IMLearn/learners/regressors/polynomial_fitting.py
RotemBadash/IML.HUJI
2b20d074c159123f61b321a7e84312ab82400949
[ "MIT" ]
null
null
null
IMLearn/learners/regressors/polynomial_fitting.py
RotemBadash/IML.HUJI
2b20d074c159123f61b321a7e84312ab82400949
[ "MIT" ]
null
null
null
from __future__ import annotations
from typing import NoReturn
from . import LinearRegression
from ...base import BaseEstimator
import numpy as np


class PolynomialFitting(BaseEstimator):
    """
    Polynomial Fitting using Least Squares estimation
    """
    def __init__(self, k: int) -> PolynomialFitting:
        """
        Instantiate a polynomial fitting estimator

        Parameters
        ----------
        k : int
            Degree of polynomial to fit
        """
        super().__init__()
        self.degree = k
        self.linear_regression_model = LinearRegression(
            include_intercept=False)

    def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
        """
        Fit Least Squares model to polynomial transformed samples

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to fit an estimator for

        y : ndarray of shape (n_samples, )
            Responses of input data to fit to
        """
        x = self.__transform(X)
        self.linear_regression_model.fit(x, y)

    def _predict(self, X: np.ndarray) -> np.ndarray:
        """
        Predict responses for given samples using fitted estimator

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to predict responses for

        Returns
        -------
        responses : ndarray of shape (n_samples, )
            Predicted responses of given samples
        """
        x = self.__transform(X)
        return self.linear_regression_model.predict(x)

    def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
        """
        Evaluate performance under MSE loss function

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test samples

        y : ndarray of shape (n_samples, )
            True labels of test samples

        Returns
        -------
        loss : float
            Performance under MSE loss function
        """
        x = self.__transform(X)
        return self.linear_regression_model.loss(x, y)

    def __transform(self, X: np.ndarray) -> np.ndarray:
        """
        Transform given input according to the univariate polynomial
        transformation

        Parameters
        ----------
        X: ndarray of shape (n_samples,)

        Returns
        -------
        transformed: ndarray of shape (n_samples, k+1)
            Vandermonde matrix of given samples up to degree k
        """
        return np.vander(X, N=self.degree+1, increasing=True)
28.108696
68
0.5785
from __future__ import annotations
from typing import NoReturn
from . import LinearRegression
from ...base import BaseEstimator
import numpy as np


class PolynomialFitting(BaseEstimator):

    def __init__(self, k: int) -> PolynomialFitting:
        super().__init__()
        self.degree = k
        self.linear_regression_model = LinearRegression(
            include_intercept=False)

    def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
        x = self.__transform(X)
        self.linear_regression_model.fit(x, y)

    def _predict(self, X: np.ndarray) -> np.ndarray:
        x = self.__transform(X)
        return self.linear_regression_model.predict(x)

    def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
        x = self.__transform(X)
        return self.linear_regression_model.loss(x, y)

    def __transform(self, X: np.ndarray) -> np.ndarray:
        return np.vander(X, N=self.degree+1, increasing=True)
true
true
f703c645aa7c72fe4e04cdbf60f8a8cc84e7b617
1,238
py
Python
alipay/aop/api/domain/PriceInformation.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
213
2018-08-27T16:49:32.000Z
2021-12-29T04:34:12.000Z
alipay/aop/api/domain/PriceInformation.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
29
2018-09-29T06:43:00.000Z
2021-09-02T03:27:32.000Z
alipay/aop/api/domain/PriceInformation.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
59
2018-08-27T16:59:26.000Z
2022-03-25T10:08:15.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *


class PriceInformation(object):

    def __init__(self):
        self._amount = None
        self._type = None

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        params = dict()
        if self.amount:
            if hasattr(self.amount, 'to_alipay_dict'):
                params['amount'] = self.amount.to_alipay_dict()
            else:
                params['amount'] = self.amount
        if self.type:
            if hasattr(self.type, 'to_alipay_dict'):
                params['type'] = self.type.to_alipay_dict()
            else:
                params['type'] = self.type
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = PriceInformation()
        if 'amount' in d:
            o.amount = d['amount']
        if 'type' in d:
            o.type = d['type']
        return o
22.107143
63
0.53958
import json

from alipay.aop.api.constant.ParamConstants import *


class PriceInformation(object):

    def __init__(self):
        self._amount = None
        self._type = None

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        params = dict()
        if self.amount:
            if hasattr(self.amount, 'to_alipay_dict'):
                params['amount'] = self.amount.to_alipay_dict()
            else:
                params['amount'] = self.amount
        if self.type:
            if hasattr(self.type, 'to_alipay_dict'):
                params['type'] = self.type.to_alipay_dict()
            else:
                params['type'] = self.type
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = PriceInformation()
        if 'amount' in d:
            o.amount = d['amount']
        if 'type' in d:
            o.type = d['type']
        return o
true
true
f703c684e0729337f837439e10d1d599b1cb1d38
8,121
py
Python
tests/unit/controllers/api/v1/test_traces_rbac.py
ankita-orchestral/st2-rbac-backend
c90191dd1ff126362f9fdecca0fa1b694288641f
[ "Apache-2.0" ]
1
2020-09-21T16:05:31.000Z
2020-09-21T16:05:31.000Z
tests/unit/controllers/api/v1/test_traces_rbac.py
ankita-orchestral/st2-rbac-backend
c90191dd1ff126362f9fdecca0fa1b694288641f
[ "Apache-2.0" ]
18
2020-09-18T19:07:03.000Z
2022-02-25T07:02:17.000Z
tests/unit/controllers/api/v1/test_traces_rbac.py
ankita-orchestral/st2-rbac-backend
c90191dd1ff126362f9fdecca0fa1b694288641f
[ "Apache-2.0" ]
4
2020-08-27T12:24:51.000Z
2021-09-22T10:09:18.000Z
# Copyright 2020 The StackStorm Authors.
# Copyright (C) 2020 Extreme Networks, Inc - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six

from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2tests.fixturesloader import FixturesLoader

from st2api.controllers.v1.traces import TracesController

from tests.base import APIControllerWithRBACTestCase
from st2tests.api import APIControllerWithIncludeAndExcludeFilterTestCase

http_client = six.moves.http_client

__all__ = [
    'TraceControllerRBACTestCase'
]

FIXTURES_PACK = 'generic'
TEST_FIXTURES = {
    'traces': ['trace_for_test_enforce.yaml', 'trace_for_test_enforce_2.yaml',
               'trace_for_test_enforce_3.yaml'],
}


class TraceControllerRBACTestCase(APIControllerWithRBACTestCase,
                                  APIControllerWithIncludeAndExcludeFilterTestCase):
    # Attributes used by APIControllerWithIncludeAndExcludeFilterTestCase
    get_all_path = '/v1/traces'
    controller_cls = TracesController
    include_attribute_field_name = 'trace_tag'
    exclude_attribute_field_name = 'start_timestamp'
    rbac_enabled = True

    fixtures_loader = FixturesLoader()

    def setUp(self):
        super(TraceControllerRBACTestCase, self).setUp()
        self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
                                                               fixtures_dict=TEST_FIXTURES)

        file_name = 'trace_for_test_enforce.yaml'
        TraceControllerRBACTestCase.TRACE_1 = self.fixtures_loader.load_fixtures(
            fixtures_pack=FIXTURES_PACK,
            fixtures_dict={'traces': [file_name]})['traces'][file_name]

        file_name = 'trace_for_test_enforce_2.yaml'
        TraceControllerRBACTestCase.TRACE_1 = self.fixtures_loader.load_fixtures(
            fixtures_pack=FIXTURES_PACK,
            fixtures_dict={'traces': [file_name]})['traces'][file_name]

        file_name = 'trace_for_test_enforce_3.yaml'
        TraceControllerRBACTestCase.TRACE_1 = self.fixtures_loader.load_fixtures(
            fixtures_pack=FIXTURES_PACK,
            fixtures_dict={'traces': [file_name]})['traces'][file_name]

        # Insert mock users, roles and assignments

        # Users
        user_1_db = UserDB(name='trace_list')
        user_1_db = User.add_or_update(user_1_db)
        self.users['trace_list'] = user_1_db

        user_2_db = UserDB(name='trace_view')
        user_2_db = User.add_or_update(user_2_db)
        self.users['trace_view'] = user_2_db

        # Roles
        # trace_list
        grant_db = PermissionGrantDB(resource_uid=None,
                                     resource_type=ResourceType.TRACE,
                                     permission_types=[PermissionType.TRACE_LIST])
        grant_db = PermissionGrant.add_or_update(grant_db)
        permission_grants = [str(grant_db.id)]
        role_1_db = RoleDB(name='trace_list', permission_grants=permission_grants)
        role_1_db = Role.add_or_update(role_1_db)
        self.roles['trace_list'] = role_1_db

        # trace_view on trace 1
        trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
        grant_db = PermissionGrantDB(resource_uid=trace_uid,
                                     resource_type=ResourceType.TRACE,
                                     permission_types=[PermissionType.TRACE_VIEW])
        grant_db = PermissionGrant.add_or_update(grant_db)
        permission_grants = [str(grant_db.id)]
        role_1_db = RoleDB(name='trace_view', permission_grants=permission_grants)
        role_1_db = Role.add_or_update(role_1_db)
        self.roles['trace_view'] = role_1_db

        # Role assignments
        role_assignment_db = UserRoleAssignmentDB(
            user=self.users['trace_list'].name,
            role=self.roles['trace_list'].name,
            source='assignments/%s.yaml' % self.users['trace_list'].name)
        UserRoleAssignment.add_or_update(role_assignment_db)

        role_assignment_db = UserRoleAssignmentDB(
            user=self.users['trace_view'].name,
            role=self.roles['trace_view'].name,
            source='assignments/%s.yaml' % self.users['trace_view'].name)
        UserRoleAssignment.add_or_update(role_assignment_db)

    def test_get_all_no_permissions(self):
        user_db = self.users['no_permissions']
        self.use_user(user_db)

        resp = self.app.get('/v1/traces', expect_errors=True)
        expected_msg = ('User "no_permissions" doesn\'t have required permission "trace_list"')
        self.assertEqual(resp.status_code, http_client.FORBIDDEN)
        self.assertEqual(resp.json['faultstring'], expected_msg)

    def test_get_one_no_permissions(self):
        user_db = self.users['no_permissions']
        self.use_user(user_db)

        trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
        trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
        resp = self.app.get('/v1/traces/%s' % (trace_id), expect_errors=True)
        expected_msg = ('User "no_permissions" doesn\'t have required permission "trace_view"'
                        ' on resource "%s"' % (trace_uid))
        self.assertEqual(resp.status_code, http_client.FORBIDDEN)
        self.assertEqual(resp.json['faultstring'], expected_msg)

    def test_get_all_permission_success_get_one_no_permission_failure(self):
        user_db = self.users['trace_list']
        self.use_user(user_db)

        # trace_list permission, but no trace_view permission
        resp = self.app.get('/v1/traces')
        self.assertEqual(resp.status_code, http_client.OK)
        self.assertEqual(len(resp.json), 3)

        trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
        trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
        resp = self.app.get('/v1/traces/%s' % (trace_id), expect_errors=True)
        expected_msg = ('User "trace_list" doesn\'t have required permission "trace_view"'
                        ' on resource "%s"' % (trace_uid))
        self.assertEqual(resp.status_code, http_client.FORBIDDEN)
        self.assertEqual(resp.json['faultstring'], expected_msg)

    def test_get_one_permission_success_get_all_no_permission_failure(self):
        user_db = self.users['trace_view']
        self.use_user(user_db)

        # trace_view permission, but no trace_list permission
        trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
        trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
        resp = self.app.get('/v1/traces/%s' % (trace_id))
        self.assertEqual(resp.status_code, http_client.OK)
        self.assertEqual(resp.json['uid'], trace_uid)

        resp = self.app.get('/v1/traces', expect_errors=True)
        expected_msg = ('User "trace_view" doesn\'t have required permission "trace_list"')
        self.assertEqual(resp.status_code, http_client.FORBIDDEN)
        self.assertEqual(resp.json['faultstring'], expected_msg)

    def _insert_mock_models(self):
        trace_ids = [trace['id'] for trace in self.models['traces'].values()]
        return trace_ids
44.377049
95
0.695111
import six

from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2tests.fixturesloader import FixturesLoader

from st2api.controllers.v1.traces import TracesController

from tests.base import APIControllerWithRBACTestCase
from st2tests.api import APIControllerWithIncludeAndExcludeFilterTestCase

http_client = six.moves.http_client

__all__ = [
    'TraceControllerRBACTestCase'
]

FIXTURES_PACK = 'generic'
TEST_FIXTURES = {
    'traces': ['trace_for_test_enforce.yaml', 'trace_for_test_enforce_2.yaml',
               'trace_for_test_enforce_3.yaml'],
}


class TraceControllerRBACTestCase(APIControllerWithRBACTestCase,
                                  APIControllerWithIncludeAndExcludeFilterTestCase):
    get_all_path = '/v1/traces'
    controller_cls = TracesController
    include_attribute_field_name = 'trace_tag'
    exclude_attribute_field_name = 'start_timestamp'
    rbac_enabled = True

    fixtures_loader = FixturesLoader()

    def setUp(self):
        super(TraceControllerRBACTestCase, self).setUp()
        self.models = self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
                                                               fixtures_dict=TEST_FIXTURES)

        file_name = 'trace_for_test_enforce.yaml'
        TraceControllerRBACTestCase.TRACE_1 = self.fixtures_loader.load_fixtures(
            fixtures_pack=FIXTURES_PACK,
            fixtures_dict={'traces': [file_name]})['traces'][file_name]

        file_name = 'trace_for_test_enforce_2.yaml'
        TraceControllerRBACTestCase.TRACE_1 = self.fixtures_loader.load_fixtures(
            fixtures_pack=FIXTURES_PACK,
            fixtures_dict={'traces': [file_name]})['traces'][file_name]

        file_name = 'trace_for_test_enforce_3.yaml'
        TraceControllerRBACTestCase.TRACE_1 = self.fixtures_loader.load_fixtures(
            fixtures_pack=FIXTURES_PACK,
            fixtures_dict={'traces': [file_name]})['traces'][file_name]

        user_1_db = UserDB(name='trace_list')
        user_1_db = User.add_or_update(user_1_db)
        self.users['trace_list'] = user_1_db

        user_2_db = UserDB(name='trace_view')
        user_2_db = User.add_or_update(user_2_db)
        self.users['trace_view'] = user_2_db

        grant_db = PermissionGrantDB(resource_uid=None,
                                     resource_type=ResourceType.TRACE,
                                     permission_types=[PermissionType.TRACE_LIST])
        grant_db = PermissionGrant.add_or_update(grant_db)
        permission_grants = [str(grant_db.id)]
        role_1_db = RoleDB(name='trace_list', permission_grants=permission_grants)
        role_1_db = Role.add_or_update(role_1_db)
        self.roles['trace_list'] = role_1_db

        trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
        grant_db = PermissionGrantDB(resource_uid=trace_uid,
                                     resource_type=ResourceType.TRACE,
                                     permission_types=[PermissionType.TRACE_VIEW])
        grant_db = PermissionGrant.add_or_update(grant_db)
        permission_grants = [str(grant_db.id)]
        role_1_db = RoleDB(name='trace_view', permission_grants=permission_grants)
        role_1_db = Role.add_or_update(role_1_db)
        self.roles['trace_view'] = role_1_db

        role_assignment_db = UserRoleAssignmentDB(
            user=self.users['trace_list'].name,
            role=self.roles['trace_list'].name,
            source='assignments/%s.yaml' % self.users['trace_list'].name)
        UserRoleAssignment.add_or_update(role_assignment_db)

        role_assignment_db = UserRoleAssignmentDB(
            user=self.users['trace_view'].name,
            role=self.roles['trace_view'].name,
            source='assignments/%s.yaml' % self.users['trace_view'].name)
        UserRoleAssignment.add_or_update(role_assignment_db)

    def test_get_all_no_permissions(self):
        user_db = self.users['no_permissions']
        self.use_user(user_db)

        resp = self.app.get('/v1/traces', expect_errors=True)
        expected_msg = ('User "no_permissions" doesn\'t have required permission "trace_list"')
        self.assertEqual(resp.status_code, http_client.FORBIDDEN)
        self.assertEqual(resp.json['faultstring'], expected_msg)

    def test_get_one_no_permissions(self):
        user_db = self.users['no_permissions']
        self.use_user(user_db)

        trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
        trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
        resp = self.app.get('/v1/traces/%s' % (trace_id), expect_errors=True)
        expected_msg = ('User "no_permissions" doesn\'t have required permission "trace_view"'
                        ' on resource "%s"' % (trace_uid))
        self.assertEqual(resp.status_code, http_client.FORBIDDEN)
        self.assertEqual(resp.json['faultstring'], expected_msg)

    def test_get_all_permission_success_get_one_no_permission_failure(self):
        user_db = self.users['trace_list']
        self.use_user(user_db)

        resp = self.app.get('/v1/traces')
        self.assertEqual(resp.status_code, http_client.OK)
        self.assertEqual(len(resp.json), 3)

        trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
        trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
        resp = self.app.get('/v1/traces/%s' % (trace_id), expect_errors=True)
        expected_msg = ('User "trace_list" doesn\'t have required permission "trace_view"'
                        ' on resource "%s"' % (trace_uid))
        self.assertEqual(resp.status_code, http_client.FORBIDDEN)
        self.assertEqual(resp.json['faultstring'], expected_msg)

    def test_get_one_permission_success_get_all_no_permission_failure(self):
        user_db = self.users['trace_view']
        self.use_user(user_db)

        # trace_view permission, but no trace_list permission
        trace_id = self.models['traces']['trace_for_test_enforce.yaml'].id
        trace_uid = self.models['traces']['trace_for_test_enforce.yaml'].get_uid()
        resp = self.app.get('/v1/traces/%s' % (trace_id))
        self.assertEqual(resp.status_code, http_client.OK)
        self.assertEqual(resp.json['uid'], trace_uid)

        resp = self.app.get('/v1/traces', expect_errors=True)
        expected_msg = ('User "trace_view" doesn\'t have required permission "trace_list"')
        self.assertEqual(resp.status_code, http_client.FORBIDDEN)
        self.assertEqual(resp.json['faultstring'], expected_msg)

    def _insert_mock_models(self):
        trace_ids = [trace['id'] for trace in self.models['traces'].values()]
        return trace_ids
true
true
f703c79631f36821717c501608afedc68f7f2caf
154
py
Python
Lesson_2.py
Justmove08/Lesson_2
8f88619c0bf0140be9f4b8e24f7a7852758de55a
[ "MIT" ]
null
null
null
Lesson_2.py
Justmove08/Lesson_2
8f88619c0bf0140be9f4b8e24f7a7852758de55a
[ "MIT" ]
null
null
null
Lesson_2.py
Justmove08/Lesson_2
8f88619c0bf0140be9f4b8e24f7a7852758de55a
[ "MIT" ]
null
null
null
import requests
import json

# Daily exchange-rate feed of the Central Bank of Russia (JSON).
url = "https://www.cbr-xml-daily.ru/daily_json.js"

# Fetch the feed and parse the JSON body into a dict.
response = requests.get(url)
data = json.loads(response.text)
print(data)
19.25
50
0.753247
import requests
import json

url = "https://www.cbr-xml-daily.ru/daily_json.js"

response = requests.get(url)
data = json.loads(response.text)
print(data)
true
true
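The Lesson_2.py record above prints the whole CBR feed; a minimal follow-up sketch (not part of the record, and assuming the feed's usual 'Valute' layout) picks out a single rate:

import requests

data = requests.get("https://www.cbr-xml-daily.ru/daily_json.js").json()

# Rates are keyed by ISO code under "Valute"; "Value" is the price in RUB.
usd = data["Valute"]["USD"]
print(usd["Name"], usd["Value"])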
f703c7f57d121c0c204bdc8eea1d7ea29ffacb56
727
py
Python
authors/apps/reports/models.py
andela/ah-backend-dojo
f2b14f15c4af906da846cafe722f13868d58371f
[ "BSD-3-Clause" ]
3
2019-05-01T10:41:09.000Z
2021-04-25T22:17:20.000Z
authors/apps/reports/models.py
andela/ah-backend-dojo
f2b14f15c4af906da846cafe722f13868d58371f
[ "BSD-3-Clause" ]
24
2019-04-23T14:56:21.000Z
2021-12-13T19:58:37.000Z
authors/apps/reports/models.py
andela/ah-backend-dojo
f2b14f15c4af906da846cafe722f13868d58371f
[ "BSD-3-Clause" ]
4
2019-06-29T10:40:32.000Z
2022-01-04T11:44:53.000Z
from django.db import models

from authors import settings
from authors.apps.articles.models import Article
from authors.apps.profiles.models import Profile

# Create your models here.
class ReportArticle(models.Model):
    """model for reporting an article"""
    reporter = models.ForeignKey(Profile, on_delete=models.CASCADE)
    article = models.ForeignKey(Article, to_field="slug", on_delete=models.CASCADE)
    violation_subject = models.CharField(max_length=100, blank=False, null=False)
    violation_report = models.CharField(max_length=300, blank=True, null=True)
    report_status = models.CharField(max_length=20, default='pending')
    submission_date = models.DateTimeField(auto_now_add=True, editable=False)
45.4375
83
0.779917
from django.db import models

from authors import settings
from authors.apps.articles.models import Article
from authors.apps.profiles.models import Profile


class ReportArticle(models.Model):
    reporter = models.ForeignKey(Profile, on_delete=models.CASCADE)
    article = models.ForeignKey(Article, to_field="slug", on_delete=models.CASCADE)
    violation_subject = models.CharField(max_length=100, blank=False, null=False)
    violation_report = models.CharField(max_length=300, blank=True, null=True)
    report_status = models.CharField(max_length=20, default='pending')
    submission_date = models.DateTimeField(auto_now_add=True, editable=False)
true
true
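A hedged usage sketch for the ReportArticle model above; the two helper functions are illustrative, not part of the record:

from authors.apps.reports.models import ReportArticle


def file_report(profile, article, subject, details=""):
    # report_status defaults to 'pending' on the model
    return ReportArticle.objects.create(
        reporter=profile,
        article=article,
        violation_subject=subject,
        violation_report=details,
    )


def pending_reports():
    # newest unresolved reports first
    return ReportArticle.objects.filter(report_status="pending").order_by("-submission_date")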
f703c8360e791059085f84845e7382a4d897f455
3,203
py
Python
code_demo/python_framework/3_app_class.py
PegasusWang/notebooks
78a88de2ed2e858d3b1f997d0e5155a37e70b82c
[ "MIT" ]
38
2018-02-14T05:40:17.000Z
2021-09-26T10:14:57.000Z
code_demo/python_framework/3_app_class.py
PegasusWang/notebooks
78a88de2ed2e858d3b1f997d0e5155a37e70b82c
[ "MIT" ]
null
null
null
code_demo/python_framework/3_app_class.py
PegasusWang/notebooks
78a88de2ed2e858d3b1f997d0e5155a37e70b82c
[ "MIT" ]
11
2018-02-13T22:52:59.000Z
2020-07-14T04:04:54.000Z
# -*- coding: utf-8 -*-

import re

from six.moves import http_client
from six.moves import urllib
from wsgiref.headers import Headers


class Request(object):
    def __init__(self, environ):
        self.environ = environ

    @property
    def path(self):
        return self.environ['PATH_INFO']

    @property
    def args(self):
        """Convert the query string into a dict."""
        get_arguments = urllib.parse.parse_qs(self.environ['QUERY_STRING'])
        return {k: v[0] for k, v in get_arguments.items()}


class Response(object):
    def __init__(self, response=None, status=200, charset='utf-8', content_type='text/html'):
        self.response = [] if response is None else response
        self.charset = charset
        self.headers = Headers([])
        content_type = '{content_type}; charset={charset}'.format(content_type=content_type,
                                                                  charset=charset)
        self.headers.add_header('content-type', content_type)
        self._status = status

    @property
    def status(self):
        status_string = http_client.responses.get(self._status, 'UNKNOWN')
        return '{status} {status_string}'.format(status=self._status,
                                                 status_string=status_string)

    def __iter__(self):
        for val in self.response:
            if isinstance(val, bytes):
                yield val
            else:
                yield val.encode(self.charset)


# Try a new application that combines Request and Response:
def request_response_application(func):
    def application(environ, start_response):
        request = Request(environ)
        response = func(request)
        start_response(
            response.status,
            response.headers.items()
        )
        return iter(response)
    return application


class NotFoundError(Exception):
    """ url pattern not found """
    pass


class DecoratorRouter:
    def __init__(self):
        self.routing_table = []  # stores (url pattern, callable) pairs

    def match(self, path):
        for (pattern, callback) in self.routing_table:
            m = re.match(pattern, path)
            if m:
                return (callback, m.groups())
        raise NotFoundError()

    def __call__(self, pattern):
        def _(func):
            self.routing_table.append((pattern, func))
            return func  # keep the decorated name bound to the original function
        return _


routers = DecoratorRouter()


@routers(r'/hello/(.*)/$')
def hello(request, name):
    return Response("<h1>Hello, {name}</h1>".format(name=name))


@routers(r'/goodbye/(.*)/$')
def goodbye(request, name):
    return Response("<h1>Goodbye, {name}</h1>".format(name=name))


class Application(object):
    def __init__(self, routers, **kwargs):
        self.routers = routers

    def __call__(self, environ, start_response):
        try:
            request = Request(environ)
            # use the router passed to the constructor, not the module global
            callback, args = self.routers.match(request.path)
            response = callback(request, *args)
        except NotFoundError:
            response = Response("<h1>Not found</h1>", status=404)
        start_response(response.status, response.headers.items())
        return iter(response)


application = Application(routers)

if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    httpd = make_server('127.0.0.1', 8000, application)
    httpd.serve_forever()
27.612069
109
0.634093
import re

from six.moves import http_client
from six.moves import urllib
from wsgiref.headers import Headers


class Request(object):
    def __init__(self, environ):
        self.environ = environ

    @property
    def path(self):
        return self.environ['PATH_INFO']

    @property
    def args(self):
        get_arguments = urllib.parse.parse_qs(self.environ['QUERY_STRING'])
        return {k: v[0] for k, v in get_arguments.items()}


class Response(object):
    def __init__(self, response=None, status=200, charset='utf-8', content_type='text/html'):
        self.response = [] if response is None else response
        self.charset = charset
        self.headers = Headers([])
        content_type = '{content_type}; charset={charset}'.format(content_type=content_type,
                                                                  charset=charset)
        self.headers.add_header('content-type', content_type)
        self._status = status

    @property
    def status(self):
        status_string = http_client.responses.get(self._status, 'UNKNOWN')
        return '{status} {status_string}'.format(status=self._status,
                                                 status_string=status_string)

    def __iter__(self):
        for val in self.response:
            if isinstance(val, bytes):
                yield val
            else:
                yield val.encode(self.charset)


def request_response_application(func):
    def application(environ, start_response):
        request = Request(environ)
        response = func(request)
        start_response(
            response.status,
            response.headers.items()
        )
        return iter(response)
    return application


class NotFoundError(Exception):
    pass


class DecoratorRouter:
    def __init__(self):
        self.routing_table = []

    def match(self, path):
        for (pattern, callback) in self.routing_table:
            m = re.match(pattern, path)
            if m:
                return (callback, m.groups())
        raise NotFoundError()

    def __call__(self, pattern):
        def _(func):
            self.routing_table.append((pattern, func))
            return func
        return _


routers = DecoratorRouter()


@routers(r'/hello/(.*)/$')
def hello(request, name):
    return Response("<h1>Hello, {name}</h1>".format(name=name))


@routers(r'/goodbye/(.*)/$')
def goodbye(request, name):
    return Response("<h1>Goodbye, {name}</h1>".format(name=name))


class Application(object):
    def __init__(self, routers, **kwargs):
        self.routers = routers

    def __call__(self, environ, start_response):
        try:
            request = Request(environ)
            callback, args = self.routers.match(request.path)
            response = callback(request, *args)
        except NotFoundError:
            response = Response("<h1>Not found</h1>", status=404)
        start_response(response.status, response.headers.items())
        return iter(response)


application = Application(routers)

if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    httpd = make_server('127.0.0.1', 8000, application)
    httpd.serve_forever()
true
true
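The WSGI demo above can be exercised without starting a server; a minimal sketch (standard library only, helper name invented) drives the application with a hand-built environ:

from io import BytesIO


def call_wsgi(app, path):
    # The smallest environ this demo framework actually reads.
    environ = {
        'PATH_INFO': path,
        'QUERY_STRING': '',
        'REQUEST_METHOD': 'GET',
        'wsgi.input': BytesIO(),
    }
    captured = {}

    def start_response(status, headers):
        captured['status'] = status
        captured['headers'] = headers

    body = b''.join(app(environ, start_response))
    return captured['status'], body

# call_wsgi(application, '/hello/world/') -> ('200 OK', b'<h1>Hello, world</h1>')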
f703c881bdbf9a59f2cf3170b2e0a56ebc6c9201
9,841
py
Python
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_ips_rule_settings.py
gvashchenkolineate/gvashchenkolineate_infra_trytravis
0fb18850afe0d8609693ba4b23f29c7cda17d97f
[ "MIT" ]
17
2017-06-07T23:15:01.000Z
2021-08-30T14:32:36.000Z
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_ips_rule_settings.py
gvashchenkolineate/gvashchenkolineate_infra_trytravis
0fb18850afe0d8609693ba4b23f29c7cda17d97f
[ "MIT" ]
9
2017-06-25T03:31:52.000Z
2021-05-17T23:43:12.000Z
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_ips_rule_settings.py
gvashchenkolineate/gvashchenkolineate_infra_trytravis
0fb18850afe0d8609693ba4b23f29c7cda17d97f
[ "MIT" ]
3
2018-05-26T21:31:22.000Z
2019-09-28T17:00:45.000Z
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

__metaclass__ = type

ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

DOCUMENTATION = '''
---
module: fortios_ips_rule_settings
short_description: Configure IPS rule setting in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify ips feature and rule_settings category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
      Tested with FOS v6.0.5
version_added: "2.8"
author:
    - Miguel Angel Munoz (@mamunozgonzalez)
    - Nicolas Thomas (@thomnico)
notes:
    - Requires fortiosapi library developed by Fortinet
    - Run as a local_action in your playbook
requirements:
    - fortiosapi>=0.9.8
options:
    host:
        description:
            - FortiOS or FortiGate IP address.
        type: str
        required: false
    username:
        description:
            - FortiOS or FortiGate username.
        type: str
        required: false
    password:
        description:
            - FortiOS or FortiGate password.
        type: str
        default: ""
    vdom:
        description:
            - Virtual domain, among those defined previously. A vdom is a
              virtual instance of the FortiGate that can be configured and
              used as a different unit.
        type: str
        default: root
    https:
        description:
            - Indicates if the requests towards FortiGate must use HTTPS protocol.
        type: bool
        default: true
    ssl_verify:
        description:
            - Ensures FortiGate certificate must be verified by a proper CA.
        type: bool
        default: true
        version_added: 2.9
    state:
        description:
            - Indicates whether to create or remove the object.
              This attribute was present already in previous version in a deeper level.
              It has been moved out to this outer level.
        type: str
        required: false
        choices:
            - present
            - absent
        version_added: 2.9
    ips_rule_settings:
        description:
            - Configure IPS rule setting.
        default: null
        type: dict
        suboptions:
            state:
                description:
                    - B(Deprecated)
                    - Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
                    - HORIZONTALLINE
                    - Indicates whether to create or remove the object.
                type: str
                required: false
                choices:
                    - present
                    - absent
            id:
                description:
                    - Rule ID.
                required: true
                type: int
'''

EXAMPLES = '''
- hosts: localhost
  vars:
   host: "192.168.122.40"
   username: "admin"
   password: ""
   vdom: "root"
   ssl_verify: "False"
  tasks:
  - name: Configure IPS rule setting.
    fortios_ips_rule_settings:
      host: "{{ host }}"
      username: "{{ username }}"
      password: "{{ password }}"
      vdom: "{{ vdom }}"
      https: "False"
      state: "present"
      ips_rule_settings:
        id: "3"
'''

RETURN = '''
build:
  description: Build number of the fortigate image
  returned: always
  type: str
  sample: '1547'
http_method:
  description: Last method used to provision the content into FortiGate
  returned: always
  type: str
  sample: 'PUT'
http_status:
  description: Last result given by FortiGate on last operation applied
  returned: always
  type: str
  sample: "200"
mkey:
  description: Master key (id) used in the last call to FortiGate
  returned: success
  type: str
  sample: "id"
name:
  description: Name of the table used to fulfill the request
  returned: always
  type: str
  sample: "urlfilter"
path:
  description: Path of the table used to fulfill the request
  returned: always
  type: str
  sample: "webfilter"
revision:
  description: Internal revision number
  returned: always
  type: str
  sample: "17.0.2.10658"
serial:
  description: Serial number of the unit
  returned: always
  type: str
  sample: "FGVMEVYYQT3AB5352"
status:
  description: Indication of the operation's result
  returned: always
  type: str
  sample: "success"
vdom:
  description: Virtual domain used
  returned: always
  type: str
  sample: "root"
version:
  description: Version of the FortiGate
  returned: always
  type: str
  sample: "v5.6.3"
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG


def login(data, fos):
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')

    fos.login(host, username, password, verify=ssl_verify)


def filter_ips_rule_settings_data(json):
    option_list = ['id']
    dictionary = {}

    for attribute in option_list:
        if attribute in json and json[attribute] is not None:
            dictionary[attribute] = json[attribute]

    return dictionary


def underscore_to_hyphen(data):
    if isinstance(data, list):
        for elem in data:
            elem = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data


def ips_rule_settings(data, fos):
    vdom = data['vdom']
    if 'state' in data and data['state']:
        state = data['state']
    elif 'state' in data['ips_rule_settings'] and data['ips_rule_settings']:
        state = data['ips_rule_settings']['state']
    else:
        state = True
    ips_rule_settings_data = data['ips_rule_settings']
    filtered_data = underscore_to_hyphen(filter_ips_rule_settings_data(ips_rule_settings_data))

    if state == "present":
        return fos.set('ips',
                       'rule-settings',
                       data=filtered_data,
                       vdom=vdom)

    elif state == "absent":
        return fos.delete('ips',
                          'rule-settings',
                          mkey=filtered_data['id'],
                          vdom=vdom)


def is_successful_status(status):
    return status['status'] == "success" or \
        status['http_method'] == "DELETE" and status['http_status'] == 404


def fortios_ips(data, fos):

    if data['ips_rule_settings']:
        resp = ips_rule_settings(data, fos)

    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp


def main():
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": False, "type": "str",
                  "choices": ["present", "absent"]},
        "ips_rule_settings": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "state": {"required": False, "type": "str",
                          "choices": ["present", "absent"]},
                "id": {"required": True, "type": "int"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_ips(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_ips(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
29.641566
99
0.612031
from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fortios_ips_rule_settings short_description: Configure IPS rule setting in Fortinet's FortiOS and FortiGate. description: - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the user to set and modify ips feature and rule_settings category. Examples include all parameters and values need to be adjusted to datasources before usage. Tested with FOS v6.0.5 version_added: "2.8" author: - Miguel Angel Munoz (@mamunozgonzalez) - Nicolas Thomas (@thomnico) notes: - Requires fortiosapi library developed by Fortinet - Run as a local_action in your playbook requirements: - fortiosapi>=0.9.8 options: host: description: - FortiOS or FortiGate IP address. type: str required: false username: description: - FortiOS or FortiGate username. type: str required: false password: description: - FortiOS or FortiGate password. type: str default: "" vdom: description: - Virtual domain, among those defined previously. A vdom is a virtual instance of the FortiGate that can be configured and used as a different unit. type: str default: root https: description: - Indicates if the requests towards FortiGate must use HTTPS protocol. type: bool default: true ssl_verify: description: - Ensures FortiGate certificate must be verified by a proper CA. type: bool default: true version_added: 2.9 state: description: - Indicates whether to create or remove the object. This attribute was present already in previous version in a deeper level. It has been moved out to this outer level. type: str required: false choices: - present - absent version_added: 2.9 ips_rule_settings: description: - Configure IPS rule setting. default: null type: dict suboptions: state: description: - B(Deprecated) - Starting with Ansible 2.9 we recommend using the top-level 'state' parameter. - HORIZONTALLINE - Indicates whether to create or remove the object. type: str required: false choices: - present - absent id: description: - Rule ID. required: true type: int ''' EXAMPLES = ''' - hosts: localhost vars: host: "192.168.122.40" username: "admin" password: "" vdom: "root" ssl_verify: "False" tasks: - name: Configure IPS rule setting. 
fortios_ips_rule_settings: host: "{{ host }}" username: "{{ username }}" password: "{{ password }}" vdom: "{{ vdom }}" https: "False" state: "present" ips_rule_settings: id: "3" ''' RETURN = ''' build: description: Build number of the fortigate image returned: always type: str sample: '1547' http_method: description: Last method used to provision the content into FortiGate returned: always type: str sample: 'PUT' http_status: description: Last result given by FortiGate on last operation applied returned: always type: str sample: "200" mkey: description: Master key (id) used in the last call to FortiGate returned: success type: str sample: "id" name: description: Name of the table used to fulfill the request returned: always type: str sample: "urlfilter" path: description: Path of the table used to fulfill the request returned: always type: str sample: "webfilter" revision: description: Internal revision number returned: always type: str sample: "17.0.2.10658" serial: description: Serial number of the unit returned: always type: str sample: "FGVMEVYYQT3AB5352" status: description: Indication of the operation's result returned: always type: str sample: "success" vdom: description: Virtual domain used returned: always type: str sample: "root" version: description: Version of the FortiGate returned: always type: str sample: "v5.6.3" ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.fortios.fortios import FortiOSHandler from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG def login(data, fos): host = data['host'] username = data['username'] password = data['password'] ssl_verify = data['ssl_verify'] fos.debug('on') if 'https' in data and not data['https']: fos.https('off') else: fos.https('on') fos.login(host, username, password, verify=ssl_verify) def filter_ips_rule_settings_data(json): option_list = ['id'] dictionary = {} for attribute in option_list: if attribute in json and json[attribute] is not None: dictionary[attribute] = json[attribute] return dictionary def underscore_to_hyphen(data): if isinstance(data, list): for elem in data: elem = underscore_to_hyphen(elem) elif isinstance(data, dict): new_data = {} for k, v in data.items(): new_data[k.replace('_', '-')] = underscore_to_hyphen(v) data = new_data return data def ips_rule_settings(data, fos): vdom = data['vdom'] if 'state' in data and data['state']: state = data['state'] elif 'state' in data['ips_rule_settings'] and data['ips_rule_settings']: state = data['ips_rule_settings']['state'] else: state = True ips_rule_settings_data = data['ips_rule_settings'] filtered_data = underscore_to_hyphen(filter_ips_rule_settings_data(ips_rule_settings_data)) if state == "present": return fos.set('ips', 'rule-settings', data=filtered_data, vdom=vdom) elif state == "absent": return fos.delete('ips', 'rule-settings', mkey=filtered_data['id'], vdom=vdom) def is_successful_status(status): return status['status'] == "success" or \ status['http_method'] == "DELETE" and status['http_status'] == 404 def fortios_ips(data, fos): if data['ips_rule_settings']: resp = ips_rule_settings(data, fos) return not is_successful_status(resp), \ resp['status'] == "success", \ resp def main(): fields = { "host": {"required": False, "type": "str"}, "username": {"required": False, "type": "str"}, "password": {"required": False, "type": "str", "default": "", "no_log": True}, "vdom": {"required": False, "type": "str", "default": "root"}, "https": 
{"required": False, "type": "bool", "default": True}, "ssl_verify": {"required": False, "type": "bool", "default": True}, "state": {"required": False, "type": "str", "choices": ["present", "absent"]}, "ips_rule_settings": { "required": False, "type": "dict", "default": None, "options": { "state": {"required": False, "type": "str", "choices": ["present", "absent"]}, "id": {"required": True, "type": "int"} } } } module = AnsibleModule(argument_spec=fields, supports_check_mode=False) legacy_mode = 'host' in module.params and module.params['host'] is not None and \ 'username' in module.params and module.params['username'] is not None and \ 'password' in module.params and module.params['password'] is not None if not legacy_mode: if module._socket_path: connection = Connection(module._socket_path) fos = FortiOSHandler(connection) is_error, has_changed, result = fortios_ips(module.params, fos) else: module.fail_json(**FAIL_SOCKET_MSG) else: try: from fortiosapi import FortiOSAPI except ImportError: module.fail_json(msg="fortiosapi module is required") fos = FortiOSAPI() login(module.params, fos) is_error, has_changed, result = fortios_ips(module.params, fos) fos.logout() if not is_error: module.exit_json(changed=has_changed, meta=result) else: module.fail_json(msg="Error in repo", meta=result) if __name__ == '__main__': main()
true
true
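For the fortios module above, the payload shaping is easy to check in isolation; this is an equivalent rewrite of filter_ips_rule_settings_data (input values invented), showing that only recognised option keys survive:

def filter_ips_rule_settings_data(json):
    option_list = ['id']
    # keep only recognised, non-None option keys
    return {k: json[k] for k in option_list if k in json and json[k] is not None}


print(filter_ips_rule_settings_data({'id': 3, 'state': 'present'}))
# -> {'id': 3}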
f703ca77a30ca7350baf7d44bc4292d83426e83a
392
py
Python
MyModule.py
LvJC/cpp-libtorch
4a56dda616bde50423591e7a4d4d7be6a978f6bf
[ "Apache-2.0" ]
1
2020-03-05T10:07:44.000Z
2020-03-05T10:07:44.000Z
MyModule.py
LvJC/cpp-libtorch
4a56dda616bde50423591e7a4d4d7be6a978f6bf
[ "Apache-2.0" ]
1
2020-06-23T14:05:29.000Z
2020-06-23T14:05:29.000Z
MyModule.py
LvJC/cpp-libtorch
4a56dda616bde50423591e7a4d4d7be6a978f6bf
[ "Apache-2.0" ]
1
2019-12-19T14:01:28.000Z
2019-12-19T14:01:28.000Z
import torch import torchvision # An instance of your model. model = torchvision.models.resnet18() # An example input you would normally provide to your model's forward() method. example = torch.rand(1, 3, 224, 224) # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing. traced_script_module = torch.jit.trace(model, example) # save traced_script_module.save("model.pt")
26.133333
79
0.770408
import torch
import torchvision

model = torchvision.models.resnet18()

example = torch.rand(1, 3, 224, 224)

# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
traced_script_module = torch.jit.trace(model, example)

# save
traced_script_module.save("model.pt")
true
true
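MyModule.py above only writes model.pt (presumably what the cpp-libtorch repo loads from C++); a minimal Python sketch, assuming that same file exists, reloads the traced module and runs it:

import torch

loaded = torch.jit.load("model.pt")  # ScriptModule produced by the record above
loaded.eval()

with torch.no_grad():
    out = loaded(torch.rand(1, 3, 224, 224))
print(out.shape)  # torch.Size([1, 1000]) for resnet18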
f703cad52db07a4720007227ed997cccd7842588
3,153
py
Python
zerver/lib/dev_ldap_directory.py
ross-sysadm/zulip
674158b8170eddb71aa2210052f0d598752581ed
[ "Apache-2.0" ]
null
null
null
zerver/lib/dev_ldap_directory.py
ross-sysadm/zulip
674158b8170eddb71aa2210052f0d598752581ed
[ "Apache-2.0" ]
null
null
null
zerver/lib/dev_ldap_directory.py
ross-sysadm/zulip
674158b8170eddb71aa2210052f0d598752581ed
[ "Apache-2.0" ]
null
null
null
import glob
import logging
import os
from typing import Any, Dict, List, Optional

from django.conf import settings

from zerver.lib.storage import static_path

# See https://jackstromberg.com/2013/01/useraccountcontrol-attributeflag-values/
# for docs on what these values mean.
LDAP_USER_ACCOUNT_CONTROL_NORMAL = '512'
LDAP_USER_ACCOUNT_CONTROL_DISABLED = '514'

def generate_dev_ldap_dir(mode: str, num_users: int=8) -> Dict[str, Dict[str, Any]]:
    mode = mode.lower()
    ldap_data = []
    for i in range(1, num_users+1):
        name = 'LDAP User %d' % (i,)
        email = 'ldapuser%d@zulip.com' % (i,)
        phone_number = '999999999%d' % (i,)
        birthdate = '19%02d-%02d-%02d' % (i, i, i)
        ldap_data.append((name, email, phone_number, birthdate))

    profile_images = [open(path, "rb").read() for path in
                      glob.glob(os.path.join(static_path("images/team"), "*"))]
    ldap_dir = {}
    for i, user_data in enumerate(ldap_data):
        email = user_data[1].lower()
        email_username = email.split('@')[0]
        common_data = {
            'cn': [user_data[0]],
            'userPassword': [email_username],
            'phoneNumber': [user_data[2]],
            'birthDate': [user_data[3]],
        }
        if mode == 'a':
            ldap_dir['uid=' + email + ',ou=users,dc=zulip,dc=com'] = dict(
                uid=[email],
                thumbnailPhoto=[profile_images[i % len(profile_images)]],
                userAccountControl=[LDAP_USER_ACCOUNT_CONTROL_NORMAL],
                **common_data)
        elif mode == 'b':
            ldap_dir['uid=' + email_username + ',ou=users,dc=zulip,dc=com'] = dict(
                uid=[email_username],
                jpegPhoto=[profile_images[i % len(profile_images)]],
                **common_data)
        elif mode == 'c':
            ldap_dir['uid=' + email_username + ',ou=users,dc=zulip,dc=com'] = dict(
                uid=[email_username],
                email=[email],
                **common_data)

    return ldap_dir

def init_fakeldap(directory: Optional[Dict[str, Dict[str, List[str]]]]=None) -> None:  # nocoverage
    # We only use this in development.  Importing mock inside
    # this function is an import time optimization, which
    # avoids the expensive import of the mock module (slow
    # because its dependency pbr uses pkgresources, which is
    # really slow to import.)
    from unittest import mock
    from fakeldap import MockLDAP

    # Silence the `django_auth_ldap` logger in dev mode to avoid
    # spammy "user not found" log messages.
    ldap_auth_logger = logging.getLogger('django_auth_ldap')
    ldap_auth_logger.setLevel(logging.CRITICAL)

    fakeldap_logger = logging.getLogger('fakeldap')
    fakeldap_logger.setLevel(logging.CRITICAL)

    ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
    mock_initialize = ldap_patcher.start()
    mock_ldap = MockLDAP()
    mock_initialize.return_value = mock_ldap

    mock_ldap.directory = directory or generate_dev_ldap_dir(settings.FAKE_LDAP_MODE,
                                                             settings.FAKE_LDAP_NUM_USERS)
38.925926
99
0.625119
import glob
import logging
import os
from typing import Any, Dict, List, Optional

from django.conf import settings

from zerver.lib.storage import static_path

LDAP_USER_ACCOUNT_CONTROL_NORMAL = '512'
LDAP_USER_ACCOUNT_CONTROL_DISABLED = '514'

def generate_dev_ldap_dir(mode: str, num_users: int=8) -> Dict[str, Dict[str, Any]]:
    mode = mode.lower()
    ldap_data = []
    for i in range(1, num_users+1):
        name = 'LDAP User %d' % (i,)
        email = 'ldapuser%d@zulip.com' % (i,)
        phone_number = '999999999%d' % (i,)
        birthdate = '19%02d-%02d-%02d' % (i, i, i)
        ldap_data.append((name, email, phone_number, birthdate))

    profile_images = [open(path, "rb").read() for path in
                      glob.glob(os.path.join(static_path("images/team"), "*"))]
    ldap_dir = {}
    for i, user_data in enumerate(ldap_data):
        email = user_data[1].lower()
        email_username = email.split('@')[0]
        common_data = {
            'cn': [user_data[0]],
            'userPassword': [email_username],
            'phoneNumber': [user_data[2]],
            'birthDate': [user_data[3]],
        }
        if mode == 'a':
            ldap_dir['uid=' + email + ',ou=users,dc=zulip,dc=com'] = dict(
                uid=[email],
                thumbnailPhoto=[profile_images[i % len(profile_images)]],
                userAccountControl=[LDAP_USER_ACCOUNT_CONTROL_NORMAL],
                **common_data)
        elif mode == 'b':
            ldap_dir['uid=' + email_username + ',ou=users,dc=zulip,dc=com'] = dict(
                uid=[email_username],
                jpegPhoto=[profile_images[i % len(profile_images)]],
                **common_data)
        elif mode == 'c':
            ldap_dir['uid=' + email_username + ',ou=users,dc=zulip,dc=com'] = dict(
                uid=[email_username],
                email=[email],
                **common_data)

    return ldap_dir

def init_fakeldap(directory: Optional[Dict[str, Dict[str, List[str]]]]=None) -> None:
    from unittest import mock
    from fakeldap import MockLDAP

    ldap_auth_logger = logging.getLogger('django_auth_ldap')
    ldap_auth_logger.setLevel(logging.CRITICAL)

    fakeldap_logger = logging.getLogger('fakeldap')
    fakeldap_logger.setLevel(logging.CRITICAL)

    ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
    mock_initialize = ldap_patcher.start()
    mock_ldap = MockLDAP()
    mock_initialize.return_value = mock_ldap

    mock_ldap.directory = directory or generate_dev_ldap_dir(settings.FAKE_LDAP_MODE,
                                                             settings.FAKE_LDAP_NUM_USERS)
true
true
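For generate_dev_ldap_dir above, the return shape is worth spelling out; one mode-'a' entry looks roughly like this (binary thumbnailPhoto omitted, values follow the record's own patterns):

entry = {
    'uid=ldapuser1@zulip.com,ou=users,dc=zulip,dc=com': {
        'cn': ['LDAP User 1'],
        'userPassword': ['ldapuser1'],
        'phoneNumber': ['9999999991'],
        'birthDate': ['1901-01-01'],
        'uid': ['ldapuser1@zulip.com'],
        'userAccountControl': ['512'],
    }
}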
f703cb62e2704a49458ad5c25825835775dc1de1
6,656
py
Python
yatube/posts/views.py
VladimirDip/Yatube
2652bd1e73677ec6d10e7fd1153f65fa48ee8a35
[ "BSD-3-Clause" ]
null
null
null
yatube/posts/views.py
VladimirDip/Yatube
2652bd1e73677ec6d10e7fd1153f65fa48ee8a35
[ "BSD-3-Clause" ]
null
null
null
yatube/posts/views.py
VladimirDip/Yatube
2652bd1e73677ec6d10e7fd1153f65fa48ee8a35
[ "BSD-3-Clause" ]
null
null
null
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
from django.core.paginator import Paginator
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.views.decorators.cache import cache_page
from django.db.models import Count

from .forms import CreatePost, CreateComment
from .models import Post, User, Comment, Follow


def _create_paginator(request, post):
    paginator = Paginator(post, 10)
    page_number = request.GET.get("page")
    page = paginator.get_page(page_number)
    return page, paginator


def _search_text(request):
    keyword = request.GET.get("q", None)
    posts_list = Post.objects.select_related(
        "author", "group").filter(
        text__contains=keyword
    ).prefetch_related("comments")
    data_paginator = _create_paginator(request, posts_list)
    return data_paginator


@cache_page(20, key_prefix="index_page")
def index(request):
    if request.GET.get("q") is None:
        posts_list = Post.objects.order_by("-pub_date")\
            .all()\
            .select_related("author", "group", )\
            .prefetch_related("comments",)
        data_paginator = _create_paginator(request, posts_list)
    else:
        data_paginator = _search_text(request)
    return render(request, "index.html", {"page": data_paginator[0],
                                          "paginator": data_paginator[1],
                                          "title": "Последние обновления",
                                          "description": "Последние обновления на сайте",
                                          "changing_it": "index"})


@login_required
def new_post(request):
    content = {"title_name": "Новый пост",
               "btn_name": "Добавить пост"}
    if request.method == "POST":
        form = CreatePost(request.POST, files=request.FILES or None)
        if form.is_valid():
            author = request.user
            form.cleaned_data['author'] = author
            date_clean = form.cleaned_data
            post = Post.objects.create(**date_clean)
            messages.success(request, "Пост добавлен")
            return redirect("index")
    else:
        form = CreatePost()
    return render(request, "add_post.html", {"form": form,
                                             "content": content})


def profile(request, username):
    user_name = get_object_or_404(User, username=username)
    following = None
    if request.user != AnonymousUser():
        following = Follow.objects.filter(user=request.user, author=user_name)
    posts = Post.objects.filter(author_id__username=user_name)\
        .select_related("author", "group")\
        .prefetch_related("comments")
    data_paginator = _create_paginator(request, posts)
    return render(request, "profile.html", {"page": data_paginator[0],
                                            "paginator": data_paginator[1],
                                            "author": user_name,
                                            "following": following})


def post_view(request, username, post_id):
    profile_person = get_object_or_404(User, username=username)
    select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
    comments = list(Comment.objects.filter(post_id=post_id).select_related("author", "post"))
    return render(request, "post.html", {"user_post": select_post,
                                         "author": profile_person,
                                         "comments": comments})


def post_edit(request, username, post_id):
    content = {"title_name": "Редактировать запись",
               "btn_name": "Сохранить"}
    profile_person = get_object_or_404(User, username=username)
    select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
    if request.user != profile_person:
        return redirect("post", username=username, post_id=post_id)
    form = CreatePost(request.POST or None,
                      instance=select_post,
                      files=request.FILES or None)
    if form.is_valid():
        form.save()
        return redirect("post", username=username, post_id=post_id)
    return render(request, "add_post.html", {"form": form,
                                             "selected_post": select_post,
                                             "content": content})


def page_not_found(request, exception):
    return render(request, "misc/404.html", {"path": request.path}, status=404)


def server_error(request):
    return render(request, "misc/500.html", status=500)


@login_required
def add_comment(request, username, post_id):
    profile_person = get_object_or_404(User, username=username)
    select_post = get_object_or_404(Post, pk=post_id, author=profile_person)
    if request.method == "POST":
        form = CreateComment(request.POST)
        if form.is_valid():
            author = request.user
            form.cleaned_data["post"] = select_post
            form.cleaned_data["author"] = author
            data_clean = form.cleaned_data
            comment = Comment.objects.create(**data_clean)
            messages.success(request, "Коммент поставлен")
            return redirect("post", username=username, post_id=post_id)
    else:
        form = CreateComment()
    return render(request, "comments.html", {"form": form})


@login_required
def follow_index(request):
    my_follow = Post.objects.filter(author__following__user=request.user)\
        .select_related("author", "group")\
        .prefetch_related("comments")
    data_paginator = _create_paginator(request, my_follow)
    return render(request, "index.html", {"page": data_paginator[0],
                                          "paginator": data_paginator[1],
                                          "title": "Подписки",
                                          "description": "Последние обновления твоих людей",
                                          "changing_it": "follow"})


@login_required
def profile_follow(request, username):
    author = get_object_or_404(User, username=username)
    if request.user != author:
        Follow.objects.get_or_create(author=author, user=request.user)
    return redirect("profile", username=username)


@login_required
def profile_unfollow(request, username):
    author = get_object_or_404(User, username=username)
    if request.user != author:
        Follow.objects.filter(author=author, user=request.user).delete()
    return redirect('profile', username=username)
39.856287
93
0.629207
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
from django.core.paginator import Paginator
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.views.decorators.cache import cache_page
from django.db.models import Count

from .forms import CreatePost, CreateComment
from .models import Post, User, Comment, Follow


def _create_paginator(request, post):
    paginator = Paginator(post, 10)
    page_number = request.GET.get("page")
    page = paginator.get_page(page_number)
    return page, paginator


def _search_text(request):
    keyword = request.GET.get("q", None)
    posts_list = Post.objects.select_related(
        "author", "group").filter(
        text__contains=keyword
    ).prefetch_related("comments")
    data_paginator = _create_paginator(request, posts_list)
    return data_paginator


@cache_page(20, key_prefix="index_page")
def index(request):
    if request.GET.get("q") is None:
        posts_list = Post.objects.order_by("-pub_date")\
            .all()\
            .select_related("author", "group", )\
            .prefetch_related("comments",)
        data_paginator = _create_paginator(request, posts_list)
    else:
        data_paginator = _search_text(request)
    return render(request, "index.html", {"page": data_paginator[0],
                                          "paginator": data_paginator[1],
                                          "title": "Последние обновления",
                                          "description": "Последние обновления на сайте",
                                          "changing_it": "index"})


@login_required
def new_post(request):
    content = {"title_name": "Новый пост",
               "btn_name": "Добавить пост"}
    if request.method == "POST":
        form = CreatePost(request.POST, files=request.FILES or None)
        if form.is_valid():
            author = request.user
            form.cleaned_data['author'] = author
            date_clean = form.cleaned_data
            post = Post.objects.create(**date_clean)
            messages.success(request, "Пост добавлен")
            return redirect("index")
    else:
        form = CreatePost()
    return render(request, "add_post.html", {"form": form,
                                             "content": content})


def profile(request, username):
    user_name = get_object_or_404(User, username=username)
    following = None
    if request.user != AnonymousUser():
        following = Follow.objects.filter(user=request.user, author=user_name)
    posts = Post.objects.filter(author_id__username=user_name)\
        .select_related("author", "group")\
        .prefetch_related("comments")
    data_paginator = _create_paginator(request, posts)
    return render(request, "profile.html", {"page": data_paginator[0],
                                            "paginator": data_paginator[1],
                                            "author": user_name,
                                            "following": following})


def post_view(request, username, post_id):
    profile_person = get_object_or_404(User, username=username)
    select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
    comments = list(Comment.objects.filter(post_id=post_id).select_related("author", "post"))
    return render(request, "post.html", {"user_post": select_post,
                                         "author": profile_person,
                                         "comments": comments})


def post_edit(request, username, post_id):
    content = {"title_name": "Редактировать запись",
               "btn_name": "Сохранить"}
    profile_person = get_object_or_404(User, username=username)
    select_post = get_object_or_404(Post, pk=post_id, author=profile_person.id)
    if request.user != profile_person:
        return redirect("post", username=username, post_id=post_id)
    form = CreatePost(request.POST or None,
                      instance=select_post,
                      files=request.FILES or None)
    if form.is_valid():
        form.save()
        return redirect("post", username=username, post_id=post_id)
    return render(request, "add_post.html", {"form": form,
                                             "selected_post": select_post,
                                             "content": content})


def page_not_found(request, exception):
    return render(request, "misc/404.html", {"path": request.path}, status=404)


def server_error(request):
    return render(request, "misc/500.html", status=500)


@login_required
def add_comment(request, username, post_id):
    profile_person = get_object_or_404(User, username=username)
    select_post = get_object_or_404(Post, pk=post_id, author=profile_person)
    if request.method == "POST":
        form = CreateComment(request.POST)
        if form.is_valid():
            author = request.user
            form.cleaned_data["post"] = select_post
            form.cleaned_data["author"] = author
            data_clean = form.cleaned_data
            comment = Comment.objects.create(**data_clean)
            messages.success(request, "Коммент поставлен")
            return redirect("post", username=username, post_id=post_id)
    else:
        form = CreateComment()
    return render(request, "comments.html", {"form": form})


@login_required
def follow_index(request):
    my_follow = Post.objects.filter(author__following__user=request.user)\
        .select_related("author", "group")\
        .prefetch_related("comments")
    data_paginator = _create_paginator(request, my_follow)
    return render(request, "index.html", {"page": data_paginator[0],
                                          "paginator": data_paginator[1],
                                          "title": "Подписки",
                                          "description": "Последние обновления твоих людей",
                                          "changing_it": "follow"})


@login_required
def profile_follow(request, username):
    author = get_object_or_404(User, username=username)
    if request.user != author:
        Follow.objects.get_or_create(author=author, user=request.user)
    return redirect("profile", username=username)


@login_required
def profile_unfollow(request, username):
    author = get_object_or_404(User, username=username)
    if request.user != author:
        Follow.objects.filter(author=author, user=request.user).delete()
    return redirect('profile', username=username)
true
true
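One detail of the views above worth noting: profile_follow leans on get_or_create, so repeated follows never duplicate rows. A hedged sketch of a combined toggle helper (not in the record; the import path is assumed from the record's own layout):

from posts.models import Follow


def toggle_follow(user, author):
    # Follow author if not followed yet, otherwise unfollow.
    # Returns True when the user now follows the author.
    if user == author:
        return False
    obj, created = Follow.objects.get_or_create(user=user, author=author)
    if not created:
        obj.delete()
    return created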
f703cb77ea9e43fc26b5d0b603d2a528e44aeee5
1,608
py
Python
extviews/connections.py
BilalAlpaslan/fastapi-extviews
e3ce1c4916d86009705a09e165e5ee21a197962f
[ "MIT" ]
16
2022-01-01T16:00:58.000Z
2022-03-21T09:42:35.000Z
extviews/connections.py
BilalAlpaslan/fastapi-extviews
e3ce1c4916d86009705a09e165e5ee21a197962f
[ "MIT" ]
null
null
null
extviews/connections.py
BilalAlpaslan/fastapi-extviews
e3ce1c4916d86009705a09e165e5ee21a197962f
[ "MIT" ]
null
null
null
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo import MongoClient

__all__ = ['PymongoConnection', 'MotorConnection']


class PymongoConnection:
    def __init__(self, host="127.0.0.1", port="27017", db="default", user=None, password=None):
        """Create database connection."""
        if user and password:
            self.db_client = MongoClient(f"mongodb://{user}:{password}@{host}:{port}")
        else:
            self.db_client = MongoClient(f"mongodb://{host}:{port}")
        self.db_name = db

    def get_db_client(self) -> MongoClient:
        """Return database client instance."""
        return self.db_client

    def get_db(self):
        """Return database instance."""
        return self.get_db_client()[self.db_name]

    def close_db(self):
        """Close database connection."""
        self.db_client.close()


class MotorConnection:
    def __init__(self, host="127.0.0.1", port="27017", db="default", user=None, password=None):
        """Create database connection."""
        if user and password:
            self.db_client = AsyncIOMotorClient(f"mongodb://{user}:{password}@{host}:{port}")
        else:
            self.db_client = AsyncIOMotorClient(f"mongodb://{host}:{port}")
        self.db_name = db

    def get_db_client(self) -> AsyncIOMotorClient:
        """Return database client instance."""
        return self.db_client

    def get_db(self):
        """Return database instance."""
        return self.get_db_client()[self.db_name]

    def close_db(self):
        """Close database connection."""
        self.db_client.close()
32.816327
95
0.626866
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo import MongoClient

__all__ = ['PymongoConnection', 'MotorConnection']


class PymongoConnection:
    def __init__(self, host="127.0.0.1", port="27017", db="default", user=None, password=None):
        if user and password:
            self.db_client = MongoClient(f"mongodb://{user}:{password}@{host}:{port}")
        else:
            self.db_client = MongoClient(f"mongodb://{host}:{port}")
        self.db_name = db

    def get_db_client(self) -> MongoClient:
        return self.db_client

    def get_db(self):
        return self.get_db_client()[self.db_name]

    def close_db(self):
        self.db_client.close()


class MotorConnection:
    def __init__(self, host="127.0.0.1", port="27017", db="default", user=None, password=None):
        if user and password:
            self.db_client = AsyncIOMotorClient(f"mongodb://{user}:{password}@{host}:{port}")
        else:
            self.db_client = AsyncIOMotorClient(f"mongodb://{host}:{port}")
        self.db_name = db

    def get_db_client(self) -> AsyncIOMotorClient:
        return self.db_client

    def get_db(self):
        return self.get_db_client()[self.db_name]

    def close_db(self):
        self.db_client.close()
true
true
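A small usage sketch for the connection helpers above (host and database names invented):

from extviews.connections import PymongoConnection

conn = PymongoConnection(host="127.0.0.1", port="27017", db="app")
db = conn.get_db()

db["users"].insert_one({"name": "alice"})
print(db["users"].count_documents({}))  # 1

conn.close_db()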
f703cbd34235ec29b24264756ef19d8f43b8582f
13,776
py
Python
env/lib/python3.8/site-packages/docker/utils/utils.py
projeto-de-algoritmos/Grafos2_IMDBConnection
5bcd9a631f6d871d2d9d3038a9904b9e930ac64a
[ "MIT" ]
1
2021-10-04T18:22:12.000Z
2021-10-04T18:22:12.000Z
env/lib/python3.8/site-packages/docker/utils/utils.py
projeto-de-algoritmos/Grafos2_IMDBConnection
5bcd9a631f6d871d2d9d3038a9904b9e930ac64a
[ "MIT" ]
10
2021-06-16T20:48:32.000Z
2021-10-04T18:22:02.000Z
env/lib/python3.8/site-packages/docker/utils/utils.py
projeto-de-algoritmos/Grafos2_IMDBConnection
5bcd9a631f6d871d2d9d3038a9904b9e930ac64a
[ "MIT" ]
null
null
null
import base64
import json
import os
import os.path
import shlex
import string
from datetime import datetime
from distutils.version import StrictVersion

from .. import errors
from .. import tls
from ..constants import DEFAULT_HTTP_HOST
from ..constants import DEFAULT_UNIX_SOCKET
from ..constants import DEFAULT_NPIPE
from ..constants import BYTE_UNITS

from urllib.parse import splitnport, urlparse


def create_ipam_pool(*args, **kwargs):
    raise errors.DeprecatedMethod(
        'utils.create_ipam_pool has been removed. Please use a '
        'docker.types.IPAMPool object instead.'
    )


def create_ipam_config(*args, **kwargs):
    raise errors.DeprecatedMethod(
        'utils.create_ipam_config has been removed. Please use a '
        'docker.types.IPAMConfig object instead.'
    )


def decode_json_header(header):
    data = base64.b64decode(header)
    data = data.decode('utf-8')
    return json.loads(data)


def compare_version(v1, v2):
    """Compare docker versions

    >>> v1 = '1.9'
    >>> v2 = '1.10'
    >>> compare_version(v1, v2)
    1
    >>> compare_version(v2, v1)
    -1
    >>> compare_version(v2, v2)
    0
    """
    s1 = StrictVersion(v1)
    s2 = StrictVersion(v2)
    if s1 == s2:
        return 0
    elif s1 > s2:
        return -1
    else:
        return 1


def version_lt(v1, v2):
    return compare_version(v1, v2) > 0


def version_gte(v1, v2):
    return not version_lt(v1, v2)


def _convert_port_binding(binding):
    result = {'HostIp': '', 'HostPort': ''}
    if isinstance(binding, tuple):
        if len(binding) == 2:
            result['HostPort'] = binding[1]
            result['HostIp'] = binding[0]
        elif isinstance(binding[0], str):
            result['HostIp'] = binding[0]
        else:
            result['HostPort'] = binding[0]
    elif isinstance(binding, dict):
        if 'HostPort' in binding:
            result['HostPort'] = binding['HostPort']
            if 'HostIp' in binding:
                result['HostIp'] = binding['HostIp']
        else:
            raise ValueError(binding)
    else:
        result['HostPort'] = binding

    if result['HostPort'] is None:
        result['HostPort'] = ''
    else:
        result['HostPort'] = str(result['HostPort'])

    return result


def convert_port_bindings(port_bindings):
    result = {}
    for k, v in iter(port_bindings.items()):
        key = str(k)
        if '/' not in key:
            key += '/tcp'
        if isinstance(v, list):
            result[key] = [_convert_port_binding(binding) for binding in v]
        else:
            result[key] = [_convert_port_binding(v)]
    return result


def convert_volume_binds(binds):
    if isinstance(binds, list):
        return binds

    result = []
    for k, v in binds.items():
        if isinstance(k, bytes):
            k = k.decode('utf-8')

        if isinstance(v, dict):
            if 'ro' in v and 'mode' in v:
                raise ValueError(
                    'Binding cannot contain both "ro" and "mode": {}'
                    .format(repr(v))
                )

            bind = v['bind']
            if isinstance(bind, bytes):
                bind = bind.decode('utf-8')

            if 'ro' in v:
                mode = 'ro' if v['ro'] else 'rw'
            elif 'mode' in v:
                mode = v['mode']
            else:
                mode = 'rw'

            result.append(
                str('{0}:{1}:{2}').format(k, bind, mode)
            )
        else:
            if isinstance(v, bytes):
                v = v.decode('utf-8')
            result.append(
                str('{0}:{1}:rw').format(k, v)
            )
    return result


def convert_tmpfs_mounts(tmpfs):
    if isinstance(tmpfs, dict):
        return tmpfs

    if not isinstance(tmpfs, list):
        raise ValueError(
            'Expected tmpfs value to be either a list or a dict, found: {}'
            .format(type(tmpfs).__name__)
        )

    result = {}
    for mount in tmpfs:
        if isinstance(mount, str):
            if ":" in mount:
                name, options = mount.split(":", 1)
            else:
                name = mount
                options = ""
        else:
            raise ValueError(
                "Expected item in tmpfs list to be a string, found: {}"
                .format(type(mount).__name__)
            )

        result[name] = options
    return result


def convert_service_networks(networks):
    if not networks:
        return networks
    if not isinstance(networks, list):
        raise TypeError('networks parameter must be a list.')

    result = []
    for n in networks:
        if isinstance(n, str):
            n = {'Target': n}
        result.append(n)
    return result


def parse_repository_tag(repo_name):
    parts = repo_name.rsplit('@', 1)
    if len(parts) == 2:
        return tuple(parts)
    parts = repo_name.rsplit(':', 1)
    if len(parts) == 2 and '/' not in parts[1]:
        return tuple(parts)
    return repo_name, None


def parse_host(addr, is_win32=False, tls=False):
    path = ''
    port = None
    host = None

    # Sensible defaults
    if not addr and is_win32:
        return DEFAULT_NPIPE
    if not addr or addr.strip() == 'unix://':
        return DEFAULT_UNIX_SOCKET

    addr = addr.strip()

    parsed_url = urlparse(addr)
    proto = parsed_url.scheme
    if not proto or any([x not in string.ascii_letters + '+' for x in proto]):
        # https://bugs.python.org/issue754016
        parsed_url = urlparse('//' + addr, 'tcp')
        proto = 'tcp'

    if proto == 'fd':
        raise errors.DockerException('fd protocol is not implemented')

    # These protos are valid aliases for our library but not for the
    # official spec
    if proto == 'http' or proto == 'https':
        tls = proto == 'https'
        proto = 'tcp'
    elif proto == 'http+unix':
        proto = 'unix'

    if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
        raise errors.DockerException(
            "Invalid bind address protocol: {}".format(addr)
        )

    if proto == 'tcp' and not parsed_url.netloc:
        # "tcp://" is exceptionally disallowed by convention;
        # omitting a hostname for other protocols is fine
        raise errors.DockerException(
            'Invalid bind address format: {}'.format(addr)
        )

    if any([
        parsed_url.params, parsed_url.query, parsed_url.fragment,
        parsed_url.password
    ]):
        raise errors.DockerException(
            'Invalid bind address format: {}'.format(addr)
        )

    if parsed_url.path and proto == 'ssh':
        raise errors.DockerException(
            'Invalid bind address format: no path allowed for this protocol:'
            ' {}'.format(addr)
        )
    else:
        path = parsed_url.path
        if proto == 'unix' and parsed_url.hostname is not None:
            # For legacy reasons, we consider unix://path
            # to be valid and equivalent to unix:///path
            path = '/'.join((parsed_url.hostname, path))

    if proto in ('tcp', 'ssh'):
        # parsed_url.hostname strips brackets from IPv6 addresses,
        # which can be problematic hence our use of splitnport() instead.
        host, port = splitnport(parsed_url.netloc)
        if port is None or port < 0:
            if proto != 'ssh':
                raise errors.DockerException(
                    'Invalid bind address format: port is required:'
                    ' {}'.format(addr)
                )
            port = 22

        if not host:
            host = DEFAULT_HTTP_HOST

    # Rewrite schemes to fit library internals (requests adapters)
    if proto == 'tcp':
        proto = 'http{}'.format('s' if tls else '')
    elif proto == 'unix':
        proto = 'http+unix'

    if proto in ('http+unix', 'npipe'):
        return "{}://{}".format(proto, path).rstrip('/')
    return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/')


def parse_devices(devices):
    device_list = []
    for device in devices:
        if isinstance(device, dict):
            device_list.append(device)
            continue
        if not isinstance(device, str):
            raise errors.DockerException(
                'Invalid device type {0}'.format(type(device))
            )
        device_mapping = device.split(':')
        if device_mapping:
            path_on_host = device_mapping[0]
            if len(device_mapping) > 1:
                path_in_container = device_mapping[1]
            else:
                path_in_container = path_on_host
            if len(device_mapping) > 2:
                permissions = device_mapping[2]
            else:
                permissions = 'rwm'
            device_list.append({
                'PathOnHost': path_on_host,
                'PathInContainer': path_in_container,
                'CgroupPermissions': permissions
            })
    return device_list


def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
    if not environment:
        environment = os.environ
    host = environment.get('DOCKER_HOST')

    # empty string for cert path is the same as unset.
    cert_path = environment.get('DOCKER_CERT_PATH') or None

    # empty string for tls verify counts as "false".
    # Any value or 'unset' counts as true.
    tls_verify = environment.get('DOCKER_TLS_VERIFY')
    if tls_verify == '':
        tls_verify = False
    else:
        tls_verify = tls_verify is not None
    enable_tls = cert_path or tls_verify

    params = {}

    if host:
        params['base_url'] = host

    if not enable_tls:
        return params

    if not cert_path:
        cert_path = os.path.join(os.path.expanduser('~'), '.docker')

    if not tls_verify and assert_hostname is None:
        # assert_hostname is a subset of TLS verification,
        # so if it's not set already then set it to false.
        assert_hostname = False

    params['tls'] = tls.TLSConfig(
        client_cert=(os.path.join(cert_path, 'cert.pem'),
                     os.path.join(cert_path, 'key.pem')),
        ca_cert=os.path.join(cert_path, 'ca.pem'),
        verify=tls_verify,
        ssl_version=ssl_version,
        assert_hostname=assert_hostname,
    )

    return params


def convert_filters(filters):
    result = {}
    for k, v in iter(filters.items()):
        if isinstance(v, bool):
            v = 'true' if v else 'false'
        if not isinstance(v, list):
            v = [v, ]
        result[k] = [
            str(item) if not isinstance(item, str) else item
            for item in v
        ]
    return json.dumps(result)


def datetime_to_timestamp(dt):
    """Convert a UTC datetime to a Unix timestamp"""
    delta = dt - datetime.utcfromtimestamp(0)
    return delta.seconds + delta.days * 24 * 3600


def parse_bytes(s):
    if isinstance(s, (int, float,)):
        return s
    if len(s) == 0:
        return 0

    if s[-2:-1].isalpha() and s[-1].isalpha():
        if s[-1] == "b" or s[-1] == "B":
            s = s[:-1]
    units = BYTE_UNITS
    suffix = s[-1].lower()

    # Check if the variable is a string representation of an int
    # without a units part. Assuming that the units are bytes.
    if suffix.isdigit():
        digits_part = s
        suffix = 'b'
    else:
        digits_part = s[:-1]

    if suffix in units.keys() or suffix.isdigit():
        try:
            digits = float(digits_part)
        except ValueError:
            raise errors.DockerException(
                'Failed converting the string value for memory ({0}) to'
                ' an integer.'.format(digits_part)
            )

        # Reconvert to long for the final result
        s = int(digits * units[suffix])
    else:
        raise errors.DockerException(
            'The specified value for memory ({0}) should specify the'
            ' units. The postfix should be one of the `b` `k` `m` `g`'
            ' characters'.format(s)
        )

    return s


def normalize_links(links):
    if isinstance(links, dict):
        links = iter(links.items())

    return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]


def parse_env_file(env_file):
    """
    Reads a line-separated environment file.
    The format of each line should be "key=value".
    """
    environment = {}

    with open(env_file, 'r') as f:
        for line in f:

            if line[0] == '#':
                continue

            line = line.strip()
            if not line:
                continue

            parse_line = line.split('=', 1)
            if len(parse_line) == 2:
                k, v = parse_line
                environment[k] = v
            else:
                raise errors.DockerException(
                    'Invalid line in environment file {0}:\n{1}'.format(
                        env_file, line))

    return environment


def split_command(command):
    return shlex.split(command)


def format_environment(environment):
    def format_env(key, value):
        if value is None:
            return key
        if isinstance(value, bytes):
            value = value.decode('utf-8')

        return u'{key}={value}'.format(key=key, value=value)
    return [format_env(*var) for var in iter(environment.items())]


def format_extra_hosts(extra_hosts, task=False):
    # Use format dictated by Swarm API if container is part of a task
    if task:
        return [
            '{} {}'.format(v, k) for k, v in sorted(iter(extra_hosts.items()))
        ]

    return [
        '{}:{}'.format(k, v) for k, v in sorted(iter(extra_hosts.items()))
    ]


def create_host_config(self, *args, **kwargs):
    raise errors.DeprecatedMethod(
        'utils.create_host_config has been removed. Please use a '
        'docker.types.HostConfig object instead.'
    )
27.830303
78
0.564387
import base64 import json import os import os.path import shlex import string from datetime import datetime from distutils.version import StrictVersion from .. import errors from .. import tls from ..constants import DEFAULT_HTTP_HOST from ..constants import DEFAULT_UNIX_SOCKET from ..constants import DEFAULT_NPIPE from ..constants import BYTE_UNITS from urllib.parse import splitnport, urlparse def create_ipam_pool(*args, **kwargs): raise errors.DeprecatedMethod( 'utils.create_ipam_pool has been removed. Please use a ' 'docker.types.IPAMPool object instead.' ) def create_ipam_config(*args, **kwargs): raise errors.DeprecatedMethod( 'utils.create_ipam_config has been removed. Please use a ' 'docker.types.IPAMConfig object instead.' ) def decode_json_header(header): data = base64.b64decode(header) data = data.decode('utf-8') return json.loads(data) def compare_version(v1, v2): s1 = StrictVersion(v1) s2 = StrictVersion(v2) if s1 == s2: return 0 elif s1 > s2: return -1 else: return 1 def version_lt(v1, v2): return compare_version(v1, v2) > 0 def version_gte(v1, v2): return not version_lt(v1, v2) def _convert_port_binding(binding): result = {'HostIp': '', 'HostPort': ''} if isinstance(binding, tuple): if len(binding) == 2: result['HostPort'] = binding[1] result['HostIp'] = binding[0] elif isinstance(binding[0], str): result['HostIp'] = binding[0] else: result['HostPort'] = binding[0] elif isinstance(binding, dict): if 'HostPort' in binding: result['HostPort'] = binding['HostPort'] if 'HostIp' in binding: result['HostIp'] = binding['HostIp'] else: raise ValueError(binding) else: result['HostPort'] = binding if result['HostPort'] is None: result['HostPort'] = '' else: result['HostPort'] = str(result['HostPort']) return result def convert_port_bindings(port_bindings): result = {} for k, v in iter(port_bindings.items()): key = str(k) if '/' not in key: key += '/tcp' if isinstance(v, list): result[key] = [_convert_port_binding(binding) for binding in v] else: result[key] = [_convert_port_binding(v)] return result def convert_volume_binds(binds): if isinstance(binds, list): return binds result = [] for k, v in binds.items(): if isinstance(k, bytes): k = k.decode('utf-8') if isinstance(v, dict): if 'ro' in v and 'mode' in v: raise ValueError( 'Binding cannot contain both "ro" and "mode": {}' .format(repr(v)) ) bind = v['bind'] if isinstance(bind, bytes): bind = bind.decode('utf-8') if 'ro' in v: mode = 'ro' if v['ro'] else 'rw' elif 'mode' in v: mode = v['mode'] else: mode = 'rw' result.append( str('{0}:{1}:{2}').format(k, bind, mode) ) else: if isinstance(v, bytes): v = v.decode('utf-8') result.append( str('{0}:{1}:rw').format(k, v) ) return result def convert_tmpfs_mounts(tmpfs): if isinstance(tmpfs, dict): return tmpfs if not isinstance(tmpfs, list): raise ValueError( 'Expected tmpfs value to be either a list or a dict, found: {}' .format(type(tmpfs).__name__) ) result = {} for mount in tmpfs: if isinstance(mount, str): if ":" in mount: name, options = mount.split(":", 1) else: name = mount options = "" else: raise ValueError( "Expected item in tmpfs list to be a string, found: {}" .format(type(mount).__name__) ) result[name] = options return result def convert_service_networks(networks): if not networks: return networks if not isinstance(networks, list): raise TypeError('networks parameter must be a list.') result = [] for n in networks: if isinstance(n, str): n = {'Target': n} result.append(n) return result def parse_repository_tag(repo_name): parts = repo_name.rsplit('@', 1) if len(parts) == 2: 
return tuple(parts) parts = repo_name.rsplit(':', 1) if len(parts) == 2 and '/' not in parts[1]: return tuple(parts) return repo_name, None def parse_host(addr, is_win32=False, tls=False): path = '' port = None host = None if not addr and is_win32: return DEFAULT_NPIPE if not addr or addr.strip() == 'unix://': return DEFAULT_UNIX_SOCKET addr = addr.strip() parsed_url = urlparse(addr) proto = parsed_url.scheme if not proto or any([x not in string.ascii_letters + '+' for x in proto]): parsed_url = urlparse('//' + addr, 'tcp') proto = 'tcp' if proto == 'fd': raise errors.DockerException('fd protocol is not implemented') if proto == 'http' or proto == 'https': tls = proto == 'https' proto = 'tcp' elif proto == 'http+unix': proto = 'unix' if proto not in ('tcp', 'unix', 'npipe', 'ssh'): raise errors.DockerException( "Invalid bind address protocol: {}".format(addr) ) if proto == 'tcp' and not parsed_url.netloc: raise errors.DockerException( 'Invalid bind address format: {}'.format(addr) ) if any([ parsed_url.params, parsed_url.query, parsed_url.fragment, parsed_url.password ]): raise errors.DockerException( 'Invalid bind address format: {}'.format(addr) ) if parsed_url.path and proto == 'ssh': raise errors.DockerException( 'Invalid bind address format: no path allowed for this protocol:' ' {}'.format(addr) ) else: path = parsed_url.path if proto == 'unix' and parsed_url.hostname is not None: path = '/'.join((parsed_url.hostname, path)) if proto in ('tcp', 'ssh'): host, port = splitnport(parsed_url.netloc) if port is None or port < 0: if proto != 'ssh': raise errors.DockerException( 'Invalid bind address format: port is required:' ' {}'.format(addr) ) port = 22 if not host: host = DEFAULT_HTTP_HOST if proto == 'tcp': proto = 'http{}'.format('s' if tls else '') elif proto == 'unix': proto = 'http+unix' if proto in ('http+unix', 'npipe'): return "{}://{}".format(proto, path).rstrip('/') return '{0}://{1}:{2}{3}'.format(proto, host, port, path).rstrip('/') def parse_devices(devices): device_list = [] for device in devices: if isinstance(device, dict): device_list.append(device) continue if not isinstance(device, str): raise errors.DockerException( 'Invalid device type {0}'.format(type(device)) ) device_mapping = device.split(':') if device_mapping: path_on_host = device_mapping[0] if len(device_mapping) > 1: path_in_container = device_mapping[1] else: path_in_container = path_on_host if len(device_mapping) > 2: permissions = device_mapping[2] else: permissions = 'rwm' device_list.append({ 'PathOnHost': path_on_host, 'PathInContainer': path_in_container, 'CgroupPermissions': permissions }) return device_list def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None): if not environment: environment = os.environ host = environment.get('DOCKER_HOST') cert_path = environment.get('DOCKER_CERT_PATH') or None tls_verify = environment.get('DOCKER_TLS_VERIFY') if tls_verify == '': tls_verify = False else: tls_verify = tls_verify is not None enable_tls = cert_path or tls_verify params = {} if host: params['base_url'] = host if not enable_tls: return params if not cert_path: cert_path = os.path.join(os.path.expanduser('~'), '.docker') if not tls_verify and assert_hostname is None: assert_hostname = False params['tls'] = tls.TLSConfig( client_cert=(os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')), ca_cert=os.path.join(cert_path, 'ca.pem'), verify=tls_verify, ssl_version=ssl_version, assert_hostname=assert_hostname, ) return params def convert_filters(filters): result = 
{} for k, v in iter(filters.items()): if isinstance(v, bool): v = 'true' if v else 'false' if not isinstance(v, list): v = [v, ] result[k] = [ str(item) if not isinstance(item, str) else item for item in v ] return json.dumps(result) def datetime_to_timestamp(dt): delta = dt - datetime.utcfromtimestamp(0) return delta.seconds + delta.days * 24 * 3600 def parse_bytes(s): if isinstance(s, (int, float,)): return s if len(s) == 0: return 0 if s[-2:-1].isalpha() and s[-1].isalpha(): if s[-1] == "b" or s[-1] == "B": s = s[:-1] units = BYTE_UNITS suffix = s[-1].lower() # Check if the variable is a string representation of an int # without a units part. Assuming that the units are bytes. if suffix.isdigit(): digits_part = s suffix = 'b' else: digits_part = s[:-1] if suffix in units.keys() or suffix.isdigit(): try: digits = float(digits_part) except ValueError: raise errors.DockerException( 'Failed converting the string value for memory ({0}) to' ' an integer.'.format(digits_part) ) # Reconvert to long for the final result s = int(digits * units[suffix]) else: raise errors.DockerException( 'The specified value for memory ({0}) should specify the' ' units. The postfix should be one of the `b` `k` `m` `g`' ' characters'.format(s) ) return s def normalize_links(links): if isinstance(links, dict): links = iter(links.items()) return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)] def parse_env_file(env_file): environment = {} with open(env_file, 'r') as f: for line in f: if line[0] == '#': continue line = line.strip() if not line: continue parse_line = line.split('=', 1) if len(parse_line) == 2: k, v = parse_line environment[k] = v else: raise errors.DockerException( 'Invalid line in environment file {0}:\n{1}'.format( env_file, line)) return environment def split_command(command): return shlex.split(command) def format_environment(environment): def format_env(key, value): if value is None: return key if isinstance(value, bytes): value = value.decode('utf-8') return u'{key}={value}'.format(key=key, value=value) return [format_env(*var) for var in iter(environment.items())] def format_extra_hosts(extra_hosts, task=False): # Use format dictated by Swarm API if container is part of a task if task: return [ '{} {}'.format(v, k) for k, v in sorted(iter(extra_hosts.items())) ] return [ '{}:{}'.format(k, v) for k, v in sorted(iter(extra_hosts.items())) ] def create_host_config(self, *args, **kwargs): raise errors.DeprecatedMethod( 'utils.create_host_config has been removed. Please use a ' 'docker.types.HostConfig object instead.' )
true
true
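The docker utility record above ends with a set of small parsing helpers; parse_repository_tag in particular has non-obvious precedence (a digest '@' wins over a tag ':', and a ':' that belongs to a registry host must not be treated as a tag separator). A minimal, self-contained sketch of that behavior follows — the function body is copied verbatim from the record, so only the sample image names are illustrative.

def parse_repository_tag(repo_name):
    # Copied from the record above: '@' (digest) is split first; a ':' is only
    # a tag separator when no '/' follows it (i.e. it is not a registry port).
    parts = repo_name.rsplit('@', 1)
    if len(parts) == 2:
        return tuple(parts)
    parts = repo_name.rsplit(':', 1)
    if len(parts) == 2 and '/' not in parts[1]:
        return tuple(parts)
    return repo_name, None

print(parse_repository_tag('ubuntu'))              # ('ubuntu', None)
print(parse_repository_tag('ubuntu:20.04'))        # ('ubuntu', '20.04')
print(parse_repository_tag('repo@sha256:abc123'))  # ('repo', 'sha256:abc123')
print(parse_repository_tag('localhost:5000/img'))  # ('localhost:5000/img', None)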
f703cbd8080c7a50635529e13a3dc9a8386266d3
13,462
py
Python
airbyte-cdk/python/airbyte_cdk/models/airbyte_protocol.py
Danucas/airbyte
9e77879a7a3b1a5a559a3df9fa85056365b6fbef
[ "MIT" ]
1
2022-03-29T01:08:58.000Z
2022-03-29T01:08:58.000Z
airbyte-cdk/python/airbyte_cdk/models/airbyte_protocol.py
Danucas/airbyte
9e77879a7a3b1a5a559a3df9fa85056365b6fbef
[ "MIT" ]
5
2022-02-22T14:49:48.000Z
2022-03-19T10:43:08.000Z
airbyte-cdk/python/airbyte_cdk/models/airbyte_protocol.py
Danucas/airbyte
9e77879a7a3b1a5a559a3df9fa85056365b6fbef
[ "MIT" ]
1
2022-03-11T06:21:24.000Z
2022-03-11T06:21:24.000Z
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # # generated by datamodel-codegen: # filename: airbyte_protocol.yaml from __future__ import annotations from enum import Enum from typing import Any, Dict, List, Optional, Union from pydantic import AnyUrl, BaseModel, Extra, Field class Type(Enum): RECORD = "RECORD" STATE = "STATE" LOG = "LOG" SPEC = "SPEC" CONNECTION_STATUS = "CONNECTION_STATUS" CATALOG = "CATALOG" class AirbyteRecordMessage(BaseModel): class Config: extra = Extra.allow stream: str = Field(..., description="the name of this record's stream") data: Dict[str, Any] = Field(..., description="the record data") emitted_at: int = Field( ..., description="when the data was emitted from the source. epoch in millisecond.", ) namespace: Optional[str] = Field(None, description="the namespace of this record's stream") class AirbyteStateMessage(BaseModel): class Config: extra = Extra.allow data: Dict[str, Any] = Field(..., description="the state data") class Level(Enum): FATAL = "FATAL" ERROR = "ERROR" WARN = "WARN" INFO = "INFO" DEBUG = "DEBUG" TRACE = "TRACE" class AirbyteLogMessage(BaseModel): class Config: extra = Extra.allow level: Level = Field(..., description="the type of logging") message: str = Field(..., description="the log message") class Status(Enum): SUCCEEDED = "SUCCEEDED" FAILED = "FAILED" class AirbyteConnectionStatus(BaseModel): class Config: extra = Extra.allow status: Status message: Optional[str] = None class SyncMode(Enum): full_refresh = "full_refresh" incremental = "incremental" class DestinationSyncMode(Enum): append = "append" overwrite = "overwrite" append_dedup = "append_dedup" class OAuth2Specification(BaseModel): class Config: extra = Extra.allow rootObject: Optional[List[Union[str, int]]] = Field( None, description="A list of strings representing a pointer to the root object which contains any oauth parameters in the ConnectorSpecification.\nExamples:\nif oauth parameters were contained inside the top level, rootObject=[] If they were nested inside another object {'credentials': {'app_id' etc...}, rootObject=['credentials'] If they were inside a oneOf {'switch': {oneOf: [{client_id...}, {non_oauth_param]}}, rootObject=['switch', 0] ", ) oauthFlowInitParameters: Optional[List[List[str]]] = Field( None, description="Pointers to the fields in the rootObject needed to obtain the initial refresh/access tokens for the OAuth flow. Each inner array represents the path in the rootObject of the referenced field. For example. Assume the rootObject contains params 'app_secret', 'app_id' which are needed to get the initial refresh token. If they are not nested in the rootObject, then the array would look like this [['app_secret'], ['app_id']] If they are nested inside an object called 'auth_params' then this array would be [['auth_params', 'app_secret'], ['auth_params', 'app_id']]", ) oauthFlowOutputParameters: Optional[List[List[str]]] = Field( None, description="Pointers to the fields in the rootObject which can be populated from successfully completing the oauth flow using the init parameters. This is typically a refresh/access token. 
Each inner array represents the path in the rootObject of the referenced field.", ) class AuthType(Enum): oauth2_0 = "oauth2.0" class AuthSpecification(BaseModel): auth_type: Optional[AuthType] = None oauth2Specification: Optional[OAuth2Specification] = Field( None, description="If the connector supports OAuth, this field should be non-null.", ) class AuthFlowType(Enum): oauth2_0 = "oauth2.0" oauth1_0 = "oauth1.0" class OAuthConfigSpecification(BaseModel): oauth_user_input_from_connector_config_specification: Optional[Dict[str, Any]] = Field( None, description="OAuth specific blob. This is a Json Schema used to validate Json configurations used as input to OAuth.\nMust be a valid non-nested JSON that refers to properties from ConnectorSpecification.connectionSpecification\nusing special annotation 'path_in_connector_config'.\nThese are input values the user is entering through the UI to authenticate to the connector, that might also shared\nas inputs for syncing data via the connector.\n\nExamples:\n\nif no connector values is shared during oauth flow, oauth_user_input_from_connector_config_specification=[]\nif connector values such as 'app_id' inside the top level are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['app_id']\n }\n }\nif connector values such as 'info.app_id' nested inside another object are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['info', 'app_id']\n }\n }", ) complete_oauth_output_specification: Optional[Dict[str, Any]] = Field( None, description="OAuth specific blob. This is a Json Schema used to validate Json configurations produced by the OAuth flows as they are\nreturned by the distant OAuth APIs.\nMust be a valid JSON describing the fields to merge back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_output_specification={\n refresh_token: {\n type: string,\n path_in_connector_config: ['credentials', 'refresh_token']\n }\n }", ) complete_oauth_server_input_specification: Optional[Dict[str, Any]] = Field( None, description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations.\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nserver when completing an OAuth flow (typically exchanging an auth code for refresh token).\n\nExamples:\n\n complete_oauth_server_input_specification={\n client_id: {\n type: string\n },\n client_secret: {\n type: string\n }\n }", ) complete_oauth_server_output_specification: Optional[Dict[str, Any]] = Field( None, description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations that\nalso need to be merged back into the connector configuration at runtime.\nThis is a subset configuration of `complete_oauth_server_input_specification` that filters fields out to retain only the ones that\nare necessary for the connector to function with OAuth. 
(some fields could be used during oauth flows but not needed afterwards, therefore\nthey would be listed in the `complete_oauth_server_input_specification` but not `complete_oauth_server_output_specification`)\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nconnector when using OAuth flow APIs.\nThese fields are to be merged back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_server_output_specification={\n client_id: {\n type: string,\n path_in_connector_config: ['credentials', 'client_id']\n },\n client_secret: {\n type: string,\n path_in_connector_config: ['credentials', 'client_secret']\n }\n }", ) class AirbyteStream(BaseModel): class Config: extra = Extra.allow name: str = Field(..., description="Stream's name.") json_schema: Dict[str, Any] = Field(..., description="Stream schema using Json Schema specs.") supported_sync_modes: Optional[List[SyncMode]] = None source_defined_cursor: Optional[bool] = Field( None, description="If the source defines the cursor field, then any other cursor field inputs will be ignored. If it does not, either the user_provided one is used, or the default one is used as a backup.", ) default_cursor_field: Optional[List[str]] = Field( None, description="Path to the field that will be used to determine if a record is new or modified since the last sync. If not provided by the source, the end user will have to specify the comparable themselves.", ) source_defined_primary_key: Optional[List[List[str]]] = Field( None, description="If the source defines the primary key, paths to the fields that will be used as a primary key. If not provided by the source, the end user will have to specify the primary key themselves.", ) namespace: Optional[str] = Field( None, description="Optional Source-defined namespace. Currently only used by JDBC destinations to determine what schema to write to. Airbyte streams from the same sources should have the same namespace.", ) class ConfiguredAirbyteStream(BaseModel): class Config: extra = Extra.allow stream: AirbyteStream sync_mode: SyncMode cursor_field: Optional[List[str]] = Field( None, description="Path to the field that will be used to determine if a record is new or modified since the last sync. This field is REQUIRED if `sync_mode` is `incremental`. Otherwise it is ignored.", ) destination_sync_mode: DestinationSyncMode primary_key: Optional[List[List[str]]] = Field( None, description="Paths to the fields that will be used as primary key. This field is REQUIRED if `destination_sync_mode` is `*_dedup`. Otherwise it is ignored.", ) class AdvancedAuth(BaseModel): auth_flow_type: Optional[AuthFlowType] = None predicate_key: Optional[List[str]] = Field( None, description="Json Path to a field in the connectorSpecification that should exist for the advanced auth to be applicable.", ) predicate_value: Optional[str] = Field( None, description="Value of the predicate_key fields for the advanced auth to be applicable.", ) oauth_config_specification: Optional[OAuthConfigSpecification] = None class ConnectorSpecification(BaseModel): class Config: extra = Extra.allow documentationUrl: Optional[AnyUrl] = None changelogUrl: Optional[AnyUrl] = None connectionSpecification: Dict[str, Any] = Field( ..., description="ConnectorDefinition specific blob. 
Must be a valid JSON string.", ) supportsIncremental: Optional[bool] = Field(None, description="If the connector supports incremental mode or not.") supportsNormalization: Optional[bool] = Field(False, description="If the connector supports normalization or not.") supportsDBT: Optional[bool] = Field(False, description="If the connector supports DBT or not.") supported_destination_sync_modes: Optional[List[DestinationSyncMode]] = Field( None, description="List of destination sync modes supported by the connector" ) authSpecification: Optional[AuthSpecification] = Field(None, description="deprecated, switching to advanced_auth instead") advanced_auth: Optional[AdvancedAuth] = Field( None, description="Additional and optional specification object to describe what an 'advanced' Auth flow would need to function.\n - A connector should be able to fully function with the configuration as described by the ConnectorSpecification in a 'basic' mode.\n - The 'advanced' mode provides easier UX for the user with UI improvements and automations. However, this requires further setup on the\n server side by instance or workspace admins beforehand. The trade-off is that the user does not have to provide as many technical\n inputs anymore and the auth process is faster and easier to complete.", ) class AirbyteCatalog(BaseModel): class Config: extra = Extra.allow streams: List[AirbyteStream] class ConfiguredAirbyteCatalog(BaseModel): class Config: extra = Extra.allow streams: List[ConfiguredAirbyteStream] class AirbyteMessage(BaseModel): class Config: extra = Extra.allow type: Type = Field(..., description="Message type") log: Optional[AirbyteLogMessage] = Field( None, description="log message: any kind of logging you want the platform to know about.", ) spec: Optional[ConnectorSpecification] = None connectionStatus: Optional[AirbyteConnectionStatus] = None catalog: Optional[AirbyteCatalog] = Field(None, description="catalog message: the catalog") record: Optional[AirbyteRecordMessage] = Field(None, description="record message: the record") state: Optional[AirbyteStateMessage] = Field( None, description="schema message: the state. Must be the last message produced. The platform uses this information", ) class AirbyteProtocol(BaseModel): airbyte_message: Optional[AirbyteMessage] = None configured_airbyte_catalog: Optional[ConfiguredAirbyteCatalog] = None
53
1,333
0.726267
from __future__ import annotations from enum import Enum from typing import Any, Dict, List, Optional, Union from pydantic import AnyUrl, BaseModel, Extra, Field class Type(Enum): RECORD = "RECORD" STATE = "STATE" LOG = "LOG" SPEC = "SPEC" CONNECTION_STATUS = "CONNECTION_STATUS" CATALOG = "CATALOG" class AirbyteRecordMessage(BaseModel): class Config: extra = Extra.allow stream: str = Field(..., description="the name of this record's stream") data: Dict[str, Any] = Field(..., description="the record data") emitted_at: int = Field( ..., description="when the data was emitted from the source. epoch in millisecond.", ) namespace: Optional[str] = Field(None, description="the namespace of this record's stream") class AirbyteStateMessage(BaseModel): class Config: extra = Extra.allow data: Dict[str, Any] = Field(..., description="the state data") class Level(Enum): FATAL = "FATAL" ERROR = "ERROR" WARN = "WARN" INFO = "INFO" DEBUG = "DEBUG" TRACE = "TRACE" class AirbyteLogMessage(BaseModel): class Config: extra = Extra.allow level: Level = Field(..., description="the type of logging") message: str = Field(..., description="the log message") class Status(Enum): SUCCEEDED = "SUCCEEDED" FAILED = "FAILED" class AirbyteConnectionStatus(BaseModel): class Config: extra = Extra.allow status: Status message: Optional[str] = None class SyncMode(Enum): full_refresh = "full_refresh" incremental = "incremental" class DestinationSyncMode(Enum): append = "append" overwrite = "overwrite" append_dedup = "append_dedup" class OAuth2Specification(BaseModel): class Config: extra = Extra.allow rootObject: Optional[List[Union[str, int]]] = Field( None, description="A list of strings representing a pointer to the root object which contains any oauth parameters in the ConnectorSpecification.\nExamples:\nif oauth parameters were contained inside the top level, rootObject=[] If they were nested inside another object {'credentials': {'app_id' etc...}, rootObject=['credentials'] If they were inside a oneOf {'switch': {oneOf: [{client_id...}, {non_oauth_param]}}, rootObject=['switch', 0] ", ) oauthFlowInitParameters: Optional[List[List[str]]] = Field( None, description="Pointers to the fields in the rootObject needed to obtain the initial refresh/access tokens for the OAuth flow. Each inner array represents the path in the rootObject of the referenced field. For example. Assume the rootObject contains params 'app_secret', 'app_id' which are needed to get the initial refresh token. If they are not nested in the rootObject, then the array would look like this [['app_secret'], ['app_id']] If they are nested inside an object called 'auth_params' then this array would be [['auth_params', 'app_secret'], ['auth_params', 'app_id']]", ) oauthFlowOutputParameters: Optional[List[List[str]]] = Field( None, description="Pointers to the fields in the rootObject which can be populated from successfully completing the oauth flow using the init parameters. This is typically a refresh/access token. 
Each inner array represents the path in the rootObject of the referenced field.", ) class AuthType(Enum): oauth2_0 = "oauth2.0" class AuthSpecification(BaseModel): auth_type: Optional[AuthType] = None oauth2Specification: Optional[OAuth2Specification] = Field( None, description="If the connector supports OAuth, this field should be non-null.", ) class AuthFlowType(Enum): oauth2_0 = "oauth2.0" oauth1_0 = "oauth1.0" class OAuthConfigSpecification(BaseModel): oauth_user_input_from_connector_config_specification: Optional[Dict[str, Any]] = Field( None, description="OAuth specific blob. This is a Json Schema used to validate Json configurations used as input to OAuth.\nMust be a valid non-nested JSON that refers to properties from ConnectorSpecification.connectionSpecification\nusing special annotation 'path_in_connector_config'.\nThese are input values the user is entering through the UI to authenticate to the connector, that might also shared\nas inputs for syncing data via the connector.\n\nExamples:\n\nif no connector values is shared during oauth flow, oauth_user_input_from_connector_config_specification=[]\nif connector values such as 'app_id' inside the top level are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['app_id']\n }\n }\nif connector values such as 'info.app_id' nested inside another object are used to generate the API url for the oauth flow,\n oauth_user_input_from_connector_config_specification={\n app_id: {\n type: string\n path_in_connector_config: ['info', 'app_id']\n }\n }", ) complete_oauth_output_specification: Optional[Dict[str, Any]] = Field( None, description="OAuth specific blob. This is a Json Schema used to validate Json configurations produced by the OAuth flows as they are\nreturned by the distant OAuth APIs.\nMust be a valid JSON describing the fields to merge back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_output_specification={\n refresh_token: {\n type: string,\n path_in_connector_config: ['credentials', 'refresh_token']\n }\n }", ) complete_oauth_server_input_specification: Optional[Dict[str, Any]] = Field( None, description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations.\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nserver when completing an OAuth flow (typically exchanging an auth code for refresh token).\n\nExamples:\n\n complete_oauth_server_input_specification={\n client_id: {\n type: string\n },\n client_secret: {\n type: string\n }\n }", ) complete_oauth_server_output_specification: Optional[Dict[str, Any]] = Field( None, description="OAuth specific blob. This is a Json Schema used to validate Json configurations persisted as Airbyte Server configurations that\nalso need to be merged back into the connector configuration at runtime.\nThis is a subset configuration of `complete_oauth_server_input_specification` that filters fields out to retain only the ones that\nare necessary for the connector to function with OAuth. 
(some fields could be used during oauth flows but not needed afterwards, therefore\nthey would be listed in the `complete_oauth_server_input_specification` but not `complete_oauth_server_output_specification`)\nMust be a valid non-nested JSON describing additional fields configured by the Airbyte Instance or Workspace Admins to be used by the\nconnector when using OAuth flow APIs.\nThese fields are to be merged back to `ConnectorSpecification.connectionSpecification`.\nFor each field, a special annotation `path_in_connector_config` can be specified to determine where to merge it,\n\nExamples:\n\n complete_oauth_server_output_specification={\n client_id: {\n type: string,\n path_in_connector_config: ['credentials', 'client_id']\n },\n client_secret: {\n type: string,\n path_in_connector_config: ['credentials', 'client_secret']\n }\n }", ) class AirbyteStream(BaseModel): class Config: extra = Extra.allow name: str = Field(..., description="Stream's name.") json_schema: Dict[str, Any] = Field(..., description="Stream schema using Json Schema specs.") supported_sync_modes: Optional[List[SyncMode]] = None source_defined_cursor: Optional[bool] = Field( None, description="If the source defines the cursor field, then any other cursor field inputs will be ignored. If it does not, either the user_provided one is used, or the default one is used as a backup.", ) default_cursor_field: Optional[List[str]] = Field( None, description="Path to the field that will be used to determine if a record is new or modified since the last sync. If not provided by the source, the end user will have to specify the comparable themselves.", ) source_defined_primary_key: Optional[List[List[str]]] = Field( None, description="If the source defines the primary key, paths to the fields that will be used as a primary key. If not provided by the source, the end user will have to specify the primary key themselves.", ) namespace: Optional[str] = Field( None, description="Optional Source-defined namespace. Currently only used by JDBC destinations to determine what schema to write to. Airbyte streams from the same sources should have the same namespace.", ) class ConfiguredAirbyteStream(BaseModel): class Config: extra = Extra.allow stream: AirbyteStream sync_mode: SyncMode cursor_field: Optional[List[str]] = Field( None, description="Path to the field that will be used to determine if a record is new or modified since the last sync. This field is REQUIRED if `sync_mode` is `incremental`. Otherwise it is ignored.", ) destination_sync_mode: DestinationSyncMode primary_key: Optional[List[List[str]]] = Field( None, description="Paths to the fields that will be used as primary key. This field is REQUIRED if `destination_sync_mode` is `*_dedup`. Otherwise it is ignored.", ) class AdvancedAuth(BaseModel): auth_flow_type: Optional[AuthFlowType] = None predicate_key: Optional[List[str]] = Field( None, description="Json Path to a field in the connectorSpecification that should exist for the advanced auth to be applicable.", ) predicate_value: Optional[str] = Field( None, description="Value of the predicate_key fields for the advanced auth to be applicable.", ) oauth_config_specification: Optional[OAuthConfigSpecification] = None class ConnectorSpecification(BaseModel): class Config: extra = Extra.allow documentationUrl: Optional[AnyUrl] = None changelogUrl: Optional[AnyUrl] = None connectionSpecification: Dict[str, Any] = Field( ..., description="ConnectorDefinition specific blob. 
Must be a valid JSON string.", ) supportsIncremental: Optional[bool] = Field(None, description="If the connector supports incremental mode or not.") supportsNormalization: Optional[bool] = Field(False, description="If the connector supports normalization or not.") supportsDBT: Optional[bool] = Field(False, description="If the connector supports DBT or not.") supported_destination_sync_modes: Optional[List[DestinationSyncMode]] = Field( None, description="List of destination sync modes supported by the connector" ) authSpecification: Optional[AuthSpecification] = Field(None, description="deprecated, switching to advanced_auth instead") advanced_auth: Optional[AdvancedAuth] = Field( None, description="Additional and optional specification object to describe what an 'advanced' Auth flow would need to function.\n - A connector should be able to fully function with the configuration as described by the ConnectorSpecification in a 'basic' mode.\n - The 'advanced' mode provides easier UX for the user with UI improvements and automations. However, this requires further setup on the\n server side by instance or workspace admins beforehand. The trade-off is that the user does not have to provide as many technical\n inputs anymore and the auth process is faster and easier to complete.", ) class AirbyteCatalog(BaseModel): class Config: extra = Extra.allow streams: List[AirbyteStream] class ConfiguredAirbyteCatalog(BaseModel): class Config: extra = Extra.allow streams: List[ConfiguredAirbyteStream] class AirbyteMessage(BaseModel): class Config: extra = Extra.allow type: Type = Field(..., description="Message type") log: Optional[AirbyteLogMessage] = Field( None, description="log message: any kind of logging you want the platform to know about.", ) spec: Optional[ConnectorSpecification] = None connectionStatus: Optional[AirbyteConnectionStatus] = None catalog: Optional[AirbyteCatalog] = Field(None, description="catalog message: the catalog") record: Optional[AirbyteRecordMessage] = Field(None, description="record message: the record") state: Optional[AirbyteStateMessage] = Field( None, description="schema message: the state. Must be the last message produced. The platform uses this information", ) class AirbyteProtocol(BaseModel): airbyte_message: Optional[AirbyteMessage] = None configured_airbyte_catalog: Optional[ConfiguredAirbyteCatalog] = None
true
true
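The pydantic models in the record above define the wire format a connector emits on stdout; a short sketch of building and serializing a RECORD message follows. It assumes the generated module is importable as airbyte_cdk.models (the path suggested by the record's repo layout) and that pydantic v1 is installed; the stream name and payload are made up.

from datetime import datetime, timezone

from airbyte_cdk.models import AirbyteMessage, AirbyteRecordMessage, Type

msg = AirbyteMessage(
    type=Type.RECORD,
    record=AirbyteRecordMessage(
        stream="users",                          # illustrative stream name
        data={"id": 1, "email": "a@b.example"},  # illustrative payload
        # emitted_at is epoch milliseconds, per the field description above
        emitted_at=int(datetime.now(timezone.utc).timestamp() * 1000),
    ),
)
print(msg.json(exclude_unset=True))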
f703cc16c1bc121575b31193db24ab7d64ed8f56
3,337
py
Python
app/waypointapp.py
shinwachi/waypointapp
c7a5f8fda76b64fc0a8124bf5737dab2ca7e8301
[ "MIT", "Unlicense" ]
null
null
null
app/waypointapp.py
shinwachi/waypointapp
c7a5f8fda76b64fc0a8124bf5737dab2ca7e8301
[ "MIT", "Unlicense" ]
null
null
null
app/waypointapp.py
shinwachi/waypointapp
c7a5f8fda76b64fc0a8124bf5737dab2ca7e8301
[ "MIT", "Unlicense" ]
null
null
null
import os, collections, sqlite3 from flask import Flask, render_template from flask.ext.bootstrap import Bootstrap from AsciiDammit import asciiDammit app = Flask(__name__) bootstrap = Bootstrap(app) import util as wpu configDict = {} appDataDirDict = {} appName = "waypointapp" @app.route('/') def index(): appNames = appDataDirDict.keys() return render_template('index.html', appNames=appNames) @app.route('/reportAppIndex/<appName>') def reportAppIndex(appName): ''' Lists the runs for the assay. ''' answer = [] for app_name, app_dir in appDataDirDict.items(): if appName == app_name: dirname, dirnames, filenames = next(os.walk(app_dir)) # ignore the folder named "scrap" answer.extend([(app_name, run_id) for run_id in [x for x in dirnames if x != "scrap"]]) return render_template('reportAppIndex.html', app_name=appName, answer=answer) @app.route('/report_app/<app_name>/<run_id>') def report_app(app_name, run_id): return reportHelper(appDataDirDict[app_name], run_id, app_name) def reportHelper(localAppDatadir, run_id, app_name): # list all files in the report folder dirname, dirnames, filenames = next(os.walk(localAppDatadir+'/'+run_id)) filepaths = ["file://localhost/"+dirname+"/"+z for z in filenames ] # identify all png files in the directory and embed them as data URIs images = [x for x in filenames if str(x).endswith('.png')] imagepaths = [dirname+"/"+x for x in images] imagetags = [] for ipath in imagepaths: data_uri = open(ipath, 'rb').read().encode('base64').replace('\n', '') img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri) imagetags.append(img_tag) # identify waypoint databases in the folder databases = [dirname+'/'+x for x in filenames if str(x).endswith('waypoint.sqlite') ] dbTables = collections.OrderedDict() colnames = {} tblTags = [] if databases: for db in databases: conn = sqlite3.connect(db) c = conn.cursor() c.execute("SELECT name FROM sqlite_master WHERE type='table';") tblNms = sorted([tblNm[0] for tblNm in c.fetchall()]) # reorder tblNms according to tableOrder x = [d for d in configDict['applications'] if d['appName'] == app_name][0] if x and 'tableOrder' in x.keys(): tableOrder = x['tableOrder'] tn_in_db = [] for tn in tableOrder: if tn in tblNms: tn_in_db.append(tn) tblNms.remove(tn) tblNms = tn_in_db + tblNms tblTags = ["#%s"%tblNm for tblNm in tblNms] # Iterate over individual tables and retrieve the row data for display for tblNm in tblNms: rowcount = [row for row in c.execute("SELECT count(*) row_count FROM %s"%tblNm)][0][0] if rowcount < 500: rows = c.execute('select * from %s'%tblNm) # force ascii conversion for display colnames[tblNm] = [asciiDammit(description[0]) for description in c.description] dbTables[tblNm] = [[wpu.renderHtmlTableCell(x) for x in row] for row in rows] conn.close() return render_template('report.html', dbpaths=databases, run_id=run_id, tableNames=tblTags, filenames=filenames, filepaths=filepaths, imagetags=imagetags, dbTables=dbTables, colnames=colnames, app_name=app_name) if __name__ == '__main__': # read in the configuration file, then run the server configDict, appDataDirDict = wpu.loadConfig(configFile = 'appconfig.json') app.run(debug=True, host='0.0.0.0', port=5757)
36.271739
212
0.712017
import os, collections, sqlite3 from flask import Flask, render_template from flask.ext.bootstrap import Bootstrap from AsciiDammit import asciiDammit app = Flask(__name__) bootstrap = Bootstrap(app) import util as wpu configDict = {} appDataDirDict = {} appName = "waypointapp" @app.route('/') def index(): appNames = appDataDirDict.keys() return render_template('index.html', appNames=appNames) @app.route('/reportAppIndex/<appName>') def reportAppIndex(appName): answer = [] for app_name, app_dir in appDataDirDict.items(): if appName == app_name: dirname, dirnames, filenames = next(os.walk(app_dir)) answer.extend([(app_name, run_id) for run_id in [x for x in dirnames if x != "scrap"]]) return render_template('reportAppIndex.html', app_name=appName, answer=answer) @app.route('/report_app/<app_name>/<run_id>') def report_app(app_name, run_id): return reportHelper(appDataDirDict[app_name], run_id, app_name) def reportHelper(localAppDatadir, run_id, app_name): dirname, dirnames, filenames = next(os.walk(localAppDatadir+'/'+run_id)) filepaths = ["file://localhost/"+dirname+"/"+z for z in filenames ] images = [x for x in filenames if str(x).endswith('.png')] imagepaths = [dirname+"/"+x for x in images] imagetags = [] for ipath in imagepaths: data_uri = open(ipath, 'rb').read().encode('base64').replace('\n', '') img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri) imagetags.append(img_tag) databases = [dirname+'/'+x for x in filenames if str(x).endswith('waypoint.sqlite') ] dbTables = collections.OrderedDict() colnames = {} tblTags = [] if databases: for db in databases: conn = sqlite3.connect(db) c = conn.cursor() c.execute("SELECT name FROM sqlite_master WHERE type='table';") tblNms = sorted([tblNm[0] for tblNm in c.fetchall()]) x = [d for d in configDict['applications'] if d['appName'] == app_name][0] if x and 'tableOrder' in x.keys(): tableOrder = x['tableOrder'] tn_in_db = [] for tn in tableOrder: if tn in tblNms: tn_in_db.append(tn) tblNms.remove(tn) tblNms = tn_in_db + tblNms tblTags = ["#%s"%tblNm for tblNm in tblNms] for tblNm in tblNms: rowcount = [row for row in c.execute("SELECT count(*) row_count FROM %s"%tblNm)][0][0] if rowcount < 500: rows = c.execute('select * from %s'%tblNm) colnames[tblNm] = [asciiDammit(description[0]) for description in c.description] dbTables[tblNm] = [[wpu.renderHtmlTableCell(x) for x in row] for row in rows] conn.close() return render_template('report.html', dbpaths=databases, run_id=run_id, tableNames=tblTags, filenames=filenames, filepaths=filepaths, imagetags=imagetags, dbTables=dbTables, colnames=colnames, app_name=app_name) if __name__ == '__main__': configDict, appDataDirDict = wpu.loadConfig(configFile = 'appconfig.json') app.run(debug=True, host='0.0.0.0', port=5757)
true
true
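reportHelper in the record above base64-encodes PNGs with str.encode('base64'), which only exists on Python 2 (as does the flask.ext.bootstrap import namespace; modern Flask spells it flask_bootstrap). A hedged Python 3 sketch of just the image-embedding step — the variable names mirror the record, and the path is a stand-in:

import base64

imagepaths = ["report/plot1.png"]  # stand-in for the paths built in reportHelper()
imagetags = []
for ipath in imagepaths:
    with open(ipath, 'rb') as fh:
        # base64.b64encode replaces the Python-2-only str.encode('base64');
        # it emits no newlines, so no .replace('\n', '') is needed
        data_uri = base64.b64encode(fh.read()).decode('ascii')
    imagetags.append('<img src="data:image/png;base64,{0}">'.format(data_uri))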
f703ce91cf2543bf6e32692a3a3e81e02753760e
7,646
py
Python
python/deepLearningTorch.py
demarley/leopard
52c5eb2dd732798972d429887c273f8449039c8f
[ "MIT" ]
null
null
null
python/deepLearningTorch.py
demarley/leopard
52c5eb2dd732798972d429887c273f8449039c8f
[ "MIT" ]
1
2018-08-26T16:48:47.000Z
2018-08-26T16:48:47.000Z
python/deepLearningTorch.py
demarley/leopard
52c5eb2dd732798972d429887c273f8449039c8f
[ "MIT" ]
1
2018-09-06T07:57:03.000Z
2018-09-06T07:57:03.000Z
""" Created: 16 August 2018 Last Updated: 16 August 2018 Dan Marley daniel.edison.marley@cernSPAMNOT.ch Texas A&M University ----- Class for performing deep learning in pytorch Designed for running on desktop at TAMU with specific set of software installed --> not guaranteed to work in CMSSW environment! Does not use ROOT directly. Instead, this is setup to use flat ntuples that are accessed via uproot. > UPROOT: https://github.com/scikit-hep/uproot > KERAS: https://keras.io/ > TENSORFLOW: https://www.tensorflow.org/ > PYTORCH: http://pytorch.org/ > LWTNN: https://github.com/lwtnn/lwtnn """ import json import util import datetime import collections from deepLearning import DeepLearning import uproot import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as tf from torch.autograd import Variable from sklearn.model_selection import StratifiedKFold from sklearn.metrics import roc_curve class LeopardNet(nn.Module): """Neural Network for Leopard in PyTorch Adapted from (16 August 2018) https://github.com/thongonary/surf18-tutorial/blob/master/tuto-8-torch.ipynb """ def __init__(self,layers): super(LeopardNet,self).__init__() self.dense = nn.ModuleList() for l,layer in enumerate(layers): self.dense.append( nn.Linear(layer['in'],layer['out']) ) def forward(self, x): """All the computation steps of the input are defined in this function""" nlayers = len(self.dense) for i,d in enumerate(self.dense): x = d(x) x = tf.relu(x) if i!=nlayers-1 else tf.sigmoid(x) return x class DeepLearningTorch(DeepLearning): """Deep Learning pytorch class""" def __init__(self): DeepLearning.__init__(self) ## PyTorch objects self.loss_fn = None # pytorch loss function self.torch_opt = None # pytorch optimizer def initialize(self): #,config): """Initialize a few parameters after they've been set by user""" DeepLearning.initialize(self) return ## Specific functions to perform training/inference tasks def build_model(self): """Construct the NN model -- only Keras support for now""" self.msg_svc.INFO("DLPYTORCH : Build the neural network model") ## Declare the model layers = [] layers.append( {'in':int(self.input_dim),'out':int(self.nNodes[0])} ) for i,n in enumerate(self.nNodes): if i==len(self.nNodes)-1: continue layers.append( {'in':int(n),'out':int(self.nNodes[i+1])} ) layers.append( {'in':int(self.nNodes[-1]),'out':self.output_dim} ) self.model = LeopardNet(layers) self.model.cuda() self.loss_fn = torch.nn.BCELoss() self.torch_opt = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate) #1e-4) return def train_epoch(self,X,Y): """""" losses = [] for beg_i in range(0, len(X), self.batch_size): x_batch = torch.from_numpy(X[beg_i:beg_i+self.batch_size,:]) y_batch = torch.from_numpy(Y[beg_i:beg_i+self.batch_size]) x_batch = Variable(x_batch).cuda() y_batch = Variable(y_batch).float().unsqueeze_(-1).cuda() # modify dimensions (X,) -> (X,1) self.torch_opt.zero_grad() y_hat = self.model(x_batch) # forward loss = self.loss_fn(y_hat, y_batch) # compute loss loss.backward() # compute gradients self.torch_opt.step() # update weights losses.append(loss.data.cpu().numpy()) return losses def train_model(self): """Setup for training the model using k-fold cross-validation""" X = self.df[self.features].values Y = self.df['target'].values kfold = StratifiedKFold(n_splits=self.kfold_splits, shuffle=True, random_state=seed) nsplits = kfold.get_n_splits(X,Y) cvpredictions = [] # compare outputs from each cross-validation self.msg_svc.INFO("DLPYTORCH : Fitting K-Fold cross 
validations") for ind,(train,test) in enumerate(kfold.split(X,Y)): self.msg_svc.INFO("DLPYTORCH : - Fitting K-Fold {0}".format(ind)) Y_train = Y[train] Y_test = Y[test] # -- store test/train data from each k-fold as histograms (to compare later) h_tests = {} h_trains = {} for n,v in self.targets.iteritems(): h_tests[n] = ROOT.TH1D("test_"+n,"test_"+n,10,0,10) h_trains[n] = ROOT.TH1D("train_"+n,"train_"+n,10,0,10) # fill histogram for each target for (n,v) in enumerate(self.targets.iteritems()): [h_tests[n].Fill(i) for i in X[test][np.where(Y_test==v)]] [h_trains[n].Fill(i) for i in X[train][np.where(Y_train==v)]] ## Fit the model to training data & save the history self.model.train() e_losses = [] for t in range(self.epochs): e_losses += self.train_epoch(X[train],Y_train) self.msg_svc.INFO("DLPYTORCH : Epoch {0} -- Loss {1}".format(t,e_losses[-1])) self.histories.append(e_losses) # evaluate the model self.msg_svc.DEBUG("DLPYTORCH : Evaluate the model: ") self.model.eval() # Evaluate training sample self.msg_svc.INFO("DLPYTORCH : Predictions from training sample") train_predictions = self.predict(X[train]) self.train_predictions.append(train_predictions) # Evaluate test sample self.msg_svc.INFO("DLPYTORCH : Predictions from testing sample") test_predictions = self.predict(X[test]) self.test_predictions.append(test_predictions) # Make ROC curve from test sample self.msg_svc.INFO("DLPYTORCH : Make ROC curves") fpr,tpr,_ = roc_curve(Y[test], test_predictions) self.fpr.append(fpr) self.tpr.append(tpr) # Plot the predictions to compare test/train self.msg_svc.INFO("DLPYTORCH : Plot the train/test predictions") self.plotter.prediction(h_trains,h_tests) # compare DNN prediction for different targets self.msg_svc.INFO("DLPYTORCH : Finished K-Fold cross-validation: ") self.accuracy = {'mean':np.mean(cvpredictions),'std':np.std(cvpredictions)} self.msg_svc.INFO("DLPYTORCH : - Accuracy: {0:.2f}% (+/- {1:.2f}%)".format(np.mean(cvpredictions), np.std(cvpredictions))) return def predict(self,data=None): """Return the prediction from a test sample""" self.msg_svc.DEBUG("DLPYTORCH : Get the DNN prediction") if data is None: self.msg_svc.ERROR("DLPYTORCH : predict() given NoneType data. Returning -999.") return -999. data = torch.from_numpy(data) return self.model( Variable(data,volatile=True).cuda() ) def load_model(self,from_lwtnn=False): """Load existing model to make plots or predictions""" output = self.output_dir+'/'+self.model_name self.model.load_state_dict(torch.load(output)) self.model.eval() return def save_model(self,to_lwtnn=False): """Save the model for use later""" output = self.output_dir+'/'+self.model_name torch.save(self.model.state_dict(),output) return ## THE END ##
34.913242
132
0.615747
import json import util import datetime import collections from deepLearning import DeepLearning import uproot import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as tf from torch.autograd import Variable from sklearn.model_selection import StratifiedKFold from sklearn.metrics import roc_curve class LeopardNet(nn.Module): def __init__(self,layers): super(LeopardNet,self).__init__() self.dense = nn.ModuleList() for l,layer in enumerate(layers): self.dense.append( nn.Linear(layer['in'],layer['out']) ) def forward(self, x): nlayers = len(self.dense) for i,d in enumerate(self.dense): x = d(x) x = tf.relu(x) if i!=nlayers-1 else tf.sigmoid(x) return x class DeepLearningTorch(DeepLearning): def __init__(self): DeepLearning.__init__(self) self.loss_fn = None self.torch_opt = None def initialize(self): DeepLearning.initialize(self) return def build_model(self): self.msg_svc.INFO("DLPYTORCH : Build the neural network model") layers = [] layers.append( {'in':int(self.input_dim),'out':int(self.nNodes[0])} ) for i,n in enumerate(self.nNodes): if i==len(self.nNodes)-1: continue layers.append( {'in':int(n),'out':int(self.nNodes[i+1])} ) layers.append( {'in':int(self.nNodes[-1]),'out':self.output_dim} ) self.model = LeopardNet(layers) self.model.cuda() self.loss_fn = torch.nn.BCELoss() self.torch_opt = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate) return def train_epoch(self,X,Y): losses = [] for beg_i in range(0, len(X), self.batch_size): x_batch = torch.from_numpy(X[beg_i:beg_i+self.batch_size,:]) y_batch = torch.from_numpy(Y[beg_i:beg_i+self.batch_size]) x_batch = Variable(x_batch).cuda() y_batch = Variable(y_batch).float().unsqueeze_(-1).cuda() self.torch_opt.zero_grad() y_hat = self.model(x_batch) loss = self.loss_fn(y_hat, y_batch) loss.backward() self.torch_opt.step() losses.append(loss.data.cpu().numpy()) return losses def train_model(self): X = self.df[self.features].values Y = self.df['target'].values kfold = StratifiedKFold(n_splits=self.kfold_splits, shuffle=True, random_state=seed) nsplits = kfold.get_n_splits(X,Y) cvpredictions = [] self.msg_svc.INFO("DLPYTORCH : Fitting K-Fold cross validations") for ind,(train,test) in enumerate(kfold.split(X,Y)): self.msg_svc.INFO("DLPYTORCH : - Fitting K-Fold {0}".format(ind)) Y_train = Y[train] Y_test = Y[test] h_tests = {} h_trains = {} for n,v in self.targets.iteritems(): h_tests[n] = ROOT.TH1D("test_"+n,"test_"+n,10,0,10) h_trains[n] = ROOT.TH1D("train_"+n,"train_"+n,10,0,10) for (n,v) in enumerate(self.targets.iteritems()): [h_tests[n].Fill(i) for i in X[test][np.where(Y_test==v)]] [h_trains[n].Fill(i) for i in X[train][np.where(Y_train==v)]] self.model.train() e_losses = [] for t in range(self.epochs): e_losses += self.train_epoch(X[train],Y_train) self.msg_svc.INFO("DLPYTORCH : Epoch {0} -- Loss {1}".format(t,e_losses[-1])) self.histories.append(e_losses) self.msg_svc.DEBUG("DLPYTORCH : Evaluate the model: ") self.model.eval() self.msg_svc.INFO("DLPYTORCH : Predictions from training sample") train_predictions = self.predict(X[train]) self.train_predictions.append(train_predictions) self.msg_svc.INFO("DLPYTORCH : Predictions from testing sample") test_predictions = self.predict(X[test]) self.test_predictions.append(test_predictions) self.msg_svc.INFO("DLPYTORCH : Make ROC curves") fpr,tpr,_ = roc_curve(Y[test], test_predictions) self.fpr.append(fpr) self.tpr.append(tpr) self.msg_svc.INFO("DLPYTORCH : Plot the train/test predictions") self.plotter.prediction(h_trains,h_tests) 
self.msg_svc.INFO("DLPYTORCH : Finished K-Fold cross-validation: ") self.accuracy = {'mean':np.mean(cvpredictions),'std':np.std(cvpredictions)} self.msg_svc.INFO("DLPYTORCH : - Accuracy: {0:.2f}% (+/- {1:.2f}%)".format(np.mean(cvpredictions), np.std(cvpredictions))) return def predict(self,data=None): self.msg_svc.DEBUG("DLPYTORCH : Get the DNN prediction") if data is None: self.msg_svc.ERROR("DLPYTORCH : predict() given NoneType data. Returning -999.") return -999. data = torch.from_numpy(data) return self.model( Variable(data,volatile=True).cuda() ) def load_model(self,from_lwtnn=False): output = self.output_dir+'/'+self.model_name self.model.load_state_dict(torch.load(output)) self.model.eval() return def save_model(self,to_lwtnn=False): output = self.output_dir+'/'+self.model_name torch.save(self.model.state_dict(),output) return
true
true
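train_model in the record above passes random_state=seed to StratifiedKFold, but no seed is defined anywhere in the file (and ROOT is used without an import, with the Python 2 iteritems()). A self-contained sketch of the cross-validation setup with an explicit seed; the seed value, split count, and random data are illustrative, not taken from the repo:

import numpy as np
from sklearn.model_selection import StratifiedKFold

SEED = 42  # illustrative; the record's bare `seed` is undefined, so pick one explicitly
rng = np.random.default_rng(SEED)

X = rng.random((100, 4))          # stand-in for self.df[self.features].values
Y = rng.integers(0, 2, size=100)  # stand-in for self.df['target'].values

kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
for ind, (train, test) in enumerate(kfold.split(X, Y)):
    print("fold {0}: {1} train / {2} test rows".format(ind, len(train), len(test)))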
f703ced66ac13bee7f8100c8b3e3b18e720c8fc6
793
py
Python
core/csrc/torch_nndistance/test.py
THU-DA-6D-Pose-Group/self6dpp
c267cfa55e440e212136a5e9940598720fa21d16
[ "Apache-2.0" ]
33
2021-12-15T07:11:47.000Z
2022-03-29T08:58:32.000Z
core/csrc/torch_nndistance/test.py
THU-DA-6D-Pose-Group/self6dpp
c267cfa55e440e212136a5e9940598720fa21d16
[ "Apache-2.0" ]
3
2021-12-15T11:39:54.000Z
2022-03-29T07:24:23.000Z
core/csrc/torch_nndistance/test.py
THU-DA-6D-Pose-Group/self6dpp
c267cfa55e440e212136a5e9940598720fa21d16
[ "Apache-2.0" ]
null
null
null
import torch import os.path as osp import sys from torch.autograd import Variable cur_dir = osp.dirname(osp.abspath(__file__)) sys.path.insert(0, cur_dir) import torch_nndistance as NND p1 = torch.rand(10, 1000, 3) p2 = torch.rand(10, 1500, 3) points1 = Variable(p1, requires_grad=True) points2 = p2 points1 = points1.cuda() print(points1.requires_grad) points2 = points2.cuda() dist1, dist2 = NND.nnd(points1, points2) print(dist1, dist2) loss = torch.sum(dist1) print("loss", loss) loss.backward() print(points1.grad, points2.grad) print("====================") points1 = Variable(p1.cuda(), requires_grad=True) points2 = p2.cuda() dist1, dist2 = NND.nnd(points1, points2) print(dist1, dist2) loss = torch.sum(dist1) print("loss", loss) loss.backward() print(points1.grad, points2.grad)
23.323529
49
0.725095
import torch import os.path as osp import sys from torch.autograd import Variable cur_dir = osp.dirname(osp.abspath(__file__)) sys.path.insert(0, cur_dir) import torch_nndistance as NND p1 = torch.rand(10, 1000, 3) p2 = torch.rand(10, 1500, 3) points1 = Variable(p1, requires_grad=True) points2 = p2 points1 = points1.cuda() print(points1.requires_grad) points2 = points2.cuda() dist1, dist2 = NND.nnd(points1, points2) print(dist1, dist2) loss = torch.sum(dist1) print("loss", loss) loss.backward() print(points1.grad, points2.grad) print("====================") points1 = Variable(p1.cuda(), requires_grad=True) points2 = p2.cuda() dist1, dist2 = NND.nnd(points1, points2) print(dist1, dist2) loss = torch.sum(dist1) print("loss", loss) loss.backward() print(points1.grad, points2.grad)
true
true
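The two blocks in the test above differ only in where .cuda() is applied, and that is the point of the comparison: calling .cuda() on a CPU leaf rebinds points1 to a non-leaf copy, so its .grad stays None after backward(), while constructing the Variable from p1.cuda() keeps a GPU leaf that accumulates gradients. A small sketch of the same leaf/non-leaf distinction that runs without the NND extension (it falls back to a no-op multiply when CUDA is absent):

import torch

a = torch.rand(3, requires_grad=True)                    # leaf tensor
b = a.cuda() if torch.cuda.is_available() else a * 1.0   # either way, b is NON-leaf
b.sum().backward()
print(a.grad)     # gradients flow back to the leaf `a`
print(b.is_leaf)  # False: .grad is not populated on b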
f703cfe3cdfc3e3b298ee7f07fe599e74fbf88d9
1,608
py
Python
linesman/geo.py
burrscurr/linesman
ace4b38ac54e2cec29e49023cb725afe040c017a
[ "MIT" ]
3
2021-01-05T21:00:26.000Z
2021-01-26T00:08:26.000Z
linesman/geo.py
burrscurr/linesman
ace4b38ac54e2cec29e49023cb725afe040c017a
[ "MIT" ]
47
2021-01-04T17:31:26.000Z
2022-02-14T04:03:47.000Z
linesman/geo.py
burrscurr/linesman
ace4b38ac54e2cec29e49023cb725afe040c017a
[ "MIT" ]
1
2021-01-05T21:02:23.000Z
2021-01-05T21:02:23.000Z
from geographiclib.geodesic import Geodesic from pyproj import CRS, Transformer from .geometry import Vector, Line def azimuth(p1: Vector, p2: Vector): """:return: azimuth at p1 of the WGS84 geodesic through p1 and p2""" res = Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x) return res['azi1'] def dist_m(a, b): """ :param a: lon lat point :param b: lon lat point :return: distance between a and b in meters """ res = Geodesic.WGS84.Inverse(a.y, a.x, b.y, b.x) return res['s12'] def mercator_project(origin: Vector, azimuth, points: [Vector], ellps='WGS84'): """ Perform an oblique Mercator projection of a given list of points with the pseudoequator defined by the given line. Formulas from DOI 10.3133/pp1395 p.69 (Map projections: A working manual) :param origin: (lon, lat) that will become (0, 0) in projection :param azimuth: azimuth in degrees of origin defining the direction of the geodesic that becomes the new equator (y=0) in projection :param points: iterable of (lon,lat) Vector instances :param ellps: proj ellipsoid identifier for ellipsoid to use as model for the globe. Defaults to WGS84. :return: iterable of (x, y) Vector instances in the coordinate system with unit 1 meter """ base = CRS.from_user_input(4326) mercator = CRS(f'+proj=omerc +lonc={origin.x} +lat_0={origin.y} ' f'+alpha={azimuth} +gamma=0 +ellps={ellps}') t = Transformer.from_crs(base, mercator) for p in points: res = t.transform(p.y, p.x) yield Vector(res[1], res[0])
35.733333
79
0.670398
from geographiclib.geodesic import Geodesic from pyproj import CRS, Transformer from .geometry import Vector, Line def azimuth(p1: Vector, p2: Vector): res = Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x) return res['azi1'] def dist_m(a, b): res = Geodesic.WGS84.Inverse(a.y, a.x, b.y, b.x) return res['s12'] def mercator_project(origin: Vector, azimuth, points: [Vector], ellps='WGS84'): base = CRS.from_user_input(4326) mercator = CRS(f'+proj=omerc +lonc={origin.x} +lat_0={origin.y} ' f'+alpha={azimuth} +gamma=0 +ellps={ellps}') t = Transformer.from_crs(base, mercator) for p in points: res = t.transform(p.y, p.x) yield Vector(res[1], res[0])
true
true
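A short usage sketch for the azimuth and dist_m helpers above. geographiclib must be installed; Vector only needs .x/.y (lon/lat) attributes, so a namedtuple stands in for the package's own class, and the two function bodies are copied from the record. The coordinates are illustrative:

from collections import namedtuple
from geographiclib.geodesic import Geodesic

Vector = namedtuple("Vector", "x y")  # stand-in: x = lon, y = lat

def azimuth(p1, p2):
    return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)["azi1"]

def dist_m(a, b):
    return Geodesic.WGS84.Inverse(a.y, a.x, b.y, b.x)["s12"]

berlin = Vector(13.405, 52.52)
munich = Vector(11.582, 48.135)
print(round(azimuth(berlin, munich), 1), "deg")       # initial bearing Berlin -> Munich
print(round(dist_m(berlin, munich) / 1000, 1), "km")  # roughly 500 km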
f703d29d936bbe5b59762ca968f7b33cb9db5fbd
1,466
py
Python
WEEKS/CD_Sata-Structures/_MISC/algorithms/tree/segment_tree/segment_tree.py
webdevhub42/Lambda
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
[ "MIT" ]
11
2021-02-18T04:53:44.000Z
2022-01-16T10:57:39.000Z
WEEKS/CD_Sata-Structures/_MISC/algorithms/tree/segment_tree/segment_tree.py
webdevhub42/Lambda
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
[ "MIT" ]
162
2021-03-09T01:52:11.000Z
2022-03-12T01:09:07.000Z
WEEKS/CD_Sata-Structures/_MISC/algorithms/tree/segment_tree/segment_tree.py
webdevhub42/Lambda
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
[ "MIT" ]
8
2021-02-18T05:12:34.000Z
2022-03-06T19:02:14.000Z
""" Segment_tree creates a segment tree with a given array and function, allowing queries to be done later in log(N) time function takes 2 values and returns a same type value """ class SegmentTree: def __init__(self, arr, function): self.segment = [0 for x in range(3 * len(arr) + 3)] self.arr = arr self.fn = function self.maketree(0, 0, len(arr) - 1) def make_tree(self, i, l, r): if l == r: self.segment[i] = self.arr[l] elif l < r: self.make_tree(2 * i + 1, l, int((l + r) / 2)) self.make_tree(2 * i + 2, int((l + r) / 2) + 1, r) self.segment[i] = self.fn(self.segment[2 * i + 1], self.segment[2 * i + 2]) def __query(self, i, L, R, l, r): if l > R or r < L or L > R or l > r: return None if L >= l and R <= r: return self.segment[i] val1 = self.__query(2 * i + 1, L, int((L + R) / 2), l, r) val2 = self.__query(2 * i + 2, int((L + R + 2) / 2), R, l, r) print(L, R, " returned ", val1, val2) if val1 != None: if val2 != None: return self.fn(val1, val2) return val1 return val2 def query(self, L, R): return self.__query(0, 0, len(self.arr) - 1, L, R) """ Example - mytree = SegmentTree([2,4,5,3,4],max) mytree.query(2,4) mytree.query(0,3) ... mytree = SegmentTree([4,5,2,3,4,43,3],sum) mytree.query(1,8) ... """
28.192308
87
0.514325
class SegmentTree: def __init__(self, arr, function): self.segment = [0 for x in range(3 * len(arr) + 3)] self.arr = arr self.fn = function self.make_tree(0, 0, len(arr) - 1) def make_tree(self, i, l, r): if l == r: self.segment[i] = self.arr[l] elif l < r: self.make_tree(2 * i + 1, l, int((l + r) / 2)) self.make_tree(2 * i + 2, int((l + r) / 2) + 1, r) self.segment[i] = self.fn(self.segment[2 * i + 1], self.segment[2 * i + 2]) def __query(self, i, L, R, l, r): if l > R or r < L or L > R or l > r: return None if L >= l and R <= r: return self.segment[i] val1 = self.__query(2 * i + 1, L, int((L + R) / 2), l, r) val2 = self.__query(2 * i + 2, int((L + R + 2) / 2), R, l, r) print(L, R, " returned ", val1, val2) if val1 is not None: if val2 is not None: return self.fn(val1, val2) return val1 return val2 def query(self, L, R): return self.__query(0, 0, len(self.arr) - 1, L, R)
true
true
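A demo of the SegmentTree above, following its trailing docstring examples; it assumes the class definition has been saved to a module on the import path. Note that the combining function is called with two scalars, so a range sum needs operator.add (or a two-argument lambda) rather than the builtin sum, and __query prints each internal merge as it recurses:

import operator

mytree = SegmentTree([2, 4, 5, 3, 4], max)
print(mytree.query(2, 4))   # max of arr[2..4] -> 5
print(mytree.query(0, 3))   # max of arr[0..3] -> 5

sumtree = SegmentTree([4, 5, 2, 3, 4, 43, 3], operator.add)
print(sumtree.query(0, 6))  # 4+5+2+3+4+43+3 -> 64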