hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
0de91d0b3d8dee61091229402433dcdab535ab49
181
py
Python
utilities/io/__init__.py
wong-ck/DeepSegment
01c04b2d80355b97d3494e0073ba35ef9c98e546
[ "MIT" ]
null
null
null
utilities/io/__init__.py
wong-ck/DeepSegment
01c04b2d80355b97d3494e0073ba35ef9c98e546
[ "MIT" ]
null
null
null
utilities/io/__init__.py
wong-ck/DeepSegment
01c04b2d80355b97d3494e0073ba35ef9c98e546
[ "MIT" ]
null
null
null
# Written by Chun Kit Wong and CIRC under MIT license: # https://github.com/wong-ck/DeepSegment/blob/master/LICENSE from utilities.io import reader from utilities.io import writer
30.166667
60
0.79558
29
181
4.965517
0.793103
0.180556
0.208333
0.291667
0
0
0
0
0
0
0
0
0.121547
181
5
61
36.2
0.90566
0.61326
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
21a65c79f6e405e20bf8d260075e144d89e0d784
496
py
Python
keras-ssd-master/run_ssd.py
dlsaavedra/Detector_GDXray
1e120f8fa548819eef1b86ccfbbe306b44405b6f
[ "MIT" ]
3
2021-05-27T07:27:44.000Z
2022-02-19T05:20:16.000Z
keras-ssd-master/run_ssd.py
dlsaavedra/Detector_GDXray
1e120f8fa548819eef1b86ccfbbe306b44405b6f
[ "MIT" ]
8
2020-09-25T22:34:27.000Z
2022-02-10T01:09:19.000Z
keras-ssd-master/run_ssd.py
dlsaavedra/Detector_GDXray
1e120f8fa548819eef1b86ccfbbe306b44405b6f
[ "MIT" ]
3
2020-03-18T20:27:18.000Z
2021-11-03T03:10:11.000Z
import os #os.mkdir('../Experimento_3/Resultados_ssd') #os.mkdir('../Experimento_3/Resultados_ssd/ssd512') #os.mkdir('../Experimento_3/Resultados_ssd/ssd300') #os.mkdir('../Experimento_3/Resultados_ssd/ssd7') print ('Training ssd7') os.system('python train.py -c config_7.json') print ('Testing ssd7') os.system('python train.py -c config_7.json') print ('Training ssd300') os.system('python train.py -c config_300.json') print ('Testing ssd300') os.system('python train.py -c config_7.json')
29.176471
51
0.741935
77
496
4.623377
0.285714
0.078652
0.202247
0.213483
0.800562
0.800562
0.441011
0.441011
0.328652
0.235955
0
0.054466
0.074597
496
16
52
31
0.721133
0.385081
0
0.333333
0
0
0.611296
0
0
0
0
0
0
1
0
true
0
0.111111
0
0.111111
0.444444
0
0
0
null
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
7
df3d519ebc3661f93895456c2163bba35a1c298c
26,227
py
Python
mealpy/system_based/AEO.py
rishavpramanik/mealpy
d4a4d5810f15837764e4ee61517350fef3dc92b3
[ "MIT" ]
null
null
null
mealpy/system_based/AEO.py
rishavpramanik/mealpy
d4a4d5810f15837764e4ee61517350fef3dc92b3
[ "MIT" ]
null
null
null
mealpy/system_based/AEO.py
rishavpramanik/mealpy
d4a4d5810f15837764e4ee61517350fef3dc92b3
[ "MIT" ]
null
null
null
#!/usr/bin/env python # Created by "Thieu" at 16:44, 18/03/2020 ----------% # Email: nguyenthieu2102@gmail.com % # Github: https://github.com/thieu1995 % # --------------------------------------------------% import numpy as np from copy import deepcopy from mealpy.optimizer import Optimizer class OriginalAEO(Optimizer): """ The original version of: Artificial Ecosystem-based Optimization (AEO) Links: 1. https://doi.org/10.1007/s00521-019-04452-x 2. https://www.mathworks.com/matlabcentral/fileexchange/72685-artificial-ecosystem-based-optimization-aeo Examples ~~~~~~~~ >>> import numpy as np >>> from mealpy.system_based.AEO import OriginalAEO >>> >>> def fitness_function(solution): >>> return np.sum(solution**2) >>> >>> problem_dict1 = { >>> "fit_func": fitness_function, >>> "lb": [-10, -15, -4, -2, -8], >>> "ub": [10, 15, 12, 8, 20], >>> "minmax": "min", >>> } >>> >>> epoch = 1000 >>> pop_size = 50 >>> model = OriginalAEO(problem_dict1, epoch, pop_size) >>> best_position, best_fitness = model.solve() >>> print(f"Solution: {best_position}, Fitness: {best_fitness}") References ~~~~~~~~~~ [1] Zhao, W., Wang, L. and Zhang, Z., 2020. Artificial ecosystem-based optimization: a novel nature-inspired meta-heuristic algorithm. Neural Computing and Applications, 32(13), pp.9383-9425. """ def __init__(self, problem, epoch=10000, pop_size=100, **kwargs): """ Args: problem (dict): The problem dictionary problem (dict): The problem dictionary epoch (int): maximum number of iterations, default = 10000 pop_size (int): number of population size, default = 100 """ super().__init__(problem, kwargs) self.epoch = self.validator.check_int("epoch", epoch, [1, 100000]) self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000]) self.nfe_per_epoch = 2 * self.pop_size self.sort_flag = True def evolve(self, epoch): """ The main operations (equations) of algorithm. 
Inherit from Optimizer class Args: epoch (int): The current iteration """ ## Production - Update the worst agent # Eq. 2, 3, 1 a = (1.0 - epoch / self.epoch) * np.random.uniform() x1 = (1 - a) * self.pop[-1][self.ID_POS] + a * np.random.uniform(self.problem.lb, self.problem.ub) pos_new = self.amend_position(x1, self.problem.lb, self.problem.ub) target = self.get_target_wrapper(pos_new) self.pop[-1] = [pos_new, target] ## Consumption - Update the whole population left pop_new = [] for idx in range(0, self.pop_size - 1): rand = np.random.random() # Eq. 4, 5, 6 v1 = np.random.normal(0, 1) v2 = np.random.normal(0, 1) c = 0.5 * v1 / abs(v2) # Consumption factor if idx == 0: j = 1 else: j = np.random.randint(0, idx) ### Herbivore if rand < 1.0 / 3: x_t1 = self.pop[idx][self.ID_POS] + c * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) # Eq. 6 ### Carnivore elif 1.0 / 3 <= rand and rand <= 2.0 / 3: x_t1 = self.pop[idx][self.ID_POS] + c * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS]) # Eq. 7 ### Omnivore else: r2 = np.random.uniform() x_t1 = self.pop[idx][self.ID_POS] + c * (r2 * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) + (1 - r2) * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS])) pos_new = self.amend_position(x_t1, self.problem.lb, self.problem.ub) pop_new.append([pos_new, None]) pop_new = self.update_target_wrapper_population(pop_new) pop_new.append(deepcopy(self.pop[-1])) pop_new = self.greedy_selection_population(self.pop, pop_new) ## find current best used in decomposition _, best = self.get_global_best_solution(pop_new) ## Decomposition ### Eq. 
10, 11, 12, 9 pop_child = [] for idx in range(0, self.pop_size): r3 = np.random.uniform() d = 3 * np.random.normal(0, 1) e = r3 * np.random.randint(1, 3) - 1 h = 2 * r3 - 1 x_t1 = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * pop_new[idx][self.ID_POS]) pos_new = self.amend_position(x_t1, self.problem.lb, self.problem.ub) pop_child.append([pos_new, None]) pop_child = self.update_target_wrapper_population(pop_child) self.pop = self.greedy_selection_population(pop_new, pop_child) class IAEO(OriginalAEO): """ The original version of: Improved Artificial Ecosystem-based Optimization (IAEO) Links: 1. https://doi.org/10.1016/j.ijhydene.2020.06.256 Examples ~~~~~~~~ >>> import numpy as np >>> from mealpy.system_based.AEO import IAEO >>> >>> def fitness_function(solution): >>> return np.sum(solution**2) >>> >>> problem_dict1 = { >>> "fit_func": fitness_function, >>> "lb": [-10, -15, -4, -2, -8], >>> "ub": [10, 15, 12, 8, 20], >>> "minmax": "min", >>> } >>> >>> epoch = 1000 >>> pop_size = 50 >>> model = IAEO(problem_dict1, epoch, pop_size) >>> best_position, best_fitness = model.solve() >>> print(f"Solution: {best_position}, Fitness: {best_fitness}") References ~~~~~~~~~~ [1] Rizk-Allah, R.M. and El-Fergany, A.A., 2021. Artificial ecosystem optimizer for parameters identification of proton exchange membrane fuel cells model. International Journal of Hydrogen Energy, 46(75), pp.37612-37627. """ def __init__(self, problem, epoch=10000, pop_size=100, **kwargs): """ Args: problem (dict): The problem dictionary epoch (int): maximum number of iterations, default = 10000 pop_size (int): number of population size, default = 100 """ super().__init__(problem, epoch, pop_size, **kwargs) def evolve(self, epoch): """ The main operations (equations) of algorithm. Inherit from Optimizer class Args: epoch (int): The current iteration """ ## Production - Update the worst agent # Eq. 
2, 3, 1 a = (1.0 - epoch / self.epoch) * np.random.uniform() x1 = (1 - a) * self.pop[-1][self.ID_POS] + a * np.random.uniform(self.problem.lb, self.problem.ub) pos_new = self.amend_position(x1, self.problem.lb, self.problem.ub) target = self.get_target_wrapper(pos_new) self.pop[-1] = [pos_new, target] ## Consumption - Update the whole population left pop_new = [] for idx in range(0, self.pop_size - 1): rand = np.random.random() # Eq. 4, 5, 6 v1 = np.random.normal(0, 1) v2 = np.random.normal(0, 1) c = 0.5 * v1 / abs(v2) # Consumption factor if idx == 0: j = 1 else: j = np.random.randint(0, idx) ### Herbivore if rand < 1.0 / 3: x_t1 = self.pop[idx][self.ID_POS] + c * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) # Eq. 6 ### Carnivore elif 1.0 / 3 <= rand and rand <= 2.0 / 3: x_t1 = self.pop[idx][self.ID_POS] + c * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS]) # Eq. 7 ### Omnivore else: r2 = np.random.uniform() x_t1 = self.pop[idx][self.ID_POS] + c * (r2 * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) + (1 - r2) * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS])) pos_new = self.amend_position(x_t1, self.problem.lb, self.problem.ub) pop_new.append([pos_new, None]) pop_new = self.update_target_wrapper_population(pop_new) pop_new.append(deepcopy(self.pop[-1])) pop_new = self.greedy_selection_population(self.pop, pop_new) ## find current best used in decomposition _, best = self.get_global_best_solution(pop_new) ## Decomposition ### Eq. 10, 11, 12, 9 pop_child = [] for idx in range(0, self.pop_size): r3 = np.random.uniform() d = 3 * np.random.normal(0, 1) e = r3 * np.random.randint(1, 3) - 1 h = 2 * r3 - 1 x_new = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * pop_new[idx][self.ID_POS]) if np.random.random() < 0.5: beta = 1 - (1 - 0) * ((epoch + 1) / self.epoch) # Eq. 
21 x_r = pop_new[np.random.randint(0, self.pop_size - 1)][self.ID_POS] if np.random.random() < 0.5: x_new = beta * x_r + (1 - beta) * pop_new[idx][self.ID_POS] else: x_new = beta * pop_new[idx][self.ID_POS] + (1 - beta) * x_r else: best[self.ID_POS] = best[self.ID_POS] + np.random.normal() * best[self.ID_POS] pos_new = self.amend_position(x_new, self.problem.lb, self.problem.ub) pop_child.append([pos_new, None]) pop_child = self.update_target_wrapper_population(pop_child) self.pop = self.greedy_selection_population(pop_new, pop_child) class EnhancedAEO(Optimizer): """ The original version of: Enhanced Artificial Ecosystem-Based Optimization (EAEO) Links: 1. https://doi.org/10.1109/ACCESS.2020.3027654 Examples ~~~~~~~~ >>> import numpy as np >>> from mealpy.system_based.AEO import EnhancedAEO >>> >>> def fitness_function(solution): >>> return np.sum(solution**2) >>> >>> problem_dict1 = { >>> "fit_func": fitness_function, >>> "lb": [-10, -15, -4, -2, -8], >>> "ub": [10, 15, 12, 8, 20], >>> "minmax": "min", >>> } >>> >>> epoch = 1000 >>> pop_size = 50 >>> model = EnhancedAEO(problem_dict1, epoch, pop_size) >>> best_position, best_fitness = model.solve() >>> print(f"Solution: {best_position}, Fitness: {best_fitness}") References ~~~~~~~~~~ [1] Eid, A., Kamel, S., Korashy, A. and Khurshaid, T., 2020. An enhanced artificial ecosystem-based optimization for optimal allocation of multiple distributed generations. IEEE Access, 8, pp.178493-178513. 
""" def __init__(self, problem, epoch=10000, pop_size=100, **kwargs): """ Args: problem (dict): The problem dictionary epoch (int): maximum number of iterations, default = 10000 pop_size (int): number of population size, default = 100 """ super().__init__(problem, kwargs) self.epoch = self.validator.check_int("epoch", epoch, [1, 100000]) self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000]) self.nfe_per_epoch = 2 * self.pop_size self.sort_flag = True def evolve(self, epoch): """ The main operations (equations) of algorithm. Inherit from Optimizer class Args: epoch (int): The current iteration """ ## Production - Update the worst agent # Eq. 13 a = 2 * (1 - (epoch + 1) / self.epoch) x1 = (1 - a) * self.pop[-1][self.ID_POS] + a * np.random.uniform(self.problem.lb, self.problem.ub) pos_new = self.amend_position(x1, self.problem.lb, self.problem.ub) target = self.get_target_wrapper(pos_new) self.pop[-1] = [pos_new, target] ## Consumption - Update the whole population left pop_new = [] for idx in range(0, self.pop_size - 1): rand = np.random.random() # Eq. 4, 5, 6 v1 = np.random.normal(0, 1) v2 = np.random.normal(0, 1) c = 0.5 * v1 / abs(v2) # Consumption factor r3 = 2 * np.pi * np.random.random() r4 = np.random.random() if idx == 0: j = 1 else: j = np.random.randint(0, idx) ### Herbivore if rand <= 1.0 / 3: # Eq. 15 if r4 <= 0.5: x_t1 = self.pop[idx][self.ID_POS] + np.sin(r3) * c * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) else: x_t1 = self.pop[idx][self.ID_POS] + np.cos(r3) * c * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) ### Carnivore elif 1.0 / 3 <= rand and rand <= 2.0 / 3: # Eq. 16 if r4 <= 0.5: x_t1 = self.pop[idx][self.ID_POS] + np.sin(r3) * c * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS]) else: x_t1 = self.pop[idx][self.ID_POS] + np.cos(r3) * c * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS]) ### Omnivore else: # Eq. 
17 r5 = np.random.random() if r4 <= 0.5: x_t1 = self.pop[idx][self.ID_POS] + np.sin(r5) * c * (r5 * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) + (1 - r5) * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS])) else: x_t1 = self.pop[idx][self.ID_POS] + np.cos(r5) * c * (r5 * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) + (1 - r5) * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS])) pos_new = self.amend_position(x_t1, self.problem.lb, self.problem.ub) pop_new.append([pos_new, None]) pop_new = self.update_target_wrapper_population(pop_new) pop_new.append(deepcopy(self.pop[-1])) pop_new = self.greedy_selection_population(self.pop, pop_new) ## find current best used in decomposition _, best = self.get_global_best_solution(pop_new) ## Decomposition ### Eq. 10, 11, 12, 9 pop_child = [] for idx in range(0, self.pop_size): r3 = np.random.uniform() d = 3 * np.random.normal(0, 1) e = r3 * np.random.randint(1, 3) - 1 h = 2 * r3 - 1 # x_new = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * agent_i[self.ID_POS]) if np.random.random() < 0.5: beta = 1 - (1 - 0) * ((epoch + 1) / self.epoch) # Eq. 21 r_idx = np.random.choice(list(set(range(0, self.pop_size)) - {idx})) x_r = pop_new[r_idx][self.ID_POS] # x_r = pop[np.random.randint(0, self.pop_size-1)][self.ID_POS] if np.random.random() < 0.5: x_new = beta * x_r + (1 - beta) * pop_new[idx][self.ID_POS] else: x_new = (1 - beta) * x_r + beta * pop_new[idx][self.ID_POS] else: x_new = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * pop_new[idx][self.ID_POS]) # x_new = best[self.ID_POS] + np.random.normal() * best[self.ID_POS] pos_new = self.amend_position(x_new, self.problem.lb, self.problem.ub) pop_child.append([pos_new, None]) pop_child = self.update_target_wrapper_population(pop_child) self.pop = self.greedy_selection_population(pop_new, pop_child) class ModifiedAEO(Optimizer): """ The original version of: Modified Artificial Ecosystem-Based Optimization (MAEO) Links: 1. 
https://doi.org/10.1109/ACCESS.2020.2973351 Examples ~~~~~~~~ >>> import numpy as np >>> from mealpy.system_based.AEO import ModifiedAEO >>> >>> def fitness_function(solution): >>> return np.sum(solution**2) >>> >>> problem_dict1 = { >>> "fit_func": fitness_function, >>> "lb": [-10, -15, -4, -2, -8], >>> "ub": [10, 15, 12, 8, 20], >>> "minmax": "min", >>> } >>> >>> epoch = 1000 >>> pop_size = 50 >>> model = ModifiedAEO(problem_dict1, epoch, pop_size) >>> best_position, best_fitness = model.solve() >>> print(f"Solution: {best_position}, Fitness: {best_fitness}") References ~~~~~~~~~~ [1] Menesy, A.S., Sultan, H.M., Korashy, A., Banakhr, F.A., Ashmawy, M.G. and Kamel, S., 2020. Effective parameter extraction of different polymer electrolyte membrane fuel cell stack models using a modified artificial ecosystem optimization algorithm. IEEE Access, 8, pp.31892-31909. """ def __init__(self, problem, epoch=10000, pop_size=100, **kwargs): """ Args: problem (dict): The problem dictionary epoch (int): maximum number of iterations, default = 10000 pop_size (int): number of population size, default = 100 """ super().__init__(problem, kwargs) self.epoch = self.validator.check_int("epoch", epoch, [1, 100000]) self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000]) self.nfe_per_epoch = 2 * self.pop_size self.sort_flag = True def evolve(self, epoch): """ The main operations (equations) of algorithm. Inherit from Optimizer class Args: epoch (int): The current iteration """ ## Production # Eq. 22 H = 2 * (1 - (epoch + 1) / self.epoch) a = (1 - (epoch + 1) / self.epoch) * np.random.random() x1 = (1 - a) * self.pop[-1][self.ID_POS] + a * np.random.uniform(self.problem.lb, self.problem.ub) pos_new = self.amend_position(x1, self.problem.lb, self.problem.ub) target = self.get_target_wrapper(pos_new) self.pop[-1] = [pos_new, target] ## Consumption - Update the whole population left pop_new = [] for idx in range(0, self.pop_size - 1): rand = np.random.random() # Eq. 
4, 5, 6 v1 = np.random.normal(0, 1) v2 = np.random.normal(0, 1) c = 0.5 * v1 / abs(v2) # Consumption factor if idx == 0: j = 1 else: j = np.random.randint(0, idx) ### Herbivore if rand <= 1.0 / 3: # Eq. 23 pos_new = self.pop[idx][self.ID_POS] + H * c * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) ### Carnivore elif 1.0 / 3 <= rand and rand <= 2.0 / 3: # Eq. 24 pos_new = self.pop[idx][self.ID_POS] + H * c * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS]) ### Omnivore else: # Eq. 25 r5 = np.random.random() pos_new = self.pop[idx][self.ID_POS] + H * c * (r5 * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) + (1 - r5) * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS])) pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub) pop_new.append([pos_new, None]) pop_new = self.update_target_wrapper_population(pop_new) pop_new.append(deepcopy(self.pop[-1])) pop_new = self.greedy_selection_population(self.pop, pop_new) ## find current best used in decomposition _, best = self.get_global_best_solution(pop_new) ## Decomposition ### Eq. 10, 11, 12, 9 pop_child = [] for idx in range(0, self.pop_size): r3 = np.random.uniform() d = 3 * np.random.normal(0, 1) e = r3 * np.random.randint(1, 3) - 1 h = 2 * r3 - 1 # x_new = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * agent_i[self.ID_POS]) if np.random.random() < 0.5: beta = 1 - (1 - 0) * ((epoch + 1) / self.epoch) # Eq. 
21 r_idx = np.random.choice(list(set(range(0, self.pop_size)) - {idx})) x_r = pop_new[r_idx][self.ID_POS] # x_r = pop[np.random.randint(0, self.pop_size-1)][self.ID_POS] if np.random.random() < 0.5: x_new = beta * x_r + (1 - beta) * pop_new[idx][self.ID_POS] else: x_new = (1 - beta) * x_r + beta * pop_new[idx][self.ID_POS] else: x_new = best[self.ID_POS] + d * (e * best[self.ID_POS] - h * pop_new[idx][self.ID_POS]) # x_new = best[self.ID_POS] + np.random.normal() * best[self.ID_POS] pos_new = self.amend_position(x_new, self.problem.lb, self.problem.ub) pop_child.append([pos_new, None]) pop_child = self.update_target_wrapper_population(pop_child) self.pop = self.greedy_selection_population(pop_new, pop_child) class AdaptiveAEO(Optimizer): """ The original version of: Adaptive Artificial Ecosystem Optimization (AAEO) Links: 1. https://doi.org/10.1109/ACCESS.2020.2973351 Notes ~~~~~ + Used linear weight factor reduce from 2 to 0 through time + Applied Levy-flight technique and the global best solution Examples ~~~~~~~~ >>> import numpy as np >>> from mealpy.system_based.AEO import AdaptiveAEO >>> >>> def fitness_function(solution): >>> return np.sum(solution**2) >>> >>> problem_dict1 = { >>> "fit_func": fitness_function, >>> "lb": [-10, -15, -4, -2, -8], >>> "ub": [10, 15, 12, 8, 20], >>> "minmax": "min", >>> } >>> >>> epoch = 1000 >>> pop_size = 50 >>> model = AdaptiveAEO(problem_dict1, epoch, pop_size) >>> best_position, best_fitness = model.solve() >>> print(f"Solution: {best_position}, Fitness: {best_fitness}") References ~~~~~~~~~~ [1] Under Review """ def __init__(self, problem, epoch=10000, pop_size=100, **kwargs): """ Args: problem (dict): The problem dictionary epoch (int): maximum number of iterations, default = 10000 pop_size (int): number of population size, default = 100 """ super().__init__(problem, kwargs) self.epoch = self.validator.check_int("epoch", epoch, [1, 100000]) self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000]) 
self.nfe_per_epoch = 2 * self.pop_size self.sort_flag = True def evolve(self, epoch): """ The main operations (equations) of algorithm. Inherit from Optimizer class Args: epoch (int): The current iteration """ ## Production - Update the worst agent # Eq. 2, 3, 1 wf = 2 * (1 - (epoch + 1) / self.epoch) # Weight factor a = (1.0 - epoch / self.epoch) * np.random.random() x1 = (1 - a) * self.pop[-1][self.ID_POS] + a * np.random.uniform(self.problem.lb, self.problem.ub) pos_new = self.amend_position(x1, self.problem.lb, self.problem.ub) target = self.get_target_wrapper(pos_new) self.pop[-1] = [pos_new, target] ## Consumption - Update the whole population left pop_new = [] for idx in range(0, self.pop_size - 1): if np.random.random() < 0.5: rand = np.random.random() # Eq. 4, 5, 6 c = 0.5 * np.random.normal(0, 1) / abs(np.random.normal(0, 1)) # Consumption factor if idx == 0: j = 1 else: j = np.random.randint(0, idx) ### Herbivore if rand < 1.0 / 3: pos_new = self.pop[idx][self.ID_POS] + wf * c * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) # Eq. 6 ### Omnivore elif 1.0 / 3 <= rand <= 2.0 / 3: pos_new = self.pop[idx][self.ID_POS] + wf * c * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS]) # Eq. 
7 ### Carnivore else: r2 = np.random.uniform() pos_new = self.pop[idx][self.ID_POS] + wf * c * (r2 * (self.pop[idx][self.ID_POS] - self.pop[0][self.ID_POS]) + (1 - r2) * (self.pop[idx][self.ID_POS] - self.pop[j][self.ID_POS])) else: pos_new = self.pop[idx][self.ID_POS] + self.get_levy_flight_step(1., 0.0001, case=-1) * \ (1.0 / np.sqrt(epoch + 1)) * np.sign(np.random.random() - 0.5) * (self.pop[idx][self.ID_POS] - self.g_best[self.ID_POS]) pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub) pop_new.append([pos_new, None]) pop_new = self.update_target_wrapper_population(pop_new) pop_new.append(deepcopy(self.pop[-1])) pop_new = self.greedy_selection_population(self.pop, pop_new) ## find current best used in decomposition _, best = self.get_global_best_solution(pop_new) ## Decomposition ### Eq. 10, 11, 12, 9 idx, pop, g_best, local_best pop_child = [] for idx in range(0, self.pop_size): if np.random.random() < 0.5: pos_new = best[self.ID_POS] + np.random.normal(0, 1, self.problem.n_dims) * (best[self.ID_POS] - pop_new[idx][self.ID_POS]) else: pos_new = best[self.ID_POS] + self.get_levy_flight_step(0.75, 0.001, case=-1) * \ 1.0 / np.sqrt(epoch + 1) * np.sign(np.random.random() - 0.5) * (best[self.ID_POS] - pop_new[idx][self.ID_POS]) pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub) pop_child.append([pos_new, None]) pop_child = self.update_target_wrapper_population(pop_child) self.pop = self.greedy_selection_population(pop_new, pop_child)
42.576299
146
0.540245
3,525
26,227
3.859291
0.088511
0.059688
0.076742
0.051161
0.884887
0.857395
0.847251
0.843649
0.833578
0.830859
0
0.049383
0.311282
26,227
615
147
42.645528
0.703759
0.308537
0
0.858156
0
0
0.00307
0
0
0
0
0
0
1
0.035461
false
0
0.010638
0
0.06383
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
df48f68f09cd9e3c161d880cc990b0cf100bd630
3,143
py
Python
stock_index_analyzer/stock_analyzer.py
billy0920/pygui
61ee2cebb22a0c8bb595cba39da33bd9988de9ba
[ "MIT" ]
null
null
null
stock_index_analyzer/stock_analyzer.py
billy0920/pygui
61ee2cebb22a0c8bb595cba39da33bd9988de9ba
[ "MIT" ]
null
null
null
stock_index_analyzer/stock_analyzer.py
billy0920/pygui
61ee2cebb22a0c8bb595cba39da33bd9988de9ba
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import time import datetime import requests # http://push2his.eastmoney.com/api/qt/stock/kline/get?cb=jQuery112401779541629980841_1575296121360&secid=1.000001&ut=fa5fd1943c7b386f172d6893dbfba10b&fields1=f1%2Cf2%2Cf3%2Cf4%2Cf5&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58&klt=101&fqt=0&beg=19900101&end=20220101&_=1575296121361 def get_next_month_date(): now = datetime.datetime.now() if now.day < 15: next_month = now + datetime.timedelta(days=31) else: next_month = now + datetime.timedelta(days=21) return "%04d%02d01" % (next_month.year, next_month.month) def get_stock_day_data(code): url = r'http://push2his.eastmoney.com/api/qt/stock/kline/get' session = requests.session() """Accept: */* Accept-Encoding: gzip, deflate Accept-Language: zh-CN,zh;q=0.9 Connection: keep-alive Cookie: qgqp_b_id=f4f0f8a3737f39cad08b5740421122e7; em_hq_fls=js; st_si=32527865650162; st_asi=delete; em-quote-version=topspeed; HAList=f-0-000001-%u4E0A%u8BC1%u6307%u6570%2Ca-sz-300059-%u4E1C%u65B9%u8D22%u5BCC; st_pvi=14243683468449; st_sp=2019-12-02%2000%3A09%3A46; st_inirUrl=https%3A%2F%2Fwww.baidu.com%2Flink; st_sn=11; st_psi=20191202221521546-113200301324-0912382810 Host: push2his.eastmoney.com Referer: http://quote.eastmoney.com/zs000001.html User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36""" headers = { "Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Accept-Language": "zh-CN,zh;q=0.9", "Connection": "keep-alive", "Cookie": "qgqp_b_id=f4f0f8a3737f39cad08b5740421122e7; em_hq_fls=js; st_si=32527865650162; st_asi=delete; em-quote-version=topspeed; HAList=f-0-000001-%u4E0A%u8BC1%u6307%u6570%2Ca-sz-300059-%u4E1C%u65B9%u8D22%u5BCC; st_pvi=14243683468449; st_sp=2019-12-02%2000%3A09%3A46; st_inirUrl=https%3A%2F%2Fwww.baidu.com%2Flink; st_sn=11; st_psi=20191202221521546-113200301324-0912382810", "Host": "push2his.eastmoney.com", "Referer": "http://quote.eastmoney.com/zs000001.html", 
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36", } """ cb: jQuery112401779541629980841_1575296121360 secid: 1.000001 ut: fa5fd1943c7b386f172d6893dbfba10b fields1: f1,f2,f3,f4,f5 fields2: f51,f52,f53,f54,f55,f56,f57,f58 klt: 101 fqt: 0 beg: 19900101 end: 20220101 _: 1575296121361 """ params = { "cb": "jQuery112401779541629980841_1575296121360", "secid": "1.%06d"%code, "ut": "fa5fd1943c7b386f172d6893dbfba10b", "fields1": "f1,f2,f3,f4,f5", "fields2": "f51,f52,f53,f54,f55,f56,f57,f58", "klt": "101", "fqt": "0", "beg": "19900101", "end": get_next_month_date(), "_": "%d" % int(time.time()*1000) } print(params) resp = session.get(url, headers=headers, params=params) print(resp.content) if __name__ == '__main__': datetime.timedelta() get_stock_day_data(1)
46.910448
387
0.688832
432
3,143
4.891204
0.416667
0.034075
0.037861
0.06673
0.800284
0.777567
0.746332
0.746332
0.746332
0.675343
0
0.272522
0.155902
3,143
67
388
46.910448
0.523935
0.09895
0
0
0
0.04878
0.488865
0.24226
0
0
0
0
0
1
0.04878
false
0
0.073171
0
0.146341
0.04878
0
0
0
null
0
0
0
1
1
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
80050ce95c259058b2f4012582f1d2f9451b8dd2
174
py
Python
ckstyle/reporter/Reporter.py
wangjeaf/CSSCheckStyle
d1b1ed89c61ca80d65f398ec4a07d73789197b04
[ "BSD-3-Clause" ]
21
2015-04-27T14:54:45.000Z
2021-11-08T09:12:08.000Z
ckstyle/reporter/Reporter.py
wangjeaf/CSSCheckStyle
d1b1ed89c61ca80d65f398ec4a07d73789197b04
[ "BSD-3-Clause" ]
null
null
null
ckstyle/reporter/Reporter.py
wangjeaf/CSSCheckStyle
d1b1ed89c61ca80d65f398ec4a07d73789197b04
[ "BSD-3-Clause" ]
6
2015-03-02T08:08:59.000Z
2016-03-16T14:52:38.000Z
class Reporter(): def __init__(self, checker): pass def doReport(self): pass def appendMsg(self): pass def export(self): pass
17.4
32
0.545977
19
174
4.789474
0.526316
0.230769
0.241758
0
0
0
0
0
0
0
0
0
0.362069
174
9
33
19.333333
0.81982
0
0
0.444444
0
0
0
0
0
0
0
0
0
1
0.444444
false
0.444444
0
0
0.555556
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
7
804c437e2753a14aa5ca04eba1ef9a05738fbd22
316,685
py
Python
baasplus/python/antchain_sdk_baasplus/models.py
alipay/antchain-openapi-prod-sdk
f78549e5135d91756093bd88d191ca260b28e083
[ "MIT" ]
6
2020-06-28T06:40:50.000Z
2022-02-25T11:02:18.000Z
baasplus/python/antchain_sdk_baasplus/models.py
alipay/antchain-openapi-prod-sdk
f78549e5135d91756093bd88d191ca260b28e083
[ "MIT" ]
null
null
null
baasplus/python/antchain_sdk_baasplus/models.py
alipay/antchain-openapi-prod-sdk
f78549e5135d91756093bd88d191ca260b28e083
[ "MIT" ]
6
2020-06-30T09:29:03.000Z
2022-01-07T10:42:22.000Z
# -*- coding: utf-8 -*- # This file is auto-generated, don't edit it. Thanks. from Tea.model import TeaModel from typing import List class Config(TeaModel): """ Model for initing client """ def __init__( self, access_key_id: str = None, access_key_secret: str = None, security_token: str = None, protocol: str = None, read_timeout: int = None, connect_timeout: int = None, http_proxy: str = None, https_proxy: str = None, endpoint: str = None, no_proxy: str = None, max_idle_conns: int = None, user_agent: str = None, socks_5proxy: str = None, socks_5net_work: str = None, max_idle_time_millis: int = None, keep_alive_duration_millis: int = None, max_requests: int = None, max_requests_per_host: int = None, ): # accesskey id self.access_key_id = access_key_id # accesskey secret self.access_key_secret = access_key_secret # security token self.security_token = security_token # http protocol self.protocol = protocol # read timeout self.read_timeout = read_timeout # connect timeout self.connect_timeout = connect_timeout # http proxy self.http_proxy = http_proxy # https proxy self.https_proxy = https_proxy # endpoint self.endpoint = endpoint # proxy white list self.no_proxy = no_proxy # max idle conns self.max_idle_conns = max_idle_conns # user agent self.user_agent = user_agent # socks5 proxy self.socks_5proxy = socks_5proxy # socks5 network self.socks_5net_work = socks_5net_work # 长链接最大空闲时长 self.max_idle_time_millis = max_idle_time_millis # 长链接最大连接时长 self.keep_alive_duration_millis = keep_alive_duration_millis # 最大连接数(长链接最大总数) self.max_requests = max_requests # 每个目标主机的最大连接数(分主机域名的长链接最大总数 self.max_requests_per_host = max_requests_per_host def validate(self): pass def to_map(self): result = dict() if self.access_key_id is not None: result['accessKeyId'] = self.access_key_id if self.access_key_secret is not None: result['accessKeySecret'] = self.access_key_secret if self.security_token is not None: result['securityToken'] = self.security_token if self.protocol is not None: 
result['protocol'] = self.protocol if self.read_timeout is not None: result['readTimeout'] = self.read_timeout if self.connect_timeout is not None: result['connectTimeout'] = self.connect_timeout if self.http_proxy is not None: result['httpProxy'] = self.http_proxy if self.https_proxy is not None: result['httpsProxy'] = self.https_proxy if self.endpoint is not None: result['endpoint'] = self.endpoint if self.no_proxy is not None: result['noProxy'] = self.no_proxy if self.max_idle_conns is not None: result['maxIdleConns'] = self.max_idle_conns if self.user_agent is not None: result['userAgent'] = self.user_agent if self.socks_5proxy is not None: result['socks5Proxy'] = self.socks_5proxy if self.socks_5net_work is not None: result['socks5NetWork'] = self.socks_5net_work if self.max_idle_time_millis is not None: result['maxIdleTimeMillis'] = self.max_idle_time_millis if self.keep_alive_duration_millis is not None: result['keepAliveDurationMillis'] = self.keep_alive_duration_millis if self.max_requests is not None: result['maxRequests'] = self.max_requests if self.max_requests_per_host is not None: result['maxRequestsPerHost'] = self.max_requests_per_host return result def from_map(self, m: dict = None): m = m or dict() if m.get('accessKeyId') is not None: self.access_key_id = m.get('accessKeyId') if m.get('accessKeySecret') is not None: self.access_key_secret = m.get('accessKeySecret') if m.get('securityToken') is not None: self.security_token = m.get('securityToken') if m.get('protocol') is not None: self.protocol = m.get('protocol') if m.get('readTimeout') is not None: self.read_timeout = m.get('readTimeout') if m.get('connectTimeout') is not None: self.connect_timeout = m.get('connectTimeout') if m.get('httpProxy') is not None: self.http_proxy = m.get('httpProxy') if m.get('httpsProxy') is not None: self.https_proxy = m.get('httpsProxy') if m.get('endpoint') is not None: self.endpoint = m.get('endpoint') if m.get('noProxy') is not None: self.no_proxy = 
m.get('noProxy') if m.get('maxIdleConns') is not None: self.max_idle_conns = m.get('maxIdleConns') if m.get('userAgent') is not None: self.user_agent = m.get('userAgent') if m.get('socks5Proxy') is not None: self.socks_5proxy = m.get('socks5Proxy') if m.get('socks5NetWork') is not None: self.socks_5net_work = m.get('socks5NetWork') if m.get('maxIdleTimeMillis') is not None: self.max_idle_time_millis = m.get('maxIdleTimeMillis') if m.get('keepAliveDurationMillis') is not None: self.keep_alive_duration_millis = m.get('keepAliveDurationMillis') if m.get('maxRequests') is not None: self.max_requests = m.get('maxRequests') if m.get('maxRequestsPerHost') is not None: self.max_requests_per_host = m.get('maxRequestsPerHost') return self class BlockInfo(TeaModel): def __init__( self, biz_id: str = None, block_hash: str = None, parent_hash: str = None, height: int = None, timestamp: int = None, biz_data: str = None, transaction_size: int = None, version: str = None, size: int = None, ): # 区块链唯一性标识 self.biz_id = biz_id # 区块哈希 self.block_hash = block_hash # 上一个区块的hash self.parent_hash = parent_hash # 块高 self.height = height # 出块时间 self.timestamp = timestamp # 业务数据 self.biz_data = biz_data # 包含交易数 self.transaction_size = transaction_size # 版本 self.version = version # size self.size = size def validate(self): self.validate_required(self.biz_id, 'biz_id') self.validate_required(self.block_hash, 'block_hash') self.validate_required(self.parent_hash, 'parent_hash') self.validate_required(self.height, 'height') self.validate_required(self.timestamp, 'timestamp') self.validate_required(self.biz_data, 'biz_data') self.validate_required(self.transaction_size, 'transaction_size') self.validate_required(self.version, 'version') self.validate_required(self.size, 'size') def to_map(self): result = dict() if self.biz_id is not None: result['biz_id'] = self.biz_id if self.block_hash is not None: result['block_hash'] = self.block_hash if self.parent_hash is not None: result['parent_hash'] = 
self.parent_hash if self.height is not None: result['height'] = self.height if self.timestamp is not None: result['timestamp'] = self.timestamp if self.biz_data is not None: result['biz_data'] = self.biz_data if self.transaction_size is not None: result['transaction_size'] = self.transaction_size if self.version is not None: result['version'] = self.version if self.size is not None: result['size'] = self.size return result def from_map(self, m: dict = None): m = m or dict() if m.get('biz_id') is not None: self.biz_id = m.get('biz_id') if m.get('block_hash') is not None: self.block_hash = m.get('block_hash') if m.get('parent_hash') is not None: self.parent_hash = m.get('parent_hash') if m.get('height') is not None: self.height = m.get('height') if m.get('timestamp') is not None: self.timestamp = m.get('timestamp') if m.get('biz_data') is not None: self.biz_data = m.get('biz_data') if m.get('transaction_size') is not None: self.transaction_size = m.get('transaction_size') if m.get('version') is not None: self.version = m.get('version') if m.get('size') is not None: self.size = m.get('size') return self class Institution(TeaModel): def __init__( self, code: str = None, name: str = None, ): # 人行联行号/行政地区编码 self.code = code # 银行全称/行政地区名称 self.name = name def validate(self): self.validate_required(self.code, 'code') self.validate_required(self.name, 'name') def to_map(self): result = dict() if self.code is not None: result['code'] = self.code if self.name is not None: result['name'] = self.name return result def from_map(self, m: dict = None): m = m or dict() if m.get('code') is not None: self.code = m.get('code') if m.get('name') is not None: self.name = m.get('name') return self class AntiPiracyResultObject(TeaModel): def __init__( self, infr_host: str = None, infr_time: int = None, infr_title: str = None, infr_url: str = None, production_type: str = None, similarity: str = None, ): # 侵权主体 self.infr_host = infr_host # 侵权内容上传时间,number of milliseconds since the epoch of 
1970-01-01T00:00:00Z self.infr_time = infr_time # 侵权标题 self.infr_title = infr_title # 侵权网址 self.infr_url = infr_url # 默认值:VIDEO self.production_type = production_type # 相似度 self.similarity = similarity def validate(self): pass def to_map(self): result = dict() if self.infr_host is not None: result['infr_host'] = self.infr_host if self.infr_time is not None: result['infr_time'] = self.infr_time if self.infr_title is not None: result['infr_title'] = self.infr_title if self.infr_url is not None: result['infr_url'] = self.infr_url if self.production_type is not None: result['production_type'] = self.production_type if self.similarity is not None: result['similarity'] = self.similarity return result def from_map(self, m: dict = None): m = m or dict() if m.get('infr_host') is not None: self.infr_host = m.get('infr_host') if m.get('infr_time') is not None: self.infr_time = m.get('infr_time') if m.get('infr_title') is not None: self.infr_title = m.get('infr_title') if m.get('infr_url') is not None: self.infr_url = m.get('infr_url') if m.get('production_type') is not None: self.production_type = m.get('production_type') if m.get('similarity') is not None: self.similarity = m.get('similarity') return self class HitDetectItems(TeaModel): def __init__( self, detect_type_code: str = None, hit_detect_resource: str = None, hit_content: str = None, detect_resource_level: str = None, ): # RULEORMODEL("RULEORMODEL", "规则或模型"), KEYWORDS("KEYWORDS", "关键字检测 "), REPEAT_MODEL("REPEAT_MODEL", "防重复模型"), REGEX("regex", "正则表达式"), URL("url", "URL检测"), SEXY_PIC("sexyPic", "黄图检测"), SAMPLE_PIC("samplePic", "样图检测"), OCR("ocr", "图文识别"), PICTURE_FACE("picture_face","图片人脸检测"), QRCODE("QRCode", "二维码检测"), MDP_MODEL("mdpModel", "mdp检测"), ANTI_SPAM_MODEL("anti_spam_model", "反垃圾模型"); self.detect_type_code = detect_type_code # 命中的检测项的资源: 如命中关键字,则存关键字,如命中正则表达式,则保存正则表达式 self.hit_detect_resource = hit_detect_resource # 保存被命中的内容: 如正则表达式,则保存被正则表达式命中的内容 self.hit_content = hit_content # 级别 
self.detect_resource_level = detect_resource_level def validate(self): pass def to_map(self): result = dict() if self.detect_type_code is not None: result['detect_type_code'] = self.detect_type_code if self.hit_detect_resource is not None: result['hit_detect_resource'] = self.hit_detect_resource if self.hit_content is not None: result['hit_content'] = self.hit_content if self.detect_resource_level is not None: result['detect_resource_level'] = self.detect_resource_level return result def from_map(self, m: dict = None): m = m or dict() if m.get('detect_type_code') is not None: self.detect_type_code = m.get('detect_type_code') if m.get('hit_detect_resource') is not None: self.hit_detect_resource = m.get('hit_detect_resource') if m.get('hit_content') is not None: self.hit_content = m.get('hit_content') if m.get('detect_resource_level') is not None: self.detect_resource_level = m.get('detect_resource_level') return self class BizInfo(TeaModel): def __init__( self, client_tenent: str = None, code: str = None, product_code: str = None, ): # BPWZPFCN self.client_tenent = client_tenent # 业务代码 self.code = code # 内部产品码 self.product_code = product_code def validate(self): pass def to_map(self): result = dict() if self.client_tenent is not None: result['client_tenent'] = self.client_tenent if self.code is not None: result['code'] = self.code if self.product_code is not None: result['product_code'] = self.product_code return result def from_map(self, m: dict = None): m = m or dict() if m.get('client_tenent') is not None: self.client_tenent = m.get('client_tenent') if m.get('code') is not None: self.code = m.get('code') if m.get('product_code') is not None: self.product_code = m.get('product_code') return self class DidDocServicesInfo(TeaModel): def __init__( self, extension: str = None, id: str = None, service_endpoint: str = None, type: str = None, ): # 服务的扩展字段 self.extension = extension # 服务ID,必须保证该服务ID在did doc中是唯一的。对于保留类型服务: DidAuthService, 有且只能有一个,并且id必须为didauth-1; 
VerifiableClaimRepository, 有且只有一个,并且id必须为vcrepository-1; self.id = id # 服务的可访问地址 self.service_endpoint = service_endpoint # 服务类型,必须是已经注册的服务类型,或者是默认保留的服务类型 self.type = type def validate(self): self.validate_required(self.id, 'id') self.validate_required(self.service_endpoint, 'service_endpoint') self.validate_required(self.type, 'type') def to_map(self): result = dict() if self.extension is not None: result['extension'] = self.extension if self.id is not None: result['id'] = self.id if self.service_endpoint is not None: result['service_endpoint'] = self.service_endpoint if self.type is not None: result['type'] = self.type return result def from_map(self, m: dict = None): m = m or dict() if m.get('extension') is not None: self.extension = m.get('extension') if m.get('id') is not None: self.id = m.get('id') if m.get('service_endpoint') is not None: self.service_endpoint = m.get('service_endpoint') if m.get('type') is not None: self.type = m.get('type') return self class UpdateBmpbrowserPrivilegeRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, bizid: str = None, phone_numbers: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # bizid self.bizid = bizid # 取消查看权限的支付宝电话号码集合 self.phone_numbers = phone_numbers def validate(self): self.validate_required(self.bizid, 'bizid') self.validate_required(self.phone_numbers, 'phone_numbers') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.bizid is not None: result['bizid'] = self.bizid if self.phone_numbers is not None: result['phone_numbers'] = self.phone_numbers return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: 
self.product_instance_id = m.get('product_instance_id') if m.get('bizid') is not None: self.bizid = m.get('bizid') if m.get('phone_numbers') is not None: self.phone_numbers = m.get('phone_numbers') return self class UpdateBmpbrowserPrivilegeResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, status: int = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 批量更新权限成功与否 self.status = status def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.status is not None: result['status'] = self.status return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('status') is not None: self.status = m.get('status') return self class QueryIndividualidInternalmaskRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_info: BizInfo = None, cert_no: str = None, mobile: str = None, name: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 用于内部统计的参数,外部用户请忽略 self.biz_info = biz_info # 被核验人身份证号码后四位 self.cert_no = cert_no # 被核验人手机号码 self.mobile = mobile # 被核验人姓名的一部分 self.name = name def validate(self): self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() self.validate_required(self.cert_no, 'cert_no') self.validate_required(self.mobile, 'mobile') self.validate_required(self.name, 'name') def to_map(self): result = dict() if 
self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() if self.cert_no is not None: result['cert_no'] = self.cert_no if self.mobile is not None: result['mobile'] = self.mobile if self.name is not None: result['name'] = self.name return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) if m.get('cert_no') is not None: self.cert_no = m.get('cert_no') if m.get('mobile') is not None: self.mobile = m.get('mobile') if m.get('name') is not None: self.name = m.get('name') return self class QueryIndividualidInternalmaskResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, accepted: bool = None, verify_url: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 是否通过 self.accepted = accepted # self.verify_url = verify_url def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.accepted is not None: result['accepted'] = self.accepted if self.verify_url is not None: result['verify_url'] = self.verify_url return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if 
m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('accepted') is not None: self.accepted = m.get('accepted') if m.get('verify_url') is not None: self.verify_url = m.get('verify_url') return self class QueryEnterpriseidInternalfourmetaRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_info: BizInfo = None, ep_cert_name: str = None, ep_cert_no: str = None, ep_cert_type: str = None, legal_person_cert_name: str = None, legal_person_cert_no: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 用于内部统计的参数,外部用户请忽略 self.biz_info = biz_info # 企业名称 self.ep_cert_name = ep_cert_name # 企业证件号 self.ep_cert_no = ep_cert_no # 企业证件类型(NATIONAL_LEGAL(工商注册号)或 NATIONAL_LEGAL_MERGE ( 社会统一信用代码)) self.ep_cert_type = ep_cert_type # 法人姓名 self.legal_person_cert_name = legal_person_cert_name # 企业法人身份证号码 self.legal_person_cert_no = legal_person_cert_no def validate(self): self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() self.validate_required(self.ep_cert_name, 'ep_cert_name') self.validate_required(self.ep_cert_no, 'ep_cert_no') self.validate_required(self.ep_cert_type, 'ep_cert_type') self.validate_required(self.legal_person_cert_name, 'legal_person_cert_name') self.validate_required(self.legal_person_cert_no, 'legal_person_cert_no') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no if self.ep_cert_type is not None: result['ep_cert_type'] = self.ep_cert_type if self.legal_person_cert_name is not None: result['legal_person_cert_name'] = 
self.legal_person_cert_name if self.legal_person_cert_no is not None: result['legal_person_cert_no'] = self.legal_person_cert_no return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: self.ep_cert_no = m.get('ep_cert_no') if m.get('ep_cert_type') is not None: self.ep_cert_type = m.get('ep_cert_type') if m.get('legal_person_cert_name') is not None: self.legal_person_cert_name = m.get('legal_person_cert_name') if m.get('legal_person_cert_no') is not None: self.legal_person_cert_no = m.get('legal_person_cert_no') return self class QueryEnterpriseidInternalfourmetaResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, enterprise_status: str = None, open_time: str = None, passed: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 企业经营状态 self.enterprise_status = enterprise_status # 营业期限 self.open_time = open_time # 认证是否通过 self.passed = passed def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.enterprise_status is not None: result['enterprise_status'] = self.enterprise_status if self.open_time is not None: result['open_time'] = self.open_time if self.passed is not None: result['passed'] = self.passed return result def from_map(self, m: dict = None): m = m or dict() if 
m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('enterprise_status') is not None: self.enterprise_status = m.get('enterprise_status') if m.get('open_time') is not None: self.open_time = m.get('open_time') if m.get('passed') is not None: self.passed = m.get('passed') return self class QueryEnterpriseidInternalthreemetaRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_info: BizInfo = None, ep_cert_name: str = None, ep_cert_no: str = None, ep_cert_type: str = None, legal_person_cert_name: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 用于内部统计的参数,外部用户请忽略 self.biz_info = biz_info # 企业名称 self.ep_cert_name = ep_cert_name # 企业证件号 self.ep_cert_no = ep_cert_no # 证件类型 self.ep_cert_type = ep_cert_type # 法人姓名 self.legal_person_cert_name = legal_person_cert_name def validate(self): if self.biz_info: self.biz_info.validate() self.validate_required(self.ep_cert_name, 'ep_cert_name') self.validate_required(self.ep_cert_no, 'ep_cert_no') self.validate_required(self.ep_cert_type, 'ep_cert_type') self.validate_required(self.legal_person_cert_name, 'legal_person_cert_name') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no if self.ep_cert_type is not None: result['ep_cert_type'] = self.ep_cert_type if self.legal_person_cert_name is not None: result['legal_person_cert_name'] = self.legal_person_cert_name return result def 
from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: self.ep_cert_no = m.get('ep_cert_no') if m.get('ep_cert_type') is not None: self.ep_cert_type = m.get('ep_cert_type') if m.get('legal_person_cert_name') is not None: self.legal_person_cert_name = m.get('legal_person_cert_name') return self class QueryEnterpriseidInternalthreemetaResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, enterprise_status: str = None, open_time: str = None, passed: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 经营状态 self.enterprise_status = enterprise_status # 营业期限 self.open_time = open_time # 认证是否通过 self.passed = passed def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.enterprise_status is not None: result['enterprise_status'] = self.enterprise_status if self.open_time is not None: result['open_time'] = self.open_time if self.passed is not None: result['passed'] = self.passed return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('enterprise_status') is not None: 
self.enterprise_status = m.get('enterprise_status') if m.get('open_time') is not None: self.open_time = m.get('open_time') if m.get('passed') is not None: self.passed = m.get('passed') return self class QueryEnterpriseidInternaltwometaRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_info: BizInfo = None, ep_cert_name: str = None, ep_cert_no: str = None, ep_cert_type: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 用于内部统计的参数,外部用户请忽略 self.biz_info = biz_info # 企业名称 self.ep_cert_name = ep_cert_name # 企业证件号 self.ep_cert_no = ep_cert_no # 企业证件类型(NATIONAL_LEGAL(工商注册号)或 NATIONAL_LEGAL_MERGE ( 社会统一信用代码 self.ep_cert_type = ep_cert_type def validate(self): self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() self.validate_required(self.ep_cert_name, 'ep_cert_name') self.validate_required(self.ep_cert_no, 'ep_cert_no') self.validate_required(self.ep_cert_type, 'ep_cert_type') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no if self.ep_cert_type is not None: result['ep_cert_type'] = self.ep_cert_type return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: 
# NOTE(review): this span was reconstructed from a whitespace-mangled dump of
# generated Tea-model classes.  The tail of the class that precedes
# QueryEnterpriseidInternaltwometaResponse (the 'ep_cert_no' / 'ep_cert_type'
# handling at the end of its from_map) was cut off at the chunk boundary and
# could not be reattached here — confirm against the full file.
class QueryEnterpriseidInternaltwometaResponse(TeaModel):
    """Response model for the enterprise two-meta internal verification query."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        enterprise_status: str = None,
        open_time: str = None,
        passed: bool = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Operating status of the enterprise.
        self.enterprise_status = enterprise_status
        # Business term (period of operation).
        self.open_time = open_time
        # Whether the verification passed.
        self.passed = passed

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.enterprise_status is not None:
            result['enterprise_status'] = self.enterprise_status
        if self.open_time is not None:
            result['open_time'] = self.open_time
        if self.passed is not None:
            result['passed'] = self.passed
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('enterprise_status') is not None:
            self.enterprise_status = m.get('enterprise_status')
        if m.get('open_time') is not None:
            self.open_time = m.get('open_time')
        if m.get('passed') is not None:
            self.passed = m.get('passed')
        return self


class InitEnterpriseidFaceauthRequest(TeaModel):
    """Request model for initializing an enterprise face verification."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        ep_cert_name: str = None,
        ep_cert_no: str = None,
        ep_cert_type: str = None,
        legal_person_cert_name: str = None,
        legal_person_cert_no: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Enterprise name.
        self.ep_cert_name = ep_cert_name
        # Enterprise certificate number.
        self.ep_cert_no = ep_cert_no
        # Enterprise certificate type: NATIONAL_LEGAL (business registration
        # number) or NATIONAL_LEGAL_MERGE (unified social credit code).
        self.ep_cert_type = ep_cert_type
        # Name of the enterprise's legal representative.
        self.legal_person_cert_name = legal_person_cert_name
        # ID number of the legal representative (only ID card numbers are
        # currently supported).
        self.legal_person_cert_no = legal_person_cert_no

    def validate(self):
        self.validate_required(self.ep_cert_name, 'ep_cert_name')
        self.validate_required(self.ep_cert_no, 'ep_cert_no')
        self.validate_required(self.ep_cert_type, 'ep_cert_type')
        self.validate_required(self.legal_person_cert_name, 'legal_person_cert_name')
        self.validate_required(self.legal_person_cert_no, 'legal_person_cert_no')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.ep_cert_name is not None:
            result['ep_cert_name'] = self.ep_cert_name
        if self.ep_cert_no is not None:
            result['ep_cert_no'] = self.ep_cert_no
        if self.ep_cert_type is not None:
            result['ep_cert_type'] = self.ep_cert_type
        if self.legal_person_cert_name is not None:
            result['legal_person_cert_name'] = self.legal_person_cert_name
        if self.legal_person_cert_no is not None:
            result['legal_person_cert_no'] = self.legal_person_cert_no
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('ep_cert_name') is not None:
            self.ep_cert_name = m.get('ep_cert_name')
        if m.get('ep_cert_no') is not None:
            self.ep_cert_no = m.get('ep_cert_no')
        if m.get('ep_cert_type') is not None:
            self.ep_cert_type = m.get('ep_cert_type')
        if m.get('legal_person_cert_name') is not None:
            self.legal_person_cert_name = m.get('legal_person_cert_name')
        if m.get('legal_person_cert_no') is not None:
            self.legal_person_cert_no = m.get('legal_person_cert_no')
        return self


class InitEnterpriseidFaceauthResponse(TeaModel):
    """Response model for initializing an enterprise face verification."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        biz_no: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Unique business identifier of this verification.
        self.biz_no = biz_no

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.biz_no is not None:
            result['biz_no'] = self.biz_no
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('biz_no') is not None:
            self.biz_no = m.get('biz_no')
        return self


class QueryEnterpriseidFaceauthRequest(TeaModel):
    """Request model for querying an enterprise face verification result."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_no: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Unique identifier of the verification.
        self.biz_no = biz_no

    def validate(self):
        self.validate_required(self.biz_no, 'biz_no')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_no is not None:
            result['biz_no'] = self.biz_no
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_no') is not None:
            self.biz_no = m.get('biz_no')
        return self


class QueryEnterpriseidFaceauthResponse(TeaModel):
    """Response model for querying an enterprise face verification result."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        biz_no: str = None,
        failed_code: str = None,
        failed_message: str = None,
        passed: bool = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Unique identifier of the verification.
        self.biz_no = biz_no
        # Error code when the verification failed.
        self.failed_code = failed_code
        # Reason message when the verification failed.
        self.failed_message = failed_message
        # Whether the verification passed.
        self.passed = passed

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.biz_no is not None:
            result['biz_no'] = self.biz_no
        if self.failed_code is not None:
            result['failed_code'] = self.failed_code
        if self.failed_message is not None:
            result['failed_message'] = self.failed_message
        if self.passed is not None:
            result['passed'] = self.passed
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('biz_no') is not None:
            self.biz_no = m.get('biz_no')
        if m.get('failed_code') is not None:
            self.failed_code = m.get('failed_code')
        if m.get('failed_message') is not None:
            self.failed_message = m.get('failed_message')
        if m.get('passed') is not None:
            self.passed = m.get('passed')
        return self


class QueryIndividualidInternalfourmetaRequest(TeaModel):
    """Request model for the individual four-meta internal verification."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        bank_card_no: str = None,
        biz_info: BizInfo = None,
        cert_no: str = None,
        mobile: str = None,
        name: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Bank card number of the person being verified.
        self.bank_card_no = bank_card_no
        # Parameter used for internal statistics; external users can ignore it.
        self.biz_info = biz_info
        # ID card number of the person being verified.
        self.cert_no = cert_no
        # Mobile number of the person being verified.
        self.mobile = mobile
        # Name of the person being verified.
        self.name = name

    def validate(self):
        self.validate_required(self.bank_card_no, 'bank_card_no')
        self.validate_required(self.biz_info, 'biz_info')
        if self.biz_info:
            self.biz_info.validate()
        self.validate_required(self.cert_no, 'cert_no')
        self.validate_required(self.mobile, 'mobile')
        self.validate_required(self.name, 'name')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.bank_card_no is not None:
            result['bank_card_no'] = self.bank_card_no
        if self.biz_info is not None:
            result['biz_info'] = self.biz_info.to_map()
        if self.cert_no is not None:
            result['cert_no'] = self.cert_no
        if self.mobile is not None:
            result['mobile'] = self.mobile
        if self.name is not None:
            result['name'] = self.name
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('bank_card_no') is not None:
            self.bank_card_no = m.get('bank_card_no')
        if m.get('biz_info') is not None:
            temp_model = BizInfo()
            self.biz_info = temp_model.from_map(m['biz_info'])
        if m.get('cert_no') is not None:
            self.cert_no = m.get('cert_no')
        if m.get('mobile') is not None:
            self.mobile = m.get('mobile')
        if m.get('name') is not None:
            self.name = m.get('name')
        return self


class QueryIndividualidInternalfourmetaResponse(TeaModel):
    """Response model for the individual four-meta internal verification."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        accepted: bool = None,
        verify_url: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Whether the verification passed.
        self.accepted = accepted
        # (no description in the original spec)
        self.verify_url = verify_url

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.accepted is not None:
            result['accepted'] = self.accepted
        if self.verify_url is not None:
            result['verify_url'] = self.verify_url
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('accepted') is not None:
            self.accepted = m.get('accepted')
        if m.get('verify_url') is not None:
            self.verify_url = m.get('verify_url')
        return self


class QueryIndividualidInternalthreemetaRequest(TeaModel):
    """Request model for the individual three-meta internal verification."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_info: BizInfo = None,
        cert_no: str = None,
        mobile: str = None,
        name: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Parameter used for internal statistics; external users can ignore it.
        self.biz_info = biz_info
        # ID card number of the person being verified.
        self.cert_no = cert_no
        # Mobile number of the person being verified.
        self.mobile = mobile
        # Name of the person being verified.
        self.name = name

    def validate(self):
        self.validate_required(self.biz_info, 'biz_info')
        if self.biz_info:
            self.biz_info.validate()
        self.validate_required(self.cert_no, 'cert_no')
        self.validate_required(self.mobile, 'mobile')
        self.validate_required(self.name, 'name')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_info is not None:
            result['biz_info'] = self.biz_info.to_map()
        if self.cert_no is not None:
            result['cert_no'] = self.cert_no
        if self.mobile is not None:
            result['mobile'] = self.mobile
        if self.name is not None:
            result['name'] = self.name
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_info') is not None:
            temp_model = BizInfo()
            self.biz_info = temp_model.from_map(m['biz_info'])
        if m.get('cert_no') is not None:
            self.cert_no = m.get('cert_no')
        if m.get('mobile') is not None:
            self.mobile = m.get('mobile')
        if m.get('name') is not None:
            self.name = m.get('name')
        return self


class QueryIndividualidInternalthreemetaResponse(TeaModel):
    """Response model for the individual three-meta internal verification."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        accepted: bool = None,
        verify_url: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Whether the verification passed.
        self.accepted = accepted
        # (no description in the original spec)
        self.verify_url = verify_url

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.accepted is not None:
            result['accepted'] = self.accepted
        if self.verify_url is not None:
            result['verify_url'] = self.verify_url
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('accepted') is not None:
            self.accepted = m.get('accepted')
        if m.get('verify_url') is not None:
            self.verify_url = m.get('verify_url')
        return self


class QueryIndividualidInternaltwometaRequest(TeaModel):
    """Request model for the individual two-meta internal verification."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_info: BizInfo = None,
        cert_no: str = None,
        name: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Parameter used for internal statistics; external users can ignore it.
        self.biz_info = biz_info
        # ID card number of the person being verified.
        self.cert_no = cert_no
        # Name of the person being verified.
        self.name = name

    def validate(self):
        self.validate_required(self.biz_info, 'biz_info')
        if self.biz_info:
            self.biz_info.validate()
        self.validate_required(self.cert_no, 'cert_no')
        self.validate_required(self.name, 'name')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_info is not None:
            result['biz_info'] = self.biz_info.to_map()
        if self.cert_no is not None:
            result['cert_no'] = self.cert_no
        if self.name is not None:
            result['name'] = self.name
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_info') is not None:
            temp_model = BizInfo()
            self.biz_info = temp_model.from_map(m['biz_info'])
        if m.get('cert_no') is not None:
            self.cert_no = m.get('cert_no')
        if m.get('name') is not None:
            self.name = m.get('name')
        return self


class QueryIndividualidInternaltwometaResponse(TeaModel):
    """Response model for the individual two-meta internal verification."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        accepted: bool = None,
        verify_url: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Whether the verification passed.
        self.accepted = accepted
        # (no description in the original spec)
        self.verify_url = verify_url

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.accepted is not None:
            result['accepted'] = self.accepted
        if self.verify_url is not None:
            result['verify_url'] = self.verify_url
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('accepted') is not None:
            self.accepted = m.get('accepted')
        if m.get('verify_url') is not None:
            self.verify_url = m.get('verify_url')
        return self


class CreateBaicorpInternalevaluationasyncRequest(TeaModel):
    """Request model for creating an asynchronous content evaluation task."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_id: str = None,
        biz_info: BizInfo = None,
        callback: str = None,
        callback_param: str = None,
        custom_id: str = None,
        entity_data: str = None,
        entity_type: str = None,
        entity_url: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Scene id.
        self.biz_id = biz_id
        # Internal business code.
        self.biz_info = biz_info
        # Callback URL. Because calls are asynchronous, some results
        # (e.g. images and videos) are returned through the callback.
        self.callback = callback
        # Callback parameter.
        self.callback_param = callback_param
        # Business-defined id, used to correlate returned data.
        self.custom_id = custom_id
        # Raw data of the content to check; currently only used for the text
        # type. entity_url and entity_data are mutually exclusive.
        self.entity_data = entity_data
        # Type of the content to evaluate.
        self.entity_type = entity_type
        # URL of the content to check (HTTPS supported). entity_url and
        # entity_data are mutually exclusive.
        self.entity_url = entity_url

    def validate(self):
        self.validate_required(self.biz_id, 'biz_id')
        self.validate_required(self.biz_info, 'biz_info')
        if self.biz_info:
            self.biz_info.validate()
        self.validate_required(self.custom_id, 'custom_id')
        self.validate_required(self.entity_type, 'entity_type')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_id is not None:
            result['biz_id'] = self.biz_id
        if self.biz_info is not None:
            result['biz_info'] = self.biz_info.to_map()
        if self.callback is not None:
            result['callback'] = self.callback
        if self.callback_param is not None:
            result['callback_param'] = self.callback_param
        if self.custom_id is not None:
            result['custom_id'] = self.custom_id
        if self.entity_data is not None:
            result['entity_data'] = self.entity_data
        if self.entity_type is not None:
            result['entity_type'] = self.entity_type
        if self.entity_url is not None:
            result['entity_url'] = self.entity_url
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_id') is not None:
            self.biz_id = m.get('biz_id')
        if m.get('biz_info') is not None:
            temp_model = BizInfo()
            self.biz_info = temp_model.from_map(m['biz_info'])
        if m.get('callback') is not None:
            self.callback = m.get('callback')
        if m.get('callback_param') is not None:
            self.callback_param = m.get('callback_param')
        if m.get('custom_id') is not None:
            self.custom_id = m.get('custom_id')
        if m.get('entity_data') is not None:
            self.entity_data = m.get('entity_data')
        if m.get('entity_type') is not None:
            self.entity_type = m.get('entity_type')
        if m.get('entity_url') is not None:
            self.entity_url = m.get('entity_url')
        return self


class CreateBaicorpInternalevaluationasyncResponse(TeaModel):
    """Response model for creating an asynchronous content evaluation task."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        content_score: str = None,
        content_score_desc: str = None,
        custom_id: str = None,
        repeat_reason: str = None,
        repeat_result: bool = None,
        risk_result: str = None,
        risk_result_desc: str = None,
        task_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Content quality score.
        self.content_score = content_score
        # Description of the quality score.
        self.content_score_desc = content_score_desc
        # Business-defined id.
        self.custom_id = custom_id
        # Reason for the duplication.
        self.repeat_reason = repeat_reason
        # Whether the content is a duplicate.
        self.repeat_result = repeat_result
        # Risk identification result.
        self.risk_result = risk_result
        # Description of the risk identification result.
        self.risk_result_desc = risk_result_desc
        # Monitoring task id.
        self.task_id = task_id

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.content_score is not None:
            result['content_score'] = self.content_score
        if self.content_score_desc is not None:
            result['content_score_desc'] = self.content_score_desc
        if self.custom_id is not None:
            result['custom_id'] = self.custom_id
        if self.repeat_reason is not None:
            result['repeat_reason'] = self.repeat_reason
        if self.repeat_result is not None:
            result['repeat_result'] = self.repeat_result
        if self.risk_result is not None:
            result['risk_result'] = self.risk_result
        if self.risk_result_desc is not None:
            result['risk_result_desc'] = self.risk_result_desc
        if self.task_id is not None:
            result['task_id'] = self.task_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('content_score') is not None:
            self.content_score = m.get('content_score')
        if m.get('content_score_desc') is not None:
            self.content_score_desc = m.get('content_score_desc')
        if m.get('custom_id') is not None:
            self.custom_id = m.get('custom_id')
        if m.get('repeat_reason') is not None:
            self.repeat_reason = m.get('repeat_reason')
        if m.get('repeat_result') is not None:
            self.repeat_result = m.get('repeat_result')
        if m.get('risk_result') is not None:
            self.risk_result = m.get('risk_result')
        if m.get('risk_result_desc') is not None:
            self.risk_result_desc = m.get('risk_result_desc')
        if m.get('task_id') is not None:
            self.task_id = m.get('task_id')
        return self


class CreateBaicorpInternalmonitorasyncRequest(TeaModel):
    """Request model for creating an asynchronous content monitoring task."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_id: str = None,
        biz_info: BizInfo = None,
        broadcast_time: int = None,
        custom_id: str = None,
        entity_data: str = None,
        entity_type: str = None,
        entity_url: str = None,
        key_words: List[str] = None,
        matched_duration: int = None,
        monitor_duration: int = None,
        monitor_frequency: int = None,
        monitor_scope: List[str] = None,
        notify_url: str = None,
        provider_id: str = None,
        start_date: int = None,
        task_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Scene id.
        self.biz_id = biz_id
        # Information used for internal business statistics.
        self.biz_info = biz_info
        # Propagation monitoring time, tentatively in days.
        self.broadcast_time = broadcast_time
        # For security reasons, fill in a NonceId.
        self.custom_id = custom_id
        # Raw data of the content to monitor.
        self.entity_data = entity_data
        # Type of the content to monitor.
        self.entity_type = entity_type
        # URL of the content to monitor (HTTP and OSS supported; OSS is pulled
        # from the default source). input_url and input_data are mutually
        # exclusive.
        self.entity_url = entity_url
        # Keywords for the monitoring input.
        self.key_words = key_words
        # Match duration, in seconds.
        self.matched_duration = matched_duration
        # Monitoring duration, in days.
        self.monitor_duration = monitor_duration
        # Monitoring frequency, tentatively in days.
        self.monitor_frequency = monitor_frequency
        # Monitoring scope.
        self.monitor_scope = monitor_scope
        # Callback URL notified when a monitoring event fires; no notification
        # is sent if empty. At least one successful delivery within 24 hours.
        self.notify_url = notify_url
        # provider id
        self.provider_id = provider_id
        # Monitoring start date in milliseconds since 1970; starts immediately
        # if empty.
        self.start_date = start_date
        # Task id from the business side; uniqueness guaranteed by the
        # business side.
        self.task_id = task_id

    def validate(self):
        self.validate_required(self.biz_info, 'biz_info')
        if self.biz_info:
            self.biz_info.validate()
        self.validate_required(self.entity_type, 'entity_type')
        self.validate_required(self.key_words, 'key_words')
        self.validate_required(self.monitor_frequency, 'monitor_frequency')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_id is not None:
            result['biz_id'] = self.biz_id
        if self.biz_info is not None:
            result['biz_info'] = self.biz_info.to_map()
        if self.broadcast_time is not None:
            result['broadcast_time'] = self.broadcast_time
        if self.custom_id is not None:
            result['custom_id'] = self.custom_id
        if self.entity_data is not None:
            result['entity_data'] = self.entity_data
        if self.entity_type is not None:
            result['entity_type'] = self.entity_type
        if self.entity_url is not None:
            result['entity_url'] = self.entity_url
        if self.key_words is not None:
            result['key_words'] = self.key_words
        if self.matched_duration is not None:
            result['matched_duration'] = self.matched_duration
        if self.monitor_duration is not None:
            result['monitor_duration'] = self.monitor_duration
        if self.monitor_frequency is not None:
            result['monitor_frequency'] = self.monitor_frequency
        if self.monitor_scope is not None:
            result['monitor_scope'] = self.monitor_scope
        if self.notify_url is not None:
            result['notify_url'] = self.notify_url
        if self.provider_id is not None:
            result['provider_id'] = self.provider_id
        if self.start_date is not None:
            result['start_date'] = self.start_date
        if self.task_id is not None:
            result['task_id'] = self.task_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_id') is not None:
            self.biz_id = m.get('biz_id')
        if m.get('biz_info') is not None:
            temp_model = BizInfo()
            self.biz_info = temp_model.from_map(m['biz_info'])
        if m.get('broadcast_time') is not None:
            self.broadcast_time = m.get('broadcast_time')
        if m.get('custom_id') is not None:
            self.custom_id = m.get('custom_id')
        if m.get('entity_data') is not None:
            self.entity_data = m.get('entity_data')
        if m.get('entity_type') is not None:
            self.entity_type = m.get('entity_type')
        if m.get('entity_url') is not None:
            self.entity_url = m.get('entity_url')
        if m.get('key_words') is not None:
            self.key_words = m.get('key_words')
        if m.get('matched_duration') is not None:
            self.matched_duration = m.get('matched_duration')
        if m.get('monitor_duration') is not None:
            self.monitor_duration = m.get('monitor_duration')
        if m.get('monitor_frequency') is not None:
            self.monitor_frequency = m.get('monitor_frequency')
        if m.get('monitor_scope') is not None:
            self.monitor_scope = m.get('monitor_scope')
        if m.get('notify_url') is not None:
            self.notify_url = m.get('notify_url')
        if m.get('provider_id') is not None:
            self.provider_id = m.get('provider_id')
        if m.get('start_date') is not None:
            self.start_date = m.get('start_date')
        if m.get('task_id') is not None:
            self.task_id = m.get('task_id')
        return self


class CreateBaicorpInternalmonitorasyncResponse(TeaModel):
    """Response model for creating an asynchronous content monitoring task."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        monitor_duration: int = None,
        start_date: int = None,
        task_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Monitoring duration, in days.
        self.monitor_duration = monitor_duration
        # Monitoring start date in milliseconds since 1970; starts immediately
        # if empty.
        self.start_date = start_date
        # Task id from the business side; uniqueness guaranteed by the
        # business side.
        self.task_id = task_id

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.monitor_duration is not None:
            result['monitor_duration'] = self.monitor_duration
        if self.start_date is not None:
            result['start_date'] = self.start_date
        if self.task_id is not None:
            result['task_id'] = self.task_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('monitor_duration') is not None:
            self.monitor_duration = m.get('monitor_duration')
        if m.get('start_date') is not None:
            self.start_date = m.get('start_date')
        if m.get('task_id') is not None:
            self.task_id = m.get('task_id')
        return self


class QueryBaicorpInternalevaluationasyncRequest(TeaModel):
    """Request model for querying an asynchronous content evaluation task."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_id: str = None,
        biz_info: BizInfo = None,
        custom_id: str = None,
        task_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Product id.
        self.biz_id = biz_id
        # biz info
        self.biz_info = biz_info
        # For security reasons, fill in a NonceId.
        self.custom_id = custom_id
        # Monitoring task id.
        self.task_id = task_id

    def validate(self):
        self.validate_required(self.biz_id, 'biz_id')
        self.validate_required(self.biz_info, 'biz_info')
        if self.biz_info:
            self.biz_info.validate()
        self.validate_required(self.custom_id, 'custom_id')
        self.validate_required(self.task_id, 'task_id')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_id is not None:
            result['biz_id'] = self.biz_id
        if self.biz_info is not None:
            result['biz_info'] = self.biz_info.to_map()
        if self.custom_id is not None:
            result['custom_id'] = self.custom_id
        if self.task_id is not None:
            result['task_id'] = self.task_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_id') is not None:
            self.biz_id = m.get('biz_id')
        if m.get('biz_info') is not None:
            temp_model = BizInfo()
            self.biz_info = temp_model.from_map(m['biz_info'])
        if m.get('custom_id') is not None:
            self.custom_id = m.get('custom_id')
        if m.get('task_id') is not None:
            self.task_id = m.get('task_id')
        return self


class QueryBaicorpInternalevaluationasyncResponse(TeaModel):
    """Response model for querying an asynchronous content evaluation task."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        custom_id: str = None,
        risk_result: str = None,
        risk_result_desc: str = None,
        task_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Business-defined id.
        self.custom_id = custom_id
        # Risk identification result.
        self.risk_result = risk_result
        # Description of the risk identification result.
        self.risk_result_desc = risk_result_desc
        # Monitoring task id.
        self.task_id = task_id

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.custom_id is not None:
            result['custom_id'] = self.custom_id
        if self.risk_result is not None:
            result['risk_result'] = self.risk_result
        if self.risk_result_desc is not None:
            result['risk_result_desc'] = self.risk_result_desc
        if self.task_id is not None:
            result['task_id'] = self.task_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('custom_id') is not None:
            self.custom_id = m.get('custom_id')
        if m.get('risk_result') is not None:
            self.risk_result = m.get('risk_result')
        if m.get('risk_result_desc') is not None:
            self.risk_result_desc = m.get('risk_result_desc')
        if m.get('task_id') is not None:
            self.task_id = m.get('task_id')
        return self


class QueryBaicorpInternalmonitorasyncRequest(TeaModel):
    """Request model for querying an asynchronous content monitoring task."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_id: str = None,
        task_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Business side product id.
        self.biz_id = biz_id
        # Task id from the business side; uniqueness guaranteed by the
        # business side.
        self.task_id = task_id

    def validate(self):
        self.validate_required(self.biz_id, 'biz_id')
        self.validate_required(self.task_id, 'task_id')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_id is not None:
            result['biz_id'] = self.biz_id
        if self.task_id is not None:
            result['task_id'] = self.task_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_id') is not None:
            self.biz_id = m.get('biz_id')
        if m.get('task_id') is not None:
            self.task_id = m.get('task_id')
        return self


class QueryBaicorpInternalmonitorasyncResponse(TeaModel):
    """Response model for querying an asynchronous content monitoring task."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        data: List[AntiPiracyResultObject] = None,
        err_msg: str = None,
        status: str = None,
        task_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Data.
        self.data = data
        # If status == "failed", this field holds the related error message.
        self.err_msg = err_msg
        # success: the data was stored and is now being monitored;
        # continue: the data is being monitored and `data` contains
        # monitoring results;
        # failed: the task failed.
        self.status = status
        # Task id.
        self.task_id = task_id

    def validate(self):
        if self.data:
            for k in self.data:
                if k:
                    k.validate()

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        result['data'] = []
        if self.data is not None:
            for k in self.data:
                result['data'].append(k.to_map() if k else None)
        if self.err_msg is not None:
            result['err_msg'] = self.err_msg
        if self.status is not None:
            result['status'] = self.status
        if self.task_id is not None:
            result['task_id'] = self.task_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        self.data = []
        if m.get('data') is not None:
            for k in m.get('data'):
                temp_model = AntiPiracyResultObject()
                self.data.append(temp_model.from_map(k))
        if m.get('err_msg') is not None:
            self.err_msg = m.get('err_msg')
        if m.get('status') is not None:
            self.status = m.get('status')
        if m.get('task_id') is not None:
            self.task_id = m.get('task_id')
        return self


class CertifyEnterpriseidFaceauthRequest(TeaModel):
    """Request model for launching an enterprise face verification session."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_no: str = None,
        callback_url: str = None,
        redirect_url: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Unique identifier of the verification.
        self.biz_no = biz_no
        # Callback notification URL.
        self.callback_url = callback_url
        # Redirect URL after the verification completes.
        self.redirect_url = redirect_url

    def validate(self):
        self.validate_required(self.biz_no, 'biz_no')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_no is not None:
            result['biz_no'] = self.biz_no
        if self.callback_url is not None:
            result['callback_url'] = self.callback_url
        if self.redirect_url is not None:
            result['redirect_url'] = self.redirect_url
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_no') is not None:
            self.biz_no = m.get('biz_no')
        if m.get('callback_url') is not None:
            self.callback_url = m.get('callback_url')
        if m.get('redirect_url') is not None:
            self.redirect_url = m.get('redirect_url')
        return self


class CertifyEnterpriseidFaceauthResponse(TeaModel):
    """Response model for launching an enterprise face verification session."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        biz_no: str = None,
        verify_url: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Unique identifier of the verification.
        self.biz_no = biz_no
        # Verification URL.
        self.verify_url = verify_url

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.biz_no is not None:
            result['biz_no'] = self.biz_no
        if self.verify_url is not None:
            result['verify_url'] = self.verify_url
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('biz_no') is not None:
            self.biz_no = m.get('biz_no')
        if m.get('verify_url') is not None:
            self.verify_url = m.get('verify_url')
        return self


class InitIndividualidFaceauthRequest(TeaModel):
    """Request model for initializing an individual face verification."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        cert_name: str = None,
        cert_no: str = None,
        biz_code: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Name.
        self.cert_name = cert_name
        # ID card number.
        self.cert_no = cert_no
        # Verification mode: FACE verifies inside Alipay; FACE_SDK verifies
        # inside the customer's own app. Defaults to FACE.
        self.biz_code = biz_code

    def validate(self):
        self.validate_required(self.cert_name, 'cert_name')
        self.validate_required(self.cert_no, 'cert_no')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.cert_name is not None:
            result['cert_name'] = self.cert_name
        if self.cert_no is not None:
            result['cert_no'] = self.cert_no
        if self.biz_code is not None:
            result['biz_code'] = self.biz_code
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('cert_name') is not None:
            self.cert_name = m.get('cert_name')
        if m.get('cert_no') is not None:
            self.cert_no = m.get('cert_no')
        if m.get('biz_code') is not None:
            self.biz_code = m.get('biz_code')
        return self


class InitIndividualidFaceauthResponse(TeaModel):
    """Response model for initializing an individual face verification."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        certify_id: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Unique id of the verification.
        self.certify_id = certify_id

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.certify_id is not None:
            result['certify_id'] = self.certify_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('certify_id') is not None:
            self.certify_id = m.get('certify_id')
        return self


class CertifyIndividualidFaceauthRequest(TeaModel):
    """Request model for launching an individual face verification session."""

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        callback_url: str = None,
        certify_id: str = None,
        redirect_url: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Callback notification URL.
        self.callback_url = callback_url
        # Unique id of the verification.
        self.certify_id = certify_id
        # Redirect URL after the verification completes.
        self.redirect_url = redirect_url

    def validate(self):
        self.validate_required(self.certify_id, 'certify_id')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.callback_url is not None:
            result['callback_url'] = self.callback_url
        if self.certify_id is not None:
            result['certify_id'] = self.certify_id
        if self.redirect_url is not None:
            result['redirect_url'] = self.redirect_url
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('callback_url') is not None:
            self.callback_url = m.get('callback_url')
        if m.get('certify_id') is not None:
            self.certify_id = m.get('certify_id')
        if m.get('redirect_url') is not None:
            self.redirect_url = m.get('redirect_url')
        return self


class CertifyIndividualidFaceauthResponse(TeaModel):
    """Response model for launching an individual face verification session."""

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        certify_id: str = None,
        verify_url: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; OK generally means the call succeeded.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Unique id of the verification.
        self.certify_id = certify_id
        # Verification URL.
        self.verify_url = verify_url

    def validate(self):
        pass
    # NOTE(review): the to_map/from_map methods of this class continue past
    # this chunk boundary and are not visible here; only the portion of the
    # class visible in this chunk is reproduced above.
to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.certify_id is not None: result['certify_id'] = self.certify_id if self.verify_url is not None: result['verify_url'] = self.verify_url return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('certify_id') is not None: self.certify_id = m.get('certify_id') if m.get('verify_url') is not None: self.verify_url = m.get('verify_url') return self class QueryIndividualidFaceauthRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, certify_id: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 认证的唯一性id self.certify_id = certify_id def validate(self): self.validate_required(self.certify_id, 'certify_id') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.certify_id is not None: result['certify_id'] = self.certify_id return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('certify_id') is not None: self.certify_id = m.get('certify_id') return self class QueryIndividualidFaceauthResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, certify_id: str = None, passed: bool = None, 
finished: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 认证的唯一性id # self.certify_id = certify_id # 是否认证通过 self.passed = passed # 用户是否完成刷脸 self.finished = finished def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.certify_id is not None: result['certify_id'] = self.certify_id if self.passed is not None: result['passed'] = self.passed if self.finished is not None: result['finished'] = self.finished return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('certify_id') is not None: self.certify_id = m.get('certify_id') if m.get('passed') is not None: self.passed = m.get('passed') if m.get('finished') is not None: self.finished = m.get('finished') return self class GetDataserviceBlockchainheightRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, bizid: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 区块链的唯一性标示 self.bizid = bizid def validate(self): self.validate_required(self.bizid, 'bizid') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.bizid is not None: result['bizid'] = self.bizid return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if 
m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('bizid') is not None: self.bizid = m.get('bizid') return self class GetDataserviceBlockchainheightResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, data: int = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 区块链块高 self.data = data def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.data is not None: result['data'] = self.data return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('data') is not None: self.data = m.get('data') return self class GetDataserviceTransactioncountRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, bizid: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 区块链唯一性标示 self.bizid = bizid def validate(self): self.validate_required(self.bizid, 'bizid') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.bizid is not None: result['bizid'] = self.bizid return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: 
self.product_instance_id = m.get('product_instance_id') if m.get('bizid') is not None: self.bizid = m.get('bizid') return self class GetDataserviceTransactioncountResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, data: int = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 交易总数 self.data = data def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.data is not None: result['data'] = self.data return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('data') is not None: self.data = m.get('data') return self class GetDataserviceTransactioninfoRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, bizid: str = None, hash: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 区块链唯一性标识 self.bizid = bizid # 交易hash self.hash = hash def validate(self): self.validate_required(self.bizid, 'bizid') self.validate_required(self.hash, 'hash') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.bizid is not None: result['bizid'] = self.bizid if self.hash is not None: result['hash'] = self.hash return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: 
self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('bizid') is not None: self.bizid = m.get('bizid') if m.get('hash') is not None: self.hash = m.get('hash') return self class GetDataserviceTransactioninfoResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, bizid: str = None, category: int = None, create_time: int = None, from_hash: str = None, hash: str = None, height: int = None, to_hash: str = None, type: int = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 区块链唯一性标识 self.bizid = bizid # category self.category = category # 交易发起时间 self.create_time = create_time # 交易发起方哈希 self.from_hash = from_hash # 交易哈希 self.hash = hash # 块高 self.height = height # 交易接收方哈希 self.to_hash = to_hash # 交易类型 self.type = type def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.bizid is not None: result['bizid'] = self.bizid if self.category is not None: result['category'] = self.category if self.create_time is not None: result['create_time'] = self.create_time if self.from_hash is not None: result['from_hash'] = self.from_hash if self.hash is not None: result['hash'] = self.hash if self.height is not None: result['height'] = self.height if self.to_hash is not None: result['to_hash'] = self.to_hash if self.type is not None: result['type'] = self.type return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: 
self.result_msg = m.get('result_msg') if m.get('bizid') is not None: self.bizid = m.get('bizid') if m.get('category') is not None: self.category = m.get('category') if m.get('create_time') is not None: self.create_time = m.get('create_time') if m.get('from_hash') is not None: self.from_hash = m.get('from_hash') if m.get('hash') is not None: self.hash = m.get('hash') if m.get('height') is not None: self.height = m.get('height') if m.get('to_hash') is not None: self.to_hash = m.get('to_hash') if m.get('type') is not None: self.type = m.get('type') return self class ListDataserviceLastblocksRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, bizid: str = None, size: int = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 区块链唯一性标识 self.bizid = bizid # 区块个数 self.size = size def validate(self): self.validate_required(self.bizid, 'bizid') self.validate_required(self.size, 'size') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.bizid is not None: result['bizid'] = self.bizid if self.size is not None: result['size'] = self.size return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('bizid') is not None: self.bizid = m.get('bizid') if m.get('size') is not None: self.size = m.get('size') return self class ListDataserviceLastblocksResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, last_block_list: List[BlockInfo] = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 区块信息 
self.last_block_list = last_block_list def validate(self): if self.last_block_list: for k in self.last_block_list: if k: k.validate() def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg result['last_block_list'] = [] if self.last_block_list is not None: for k in self.last_block_list: result['last_block_list'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') self.last_block_list = [] if m.get('last_block_list') is not None: for k in m.get('last_block_list'): temp_model = BlockInfo() self.last_block_list.append(temp_model.from_map(k)) return self class GetTasAttestationRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, algorithm: str = None, cert_req: bool = None, compress: bool = None, rid: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 摘要算法默认,(sha256或者sm3 默认sm3) self.algorithm = algorithm # tsr中是否保存证书,true表示保存,false表示不保存(默认为false) self.cert_req = cert_req # 返回tsr是否压缩精简,true表示要压缩精简,false表示不压缩精简 (默认为true) self.compress = compress # 事物hash(支持sha256或sm3摘要算法),长度64个字符。 self.rid = rid def validate(self): self.validate_required(self.rid, 'rid') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.algorithm is not None: result['algorithm'] = self.algorithm if self.cert_req is not None: result['cert_req'] = self.cert_req if self.compress is not 
None: result['compress'] = self.compress if self.rid is not None: result['rid'] = self.rid return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('algorithm') is not None: self.algorithm = m.get('algorithm') if m.get('cert_req') is not None: self.cert_req = m.get('cert_req') if m.get('compress') is not None: self.compress = m.get('compress') if m.get('rid') is not None: self.rid = m.get('rid') return self class GetTasAttestationResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, ctsr: str = None, sn: str = None, ts: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 精简后的时间戳完整编码(在校验时需要提交) self.ctsr = ctsr # serialNumber,凭证编号 (在校验的时需要先填写凭证编号) self.sn = sn # 时间信息,从1970年1月1日起至当前时间的毫秒数(13位数字) self.ts = ts def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.ctsr is not None: result['ctsr'] = self.ctsr if self.sn is not None: result['sn'] = self.sn if self.ts is not None: result['ts'] = self.ts return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('ctsr') is not None: self.ctsr = m.get('ctsr') if m.get('sn') is not None: self.sn = m.get('sn') if m.get('ts') is not None: self.ts = m.get('ts') return self class VerifyTasCtsrRequest(TeaModel): def 
__init__( self, auth_token: str = None, product_instance_id: str = None, ctsr: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 请求时间凭证接口返回的ctsr参数 self.ctsr = ctsr def validate(self): self.validate_required(self.ctsr, 'ctsr') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.ctsr is not None: result['ctsr'] = self.ctsr return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('ctsr') is not None: self.ctsr = m.get('ctsr') return self class VerifyTasCtsrResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, app_name: str = None, company_name: str = None, desc: str = None, hash_value: str = None, sn: str = None, ts: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 应用名 self.app_name = app_name # 公司名 self.company_name = company_name # 事务步骤的描述 self.desc = desc # 请求时间凭证时传入的事物hash self.hash_value = hash_value # serialNumber,凭证编号 (在校验的时需要先填写凭证编号) self.sn = sn # 时间信息,从1970年1月1日起至当前时间的毫秒数(13位数字) self.ts = ts def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.app_name is not None: result['app_name'] = self.app_name if self.company_name is not None: result['company_name'] = self.company_name if self.desc is not None: result['desc'] = self.desc if self.hash_value is not None: 
result['hash_value'] = self.hash_value if self.sn is not None: result['sn'] = self.sn if self.ts is not None: result['ts'] = self.ts return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('app_name') is not None: self.app_name = m.get('app_name') if m.get('company_name') is not None: self.company_name = m.get('company_name') if m.get('desc') is not None: self.desc = m.get('desc') if m.get('hash_value') is not None: self.hash_value = m.get('hash_value') if m.get('sn') is not None: self.sn = m.get('sn') if m.get('ts') is not None: self.ts = m.get('ts') return self class GetTasCertificateRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, sn: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # serialNumber,凭证编号 (在校验的时需要先填写凭证编号) self.sn = sn def validate(self): self.validate_required(self.sn, 'sn') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.sn is not None: result['sn'] = self.sn return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('sn') is not None: self.sn = m.get('sn') return self class GetTasCertificateResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, url: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = 
result_msg # 下载pdf格式证书的临时url self.url = url def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.url is not None: result['url'] = self.url return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('url') is not None: self.url = m.get('url') return self class GetTasTransactionattestationRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, rid: str = None, algorithm: str = None, compress: bool = None, cert_req: bool = None, trans_id: str = None, desc: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 事物hash(支持sha256或sm3摘要算法) # 长度64个字符。 self.rid = rid # 摘要算法默认,(sha256或者sm3 默认sm3) self.algorithm = algorithm # 返回tsr是否压缩精简,true表示要压缩精简,false表示不压缩精简 (默认为true) self.compress = compress # tsr中是否保存证书,true表示保存,false表示不保存(默认为false) self.cert_req = cert_req # 事务id,允许大小写数字且小于十位的字符串 self.trans_id = trans_id # 对事务的描述,长度小于20位 self.desc = desc def validate(self): self.validate_required(self.rid, 'rid') self.validate_required(self.trans_id, 'trans_id') self.validate_required(self.desc, 'desc') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.rid is not None: result['rid'] = self.rid if self.algorithm is not None: result['algorithm'] = self.algorithm if self.compress is not None: result['compress'] = self.compress if self.cert_req is not None: result['cert_req'] 
= self.cert_req if self.trans_id is not None: result['trans_id'] = self.trans_id if self.desc is not None: result['desc'] = self.desc return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('rid') is not None: self.rid = m.get('rid') if m.get('algorithm') is not None: self.algorithm = m.get('algorithm') if m.get('compress') is not None: self.compress = m.get('compress') if m.get('cert_req') is not None: self.cert_req = m.get('cert_req') if m.get('trans_id') is not None: self.trans_id = m.get('trans_id') if m.get('desc') is not None: self.desc = m.get('desc') return self class GetTasTransactionattestationResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, sn: str = None, ctsr: str = None, ts: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # serialNumber,凭证编号 (在校验的时需要先填写凭证编号) self.sn = sn # 精简后的时间戳完整编码(在校验时需要提交) self.ctsr = ctsr # 时间信息,从1970年1月1日起至当前时间的毫秒数(13位数字) self.ts = ts def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.sn is not None: result['sn'] = self.sn if self.ctsr is not None: result['ctsr'] = self.ctsr if self.ts is not None: result['ts'] = self.ts return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('sn') is not None: self.sn = 
m.get('sn') if m.get('ctsr') is not None: self.ctsr = m.get('ctsr') if m.get('ts') is not None: self.ts = m.get('ts') return self class QueryEverifyFourmetaRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, ep_cert_name: str = None, ep_cert_no: str = None, legal_person_cert_name: str = None, legal_person_cert_no: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 某某有限公司 self.ep_cert_name = ep_cert_name # 企业证件号 self.ep_cert_no = ep_cert_no # 法人姓名 self.legal_person_cert_name = legal_person_cert_name # 企业法人身份证号码 self.legal_person_cert_no = legal_person_cert_no def validate(self): self.validate_required(self.ep_cert_name, 'ep_cert_name') self.validate_required(self.ep_cert_no, 'ep_cert_no') self.validate_required(self.legal_person_cert_name, 'legal_person_cert_name') self.validate_required(self.legal_person_cert_no, 'legal_person_cert_no') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no if self.legal_person_cert_name is not None: result['legal_person_cert_name'] = self.legal_person_cert_name if self.legal_person_cert_no is not None: result['legal_person_cert_no'] = self.legal_person_cert_no return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: self.ep_cert_no = m.get('ep_cert_no') if m.get('legal_person_cert_name') is not None: self.legal_person_cert_name = 
m.get('legal_person_cert_name') if m.get('legal_person_cert_no') is not None: self.legal_person_cert_no = m.get('legal_person_cert_no') return self class QueryEverifyFourmetaResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, code: str = None, enterprise_status: str = None, open_time: str = None, passed: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 0:核验成功 # 1:企业信息有误 # 2:企业非正常营业 self.code = code # 企业经营状态 self.enterprise_status = enterprise_status # 营业期限 self.open_time = open_time # 认证是否通过 self.passed = passed def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.code is not None: result['code'] = self.code if self.enterprise_status is not None: result['enterprise_status'] = self.enterprise_status if self.open_time is not None: result['open_time'] = self.open_time if self.passed is not None: result['passed'] = self.passed return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('code') is not None: self.code = m.get('code') if m.get('enterprise_status') is not None: self.enterprise_status = m.get('enterprise_status') if m.get('open_time') is not None: self.open_time = m.get('open_time') if m.get('passed') is not None: self.passed = m.get('passed') return self class QueryEverifyThreemetaRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, ep_cert_name: str = None, ep_cert_no: str = None, 
legal_person_cert_name: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 企业名称 self.ep_cert_name = ep_cert_name # 企业证件号 self.ep_cert_no = ep_cert_no # 法人姓名 self.legal_person_cert_name = legal_person_cert_name def validate(self): self.validate_required(self.ep_cert_name, 'ep_cert_name') self.validate_required(self.ep_cert_no, 'ep_cert_no') self.validate_required(self.legal_person_cert_name, 'legal_person_cert_name') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no if self.legal_person_cert_name is not None: result['legal_person_cert_name'] = self.legal_person_cert_name return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: self.ep_cert_no = m.get('ep_cert_no') if m.get('legal_person_cert_name') is not None: self.legal_person_cert_name = m.get('legal_person_cert_name') return self class QueryEverifyThreemetaResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, code: str = None, enterprise_status: str = None, open_time: str = None, passed: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 0:核验成功 # 1:企业信息有误 # 2:企业非正常营业 self.code = code # 经营状态 self.enterprise_status = enterprise_status # 营业期限 self.open_time = open_time # 认证是否通过 self.passed = passed def 
validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.code is not None: result['code'] = self.code if self.enterprise_status is not None: result['enterprise_status'] = self.enterprise_status if self.open_time is not None: result['open_time'] = self.open_time if self.passed is not None: result['passed'] = self.passed return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('code') is not None: self.code = m.get('code') if m.get('enterprise_status') is not None: self.enterprise_status = m.get('enterprise_status') if m.get('open_time') is not None: self.open_time = m.get('open_time') if m.get('passed') is not None: self.passed = m.get('passed') return self class QueryEverifyTwometaRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, ep_cert_name: str = None, ep_cert_no: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 企业名称 self.ep_cert_name = ep_cert_name # 企业证件号 self.ep_cert_no = ep_cert_no def validate(self): self.validate_required(self.ep_cert_name, 'ep_cert_name') self.validate_required(self.ep_cert_no, 'ep_cert_no') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no return result def from_map(self, m: dict 
= None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: self.ep_cert_no = m.get('ep_cert_no') return self class QueryEverifyTwometaResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, enterprise_status: str = None, open_time: str = None, passed: bool = None, code: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 经营状态 self.enterprise_status = enterprise_status # 营业期限 self.open_time = open_time # 认证是否通过 self.passed = passed # 0:核验成功 # 1:企业信息有误 # 2:企业非正常营业 self.code = code def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.enterprise_status is not None: result['enterprise_status'] = self.enterprise_status if self.open_time is not None: result['open_time'] = self.open_time if self.passed is not None: result['passed'] = self.passed if self.code is not None: result['code'] = self.code return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('enterprise_status') is not None: self.enterprise_status = m.get('enterprise_status') if m.get('open_time') is not None: self.open_time = m.get('open_time') if m.get('passed') is not None: self.passed = m.get('passed') if m.get('code') is not None: 
self.code = m.get('code') return self class QueryBaicorpInternalsearchlibraryRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, account_id: str = None, biz_id: str = None, company_id: str = None, content_id: str = None, custom_id: str = None, entity_data: str = None, entity_desc: str = None, entity_type: str = None, entity_url: str = None, timestamp: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 账户ID,账户粒度ID。 self.account_id = account_id # 产品ID self.biz_id = biz_id # 商户ID,即平台用户ID。 self.company_id = company_id # 内容ID self.content_id = content_id # 基于安全考虑,填充NonceId self.custom_id = custom_id # 待检测内容的raw data,这期暂不使用 self.entity_data = entity_data # 待检测字段的描述信息,包括标题、描述或关键词,json格式字符串 self.entity_desc = entity_desc # 待检测内容类型,[TEXT, PICTURE, VIDEO, HTML] self.entity_type = entity_type # 1、待检测内容oss url(后续可以扩展为非oss的文件url) # 2、假如使用AK访问,此处填写fileid。 self.entity_url = entity_url # 时间戳 self.timestamp = timestamp def validate(self): self.validate_required(self.account_id, 'account_id') self.validate_required(self.biz_id, 'biz_id') self.validate_required(self.company_id, 'company_id') self.validate_required(self.content_id, 'content_id') self.validate_required(self.custom_id, 'custom_id') self.validate_required(self.entity_type, 'entity_type') self.validate_required(self.entity_url, 'entity_url') self.validate_required(self.timestamp, 'timestamp') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.account_id is not None: result['account_id'] = self.account_id if self.biz_id is not None: result['biz_id'] = self.biz_id if self.company_id is not None: result['company_id'] = self.company_id if self.content_id is not None: result['content_id'] = self.content_id if self.custom_id is not None: result['custom_id'] = 
self.custom_id if self.entity_data is not None: result['entity_data'] = self.entity_data if self.entity_desc is not None: result['entity_desc'] = self.entity_desc if self.entity_type is not None: result['entity_type'] = self.entity_type if self.entity_url is not None: result['entity_url'] = self.entity_url if self.timestamp is not None: result['timestamp'] = self.timestamp return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('account_id') is not None: self.account_id = m.get('account_id') if m.get('biz_id') is not None: self.biz_id = m.get('biz_id') if m.get('company_id') is not None: self.company_id = m.get('company_id') if m.get('content_id') is not None: self.content_id = m.get('content_id') if m.get('custom_id') is not None: self.custom_id = m.get('custom_id') if m.get('entity_data') is not None: self.entity_data = m.get('entity_data') if m.get('entity_desc') is not None: self.entity_desc = m.get('entity_desc') if m.get('entity_type') is not None: self.entity_type = m.get('entity_type') if m.get('entity_url') is not None: self.entity_url = m.get('entity_url') if m.get('timestamp') is not None: self.timestamp = m.get('timestamp') return self class QueryBaicorpInternalsearchlibraryResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, custom_id: str = None, model_info: str = None, repeat_info: str = None, similarity_info: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # NoucelId self.custom_id = custom_id # 采用的模型以及版本说明 self.model_info = model_info # 重复列表,json list格式 self.repeat_info = repeat_info # 相似度信息列表,json list格式 self.similarity_info = similarity_info def validate(self): pass def to_map(self): result = 
dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.custom_id is not None: result['custom_id'] = self.custom_id if self.model_info is not None: result['model_info'] = self.model_info if self.repeat_info is not None: result['repeat_info'] = self.repeat_info if self.similarity_info is not None: result['similarity_info'] = self.similarity_info return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('custom_id') is not None: self.custom_id = m.get('custom_id') if m.get('model_info') is not None: self.model_info = m.get('model_info') if m.get('repeat_info') is not None: self.repeat_info = m.get('repeat_info') if m.get('similarity_info') is not None: self.similarity_info = m.get('similarity_info') return self class UpdateBaicorpInternalsearchlibraryRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, account_id: str = None, biz_id: str = None, company_id: str = None, content_id: str = None, custom_id: str = None, entity_data: str = None, entity_desc: str = None, entity_type: str = None, entity_url: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 账户ID,账户粒度ID。 self.account_id = account_id # 产品ID,[BANQUAN, PAIPAI] self.biz_id = biz_id # 商户ID,即平台用户ID。 self.company_id = company_id # 内容ID self.content_id = content_id # # 基于安全考虑,填充NonceId。 self.custom_id = custom_id # 待检测内容的raw data,这期暂不使用 self.entity_data = entity_data # 待检测字段的描述信息,包括标题、描述或关键词,json格式字符串。 self.entity_desc = entity_desc # 待检测内容类型,[TEXT, PICTURE, VIDEO, HTML] self.entity_type = entity_type # 
1.待检测内容oss url(后续可以扩展为非oss的文件url)。 # 2.假如使用AK访问,此处填写fileid。 self.entity_url = entity_url def validate(self): self.validate_required(self.account_id, 'account_id') self.validate_required(self.biz_id, 'biz_id') self.validate_required(self.company_id, 'company_id') self.validate_required(self.content_id, 'content_id') self.validate_required(self.custom_id, 'custom_id') self.validate_required(self.entity_data, 'entity_data') self.validate_required(self.entity_desc, 'entity_desc') self.validate_required(self.entity_type, 'entity_type') self.validate_required(self.entity_url, 'entity_url') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.account_id is not None: result['account_id'] = self.account_id if self.biz_id is not None: result['biz_id'] = self.biz_id if self.company_id is not None: result['company_id'] = self.company_id if self.content_id is not None: result['content_id'] = self.content_id if self.custom_id is not None: result['custom_id'] = self.custom_id if self.entity_data is not None: result['entity_data'] = self.entity_data if self.entity_desc is not None: result['entity_desc'] = self.entity_desc if self.entity_type is not None: result['entity_type'] = self.entity_type if self.entity_url is not None: result['entity_url'] = self.entity_url return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('account_id') is not None: self.account_id = m.get('account_id') if m.get('biz_id') is not None: self.biz_id = m.get('biz_id') if m.get('company_id') is not None: self.company_id = m.get('company_id') if m.get('content_id') is not None: self.content_id = m.get('content_id') if m.get('custom_id') is not None: 
self.custom_id = m.get('custom_id') if m.get('entity_data') is not None: self.entity_data = m.get('entity_data') if m.get('entity_desc') is not None: self.entity_desc = m.get('entity_desc') if m.get('entity_type') is not None: self.entity_type = m.get('entity_type') if m.get('entity_url') is not None: self.entity_url = m.get('entity_url') return self class UpdateBaicorpInternalsearchlibraryResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, custom_id: str = None, update_result: str = None, update_desc: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # NounceId self.custom_id = custom_id # 更新是否成功 self.update_result = update_result # 更新描述、更新失败原因 self.update_desc = update_desc def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.custom_id is not None: result['custom_id'] = self.custom_id if self.update_result is not None: result['update_result'] = self.update_result if self.update_desc is not None: result['update_desc'] = self.update_desc return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('custom_id') is not None: self.custom_id = m.get('custom_id') if m.get('update_result') is not None: self.update_result = m.get('update_result') if m.get('update_desc') is not None: self.update_desc = m.get('update_desc') return self class QueryEpayauthRootbankRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = 
None, bank_name: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 支持全称,或部分名称 如果不传名称,系统默认将返回热门银行,如果用户期望的银行不是热门银行,可以建议用户输入银行名称进行查询。 self.bank_name = bank_name def validate(self): self.validate_required(self.bank_name, 'bank_name') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.bank_name is not None: result['bank_name'] = self.bank_name return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('bank_name') is not None: self.bank_name = m.get('bank_name') return self class QueryEpayauthRootbankResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, bank_details: List[Institution] = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 银行列表 self.bank_details = bank_details def validate(self): if self.bank_details: for k in self.bank_details: if k: k.validate() def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg result['bank_details'] = [] if self.bank_details is not None: for k in self.bank_details: result['bank_details'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: 
self.result_msg = m.get('result_msg') self.bank_details = [] if m.get('bank_details') is not None: for k in m.get('bank_details'): temp_model = Institution() self.bank_details.append(temp_model.from_map(k)) return self class QueryYdapplyprotEcapplyRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, cert_no: str = None, mobile: str = None, user_name: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 用户证件号码 self.cert_no = cert_no # 用户手机号码 self.mobile = mobile # 用户姓名 self.user_name = user_name def validate(self): self.validate_required(self.cert_no, 'cert_no') self.validate_required(self.mobile, 'mobile') self.validate_required(self.user_name, 'user_name') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.cert_no is not None: result['cert_no'] = self.cert_no if self.mobile is not None: result['mobile'] = self.mobile if self.user_name is not None: result['user_name'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('cert_no') is not None: self.cert_no = m.get('cert_no') if m.get('mobile') is not None: self.mobile = m.get('mobile') if m.get('user_name') is not None: self.user_name = m.get('user_name') return self class QueryYdapplyprotEcapplyResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, passed: bool = None, score: str = None, strategies: List[str] = None, decision: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 核验是否通过 
self.passed = passed # 风险分 self.score = score # 命中的策略列表 self.strategies = strategies # 风险决策结果 self.decision = decision def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.passed is not None: result['passed'] = self.passed if self.score is not None: result['score'] = self.score if self.strategies is not None: result['strategies'] = self.strategies if self.decision is not None: result['decision'] = self.decision return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('passed') is not None: self.passed = m.get('passed') if m.get('score') is not None: self.score = m.get('score') if m.get('strategies') is not None: self.strategies = m.get('strategies') if m.get('decision') is not None: self.decision = m.get('decision') return self class QueryYdpacprotEcpacRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, mobile: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 用户手机号 self.mobile = mobile def validate(self): self.validate_required(self.mobile, 'mobile') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.mobile is not None: result['mobile'] = self.mobile return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: 
self.product_instance_id = m.get('product_instance_id') if m.get('mobile') is not None: self.mobile = m.get('mobile') return self class QueryYdpacprotEcpacResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, passed: bool = None, score: str = None, strategies: List[str] = None, decision: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 认证是否通过 self.passed = passed # 模型分数 self.score = score # 命中策略列表 self.strategies = strategies # 风险决策结果 self.decision = decision def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.passed is not None: result['passed'] = self.passed if self.score is not None: result['score'] = self.score if self.strategies is not None: result['strategies'] = self.strategies if self.decision is not None: result['decision'] = self.decision return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('passed') is not None: self.passed = m.get('passed') if m.get('score') is not None: self.score = m.get('score') if m.get('strategies') is not None: self.strategies = m.get('strategies') if m.get('decision') is not None: self.decision = m.get('decision') return self class QueryYdauthprotTwometaRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, cert_no: str = None, user_name: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 
被核验用户的身份证号 self.cert_no = cert_no # 被核验用户的姓名 self.user_name = user_name def validate(self): self.validate_required(self.cert_no, 'cert_no') self.validate_required(self.user_name, 'user_name') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.cert_no is not None: result['cert_no'] = self.cert_no if self.user_name is not None: result['user_name'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('cert_no') is not None: self.cert_no = m.get('cert_no') if m.get('user_name') is not None: self.user_name = m.get('user_name') return self class QueryYdauthprotTwometaResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, passed: bool = None, score: str = None, strategies: List[str] = None, decision: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 核验是否通过 self.passed = passed # 风险分 self.score = score # 命中的策略列表 self.strategies = strategies # 风险决策结果 self.decision = decision def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.passed is not None: result['passed'] = self.passed if self.score is not None: result['score'] = self.score if self.strategies is not None: result['strategies'] = self.strategies if self.decision is not None: result['decision'] = self.decision return result def from_map(self, m: dict = None): m = m 
or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('passed') is not None: self.passed = m.get('passed') if m.get('score') is not None: self.score = m.get('score') if m.get('strategies') is not None: self.strategies = m.get('strategies') if m.get('decision') is not None: self.decision = m.get('decision') return self class QueryYdauthprotThreemetaRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, cert_no: str = None, mobile: str = None, user_name: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 被核验用户的身份证号 self.cert_no = cert_no # 被核验用户的手机号 self.mobile = mobile # 被核验用户姓名 self.user_name = user_name def validate(self): self.validate_required(self.cert_no, 'cert_no') self.validate_required(self.mobile, 'mobile') self.validate_required(self.user_name, 'user_name') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.cert_no is not None: result['cert_no'] = self.cert_no if self.mobile is not None: result['mobile'] = self.mobile if self.user_name is not None: result['user_name'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('cert_no') is not None: self.cert_no = m.get('cert_no') if m.get('mobile') is not None: self.mobile = m.get('mobile') if m.get('user_name') is not None: self.user_name = m.get('user_name') return self class QueryYdauthprotThreemetaResponse(TeaModel): def __init__( self, req_msg_id: str 
= None, result_code: str = None, result_msg: str = None, passed: bool = None, score: str = None, strategies: List[str] = None, decision: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 核验是否通过 self.passed = passed # 风险分 self.score = score # 命中的策略列表 self.strategies = strategies # 风险决策结果 self.decision = decision def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.passed is not None: result['passed'] = self.passed if self.score is not None: result['score'] = self.score if self.strategies is not None: result['strategies'] = self.strategies if self.decision is not None: result['decision'] = self.decision return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('passed') is not None: self.passed = m.get('passed') if m.get('score') is not None: self.score = m.get('score') if m.get('strategies') is not None: self.strategies = m.get('strategies') if m.get('decision') is not None: self.decision = m.get('decision') return self class QueryYdauthprotFourmetaRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, card_no: str = None, cert_no: str = None, mobile: str = None, user_name: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 被核验用户的银行卡号 self.card_no = card_no # 被核验用户的身份证号 self.cert_no = cert_no # 被核验用户的手机号 self.mobile = mobile # 被核验用户的姓名 self.user_name = user_name def validate(self): 
self.validate_required(self.card_no, 'card_no') self.validate_required(self.cert_no, 'cert_no') self.validate_required(self.mobile, 'mobile') self.validate_required(self.user_name, 'user_name') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.card_no is not None: result['card_no'] = self.card_no if self.cert_no is not None: result['cert_no'] = self.cert_no if self.mobile is not None: result['mobile'] = self.mobile if self.user_name is not None: result['user_name'] = self.user_name return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('card_no') is not None: self.card_no = m.get('card_no') if m.get('cert_no') is not None: self.cert_no = m.get('cert_no') if m.get('mobile') is not None: self.mobile = m.get('mobile') if m.get('user_name') is not None: self.user_name = m.get('user_name') return self class QueryYdauthprotFourmetaResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, passed: bool = None, score: str = None, strategies: List[str] = None, decision: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 核验是否通过 self.passed = passed # 风险分 self.score = score # 命中的策略列表 self.strategies = strategies # 风险决策结果 self.decision = decision def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.passed is not None: result['passed'] = 
self.passed if self.score is not None: result['score'] = self.score if self.strategies is not None: result['strategies'] = self.strategies if self.decision is not None: result['decision'] = self.decision return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('passed') is not None: self.passed = m.get('passed') if m.get('score') is not None: self.score = m.get('score') if m.get('strategies') is not None: self.strategies = m.get('strategies') if m.get('decision') is not None: self.decision = m.get('decision') return self class QueryYdmktprotEcmarketcampaignRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, mobile: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 用户手机号 self.mobile = mobile def validate(self): self.validate_required(self.mobile, 'mobile') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.mobile is not None: result['mobile'] = self.mobile return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('mobile') is not None: self.mobile = m.get('mobile') return self class QueryYdmktprotEcmarketcampaignResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, passed: bool = None, score: str = None, strategies: List[str] = None, decision: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 
# Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Whether the verification passed.
        self.passed = passed
        # Risk score.
        self.score = score
        # List of strategies that were hit.
        self.strategies = strategies
        # Risk decision result.
        self.decision = decision

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.passed is not None:
            result['passed'] = self.passed
        if self.score is not None:
            result['score'] = self.score
        if self.strategies is not None:
            result['strategies'] = self.strategies
        if self.decision is not None:
            result['decision'] = self.decision
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('passed') is not None:
            self.passed = m.get('passed')
        if m.get('score') is not None:
            self.score = m.get('score')
        if m.get('strategies') is not None:
            self.strategies = m.get('strategies')
        if m.get('decision') is not None:
            self.decision = m.get('decision')
        return self


class QueryYdregprotEcregisterRequest(TeaModel):
    """Request model: query e-commerce registration by mobile number."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        mobile: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # User mobile phone number.
        self.mobile = mobile

    def validate(self):
        self.validate_required(self.mobile, 'mobile')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.mobile is not None:
            result['mobile'] = self.mobile
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('mobile') is not None:
            self.mobile = m.get('mobile')
        return self


class QueryYdregprotEcregisterResponse(TeaModel):
    """Response model for the e-commerce registration risk query."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        passed: bool = None,
        score: str = None,
        strategies: List[str] = None,
        decision: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Whether the verification passed.
        self.passed = passed
        # Risk score.
        self.score = score
        # List of strategies that were hit.
        self.strategies = strategies
        # Risk decision result.
        self.decision = decision

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.passed is not None:
            result['passed'] = self.passed
        if self.score is not None:
            result['score'] = self.score
        if self.strategies is not None:
            result['strategies'] = self.strategies
        if self.decision is not None:
            result['decision'] = self.decision
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('passed') is not None:
            self.passed = m.get('passed')
        if m.get('score') is not None:
            self.score = m.get('score')
        if m.get('strategies') is not None:
            self.strategies = m.get('strategies')
        if m.get('decision') is not None:
            self.decision = m.get('decision')
        return self


class QueryEpayauthBranchbankRequest(TeaModel):
    """Request model: look up branch banks by name, district or head-office code."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        bank_name: str = None,
        district_code: str = None,
        root_bank_code: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Bank name; full or partial names are supported.
        # At least one of bank_name and district_code must be non-empty.
        self.bank_name = bank_name
        # Administrative district code.
        # At least one of bank_name and district_code must be non-empty.
        self.district_code = district_code
        # Head-office interbank code.
        self.root_bank_code = root_bank_code

    def validate(self):
        self.validate_required(self.root_bank_code, 'root_bank_code')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.bank_name is not None:
            result['bank_name'] = self.bank_name
        if self.district_code is not None:
            result['district_code'] = self.district_code
        if self.root_bank_code is not None:
            result['root_bank_code'] = self.root_bank_code
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('bank_name') is not None:
            self.bank_name = m.get('bank_name')
        if m.get('district_code') is not None:
            self.district_code = m.get('district_code')
        if m.get('root_bank_code') is not None:
            self.root_bank_code = m.get('root_bank_code')
        return self


class QueryEpayauthBranchbankResponse(TeaModel):
    """Response model carrying the matched branch-bank institutions."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        bank_details: List[Institution] = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # List of Institution entries.
        self.bank_details = bank_details

    def validate(self):
        if self.bank_details:
            for k in self.bank_details:
                if k:
                    k.validate()

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        result['bank_details'] = []
        if self.bank_details is not None:
            for k in self.bank_details:
                result['bank_details'].append(k.to_map() if k else None)
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        self.bank_details = []
        if m.get('bank_details') is not None:
            for k in m.get('bank_details'):
                temp_model = Institution()
                self.bank_details.append(temp_model.from_map(k))
        return self


class QueryEpayauthDistrictRequest(TeaModel):
    """Request model: query administrative districts under a parent code."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        parent_code: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Parent administrative district code. If omitted, province-level codes
        # are queried by default; province/city/county levels are supported.
        self.parent_code = parent_code

    def validate(self):
        self.validate_required(self.parent_code, 'parent_code')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.parent_code is not None:
            result['parent_code'] = self.parent_code
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('parent_code') is not None:
            self.parent_code = m.get('parent_code')
        return self


class QueryEpayauthDistrictResponse(TeaModel):
    """Response model carrying the matched district institutions."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        district_details: List[Institution] = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # List of District entries.
        self.district_details = district_details

    def validate(self):
        if self.district_details:
            for k in self.district_details:
                if k:
                    k.validate()

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        result['district_details'] = []
        if self.district_details is not None:
            for k in self.district_details:
                result['district_details'].append(k.to_map() if k else None)
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        self.district_details = []
        if m.get('district_details') is not None:
            for k in m.get('district_details'):
                temp_model = Institution()
                self.district_details.append(temp_model.from_map(k))
        return self


class InitEpayauthVerifyRequest(TeaModel):
    """Request model: initiate a corporate bank-card payment verification."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        bank_card_no: str = None,
        bank_code: str = None,
        callback_url: str = None,
        ep_cert_name: str = None,
        ep_cert_no: str = None,
        legal_person_cert_name: str = None,
        legal_person_cert_no: str = None,
        mobile: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Bank card number to be verified.
        #
        self.bank_card_no = bank_card_no
        # PBOC interbank code.
        self.bank_code = bank_code
        # Callback notification URL.
        self.callback_url = callback_url
        # Enterprise name.
        self.ep_cert_name = ep_cert_name
        # Enterprise certificate number.
        self.ep_cert_no = ep_cert_no
        # Legal representative's name.
        self.legal_person_cert_name = legal_person_cert_name
        # Legal representative's ID card number.
        self.legal_person_cert_no = legal_person_cert_no
        # Mobile number used to receive the payment-verification SMS.
        self.mobile = mobile

    def validate(self):
        self.validate_required(self.bank_card_no, 'bank_card_no')
        self.validate_required(self.bank_code, 'bank_code')
        self.validate_required(self.callback_url, 'callback_url')
        self.validate_required(self.ep_cert_name, 'ep_cert_name')
        self.validate_required(self.ep_cert_no, 'ep_cert_no')
        self.validate_required(self.legal_person_cert_name, 'legal_person_cert_name')
        self.validate_required(self.legal_person_cert_no, 'legal_person_cert_no')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.bank_card_no is not None:
            result['bank_card_no'] = self.bank_card_no
        if self.bank_code is not None:
            result['bank_code'] = self.bank_code
        if self.callback_url is not None:
            result['callback_url'] = self.callback_url
        if self.ep_cert_name is not None:
            result['ep_cert_name'] = self.ep_cert_name
        if self.ep_cert_no is not None:
            result['ep_cert_no'] = self.ep_cert_no
        if self.legal_person_cert_name is not None:
            result['legal_person_cert_name'] = self.legal_person_cert_name
        if self.legal_person_cert_no is not None:
            result['legal_person_cert_no'] = self.legal_person_cert_no
        if self.mobile is not None:
            result['mobile'] = self.mobile
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('bank_card_no') is not None:
            self.bank_card_no = m.get('bank_card_no')
        if m.get('bank_code') is not None:
            self.bank_code = m.get('bank_code')
        if m.get('callback_url') is not None:
            self.callback_url = m.get('callback_url')
        if m.get('ep_cert_name') is not None:
            self.ep_cert_name = m.get('ep_cert_name')
        if m.get('ep_cert_no') is not None:
            self.ep_cert_no = m.get('ep_cert_no')
        if m.get('legal_person_cert_name') is not None:
            self.legal_person_cert_name = m.get('legal_person_cert_name')
        if m.get('legal_person_cert_no') is not None:
            self.legal_person_cert_no = m.get('legal_person_cert_no')
        if m.get('mobile') is not None:
            self.mobile = m.get('mobile')
        return self


class InitEpayauthVerifyResponse(TeaModel):
    """Response model: carries the verification ID for a started verification."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        verify_id: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Example: 2017070610120520200000000051240001626725
        self.verify_id = verify_id

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.verify_id is not None:
            result['verify_id'] = self.verify_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('verify_id') is not None:
            self.verify_id = m.get('verify_id')
        return self


class QueryEpayauthVerifyRequest(TeaModel):
    """Request model: confirm a payment verification with the received amount."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        amount: str = None,
        currency: str = None,
        verify_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Payment amount; only positive numbers with up to two decimal places,
        # in yuan.
        self.amount = amount
        # Payment currency.
        self.currency = currency
        # Payment-verification ID: the unique identifier generated when the
        # verification was accepted.
        self.verify_id = verify_id

    def validate(self):
        self.validate_required(self.amount, 'amount')
        self.validate_required(self.currency, 'currency')
        self.validate_required(self.verify_id, 'verify_id')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.amount is not None:
            result['amount'] = self.amount
        if self.currency is not None:
            result['currency'] = self.currency
        if self.verify_id is not None:
            result['verify_id'] = self.verify_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('amount') is not None:
            self.amount = m.get('amount')
        if m.get('currency') is not None:
            self.currency = m.get('currency')
        if m.get('verify_id') is not None:
            self.verify_id = m.get('verify_id')
        return self


class QueryEpayauthVerifyResponse(TeaModel):
    """Response model: whether the payment verification succeeded."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        valid: bool = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Whether the verification succeeded.
        self.valid = valid

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.valid is not None:
            result['valid'] = self.valid
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('valid') is not None:
            self.valid = m.get('valid')
        return self


class QueryBmpbrowserTransactionqrcodeRequest(TeaModel):
    """Request model: fetch the QR code of an on-chain transaction."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        bizid: str = None,
        contract_id: str = None,
        hash: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Unique chain ID on AntChain.
        self.bizid = bizid
        # On-chain contract ID.
        self.contract_id = contract_id
        # Hash of the on-chain transaction on AntChain.
        self.hash = hash

    def validate(self):
        self.validate_required(self.bizid, 'bizid')
        self.validate_required(self.hash, 'hash')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.bizid is not None:
            result['bizid'] = self.bizid
        if self.contract_id is not None:
            result['contract_id'] = self.contract_id
        if self.hash is not None:
            result['hash'] = self.hash
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('bizid') is not None:
            self.bizid = m.get('bizid')
        if m.get('contract_id') is not None:
            self.contract_id = m.get('contract_id')
        if m.get('hash') is not None:
            self.hash = m.get('hash')
        return self


class QueryBmpbrowserTransactionqrcodeResponse(TeaModel):
    """Response model carrying the transaction QR code content."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        qr_code_download_url: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Base64-encoded binary content of the transaction QR code.
        self.qr_code_download_url = qr_code_download_url

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.qr_code_download_url is not None:
            result['qr_code_download_url'] = self.qr_code_download_url
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('qr_code_download_url') is not None:
            self.qr_code_download_url = m.get('qr_code_download_url')
        return self


class AddBmpbrowserPrivilegeRequest(TeaModel):
    """Request model: grant browser view privileges to a set of phone numbers."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        bizid: str = None,
        phone_numbers: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Unique chain ID on AntChain.
        self.bizid = bizid
        # Set of Alipay phone numbers to grant view privileges to.
        self.phone_numbers = phone_numbers

    def validate(self):
        self.validate_required(self.bizid, 'bizid')
        self.validate_required(self.phone_numbers, 'phone_numbers')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.bizid is not None:
            result['bizid'] = self.bizid
        if self.phone_numbers is not None:
            result['phone_numbers'] = self.phone_numbers
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('bizid') is not None:
            self.bizid = m.get('bizid')
        if m.get('phone_numbers') is not None:
            self.phone_numbers = m.get('phone_numbers')
        return self


class AddBmpbrowserPrivilegeResponse(TeaModel):
    """Response model: status of the batch privilege-grant operation."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        status: int = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Whether the batch privilege grant succeeded.
        self.status = status

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.status is not None:
            result['status'] = self.status
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('status') is not None:
            self.status = m.get('status')
        return self


class QueryIdcocrIdcardRequest(TeaModel):
    """Request model: OCR recognition of an ID-card image."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        image_content: str = None,
        side: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Base64-encoded content of the ID-card image.
        self.image_content = image_content
        # face: front side of the ID card
        # back: back side of the ID card
        self.side = side

    def validate(self):
        self.validate_required(self.image_content, 'image_content')
        self.validate_required(self.side, 'side')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.image_content is not None:
            result['image_content'] = self.image_content
        if self.side is not None:
            result['side'] = self.side
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('image_content') is not None:
            self.image_content = m.get('image_content')
        if m.get('side') is not None:
            self.side = m.get('side')
        return self


class QueryIdcocrIdcardResponse(TeaModel):
    """Response model carrying the fields extracted from an ID card."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        address: str = None,
        birth: str = None,
        error_content: str = None,
        num: str = None,
        sex: str = None,
        success: bool = None,
        end_date: str = None,
        issue: str = None,
        start_date: str = None,
        nationality: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Address.
        self.address = address
        # Date of birth.
        self.birth = birth
        # Detailed error reason when information extraction fails.
        self.error_content = error_content
        # ID card number.
        self.num = num
        # Gender: male/female (男/女).
        self.sex = sex
        # Whether parsing succeeded.
        self.success = success
        # Expiration date of the validity period.
        self.end_date = end_date
        # Issuing authority (public security bureau branch).
        self.issue = issue
        # Start date of the validity period.
        self.start_date = start_date
        # Ethnicity.
        self.nationality = nationality

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.address is not None:
            result['address'] = self.address
        if self.birth is not None:
            result['birth'] = self.birth
        if self.error_content is not None:
            result['error_content'] = self.error_content
        if self.num is not None:
            result['num'] = self.num
        if self.sex is not None:
            result['sex'] = self.sex
        if self.success is not None:
            result['success'] = self.success
        if self.end_date is not None:
            result['end_date'] = self.end_date
        if self.issue is not None:
            result['issue'] = self.issue
        if self.start_date is not None:
            result['start_date'] = self.start_date
        if self.nationality is not None:
            result['nationality'] = self.nationality
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('address') is not None:
            self.address = m.get('address')
        if m.get('birth') is not None:
            self.birth = m.get('birth')
        if m.get('error_content') is not None:
            self.error_content = m.get('error_content')
        if m.get('num') is not None:
            self.num = m.get('num')
        if m.get('sex') is not None:
            self.sex = m.get('sex')
        if m.get('success') is not None:
            self.success = m.get('success')
        if m.get('end_date') is not None:
            self.end_date = m.get('end_date')
        if m.get('issue') is not None:
            self.issue = m.get('issue')
        if m.get('start_date') is not None:
            self.start_date = m.get('start_date')
        if m.get('nationality') is not None:
            self.nationality = m.get('nationality')
        return self


class InitCaCertificateRequest(TeaModel):
    """Request model: submit a certificate signing request (CSR) to the CA."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_uuid: str = None,
        command: str = None,
        config_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Business-unique UUID, used for subsequent certificate queries.
        self.biz_uuid = biz_uuid
        # Certificate signing request (CSR).
        self.command = command
        # 8B75D2EEDF1658CC9C1B7C05AA600856: blockchain baasplus platform,
        # licensed certificate service scenario.
        # 2D25EFFD786590991542CAE2D14CB18E: blockchain baasplus platform,
        # unlicensed certificate service scenario.
        self.config_id = config_id

    def validate(self):
        self.validate_required(self.biz_uuid, 'biz_uuid')
        self.validate_required(self.command, 'command')
        self.validate_required(self.config_id, 'config_id')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_uuid is not None:
            result['biz_uuid'] = self.biz_uuid
        if self.command is not None:
            result['command'] = self.command
        if self.config_id is not None:
            result['config_id'] = self.config_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_uuid') is not None:
            self.biz_uuid = m.get('biz_uuid')
        if m.get('command') is not None:
            self.command = m.get('command')
        if m.get('config_id') is not None:
            self.config_id = m.get('config_id')
        return self


class InitCaCertificateResponse(TeaModel):
    """Response model carrying the issued certificate serial and content.

    NOTE: the attribute ``p_10`` is serialized under the wire key ``p10``.
    """
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        cert_sn: str = None,
        p_10: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Certificate serial number.
        self.cert_sn = cert_sn
        # Certificate content.
        self.p_10 = p_10

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.cert_sn is not None:
            result['cert_sn'] = self.cert_sn
        if self.p_10 is not None:
            result['p10'] = self.p_10
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('cert_sn') is not None:
            self.cert_sn = m.get('cert_sn')
        if m.get('p10') is not None:
            self.p_10 = m.get('p10')
        return self


class InitContentriskInternalRequest(TeaModel):
    """Request model: submit content (text/image/audio/video/links) for risk detection."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        audio_urls: str = None,
        biz_info: BizInfo = None,
        link_urls: str = None,
        picture_urls: str = None,
        scene_code: str = None,
        text: str = None,
        video_urls: str = None,
        account_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # URLs of the audio files to analyze.
        self.audio_urls = audio_urls
        # Internal field.
        self.biz_info = biz_info
        # Links to be checked.
        self.link_urls = link_urls
        # Picture URLs.
        self.picture_urls = picture_urls
        # Scene code.
        self.scene_code = scene_code
        # Text to be checked.
        self.text = text
        # URLs of the video files to analyze.
        self.video_urls = video_urls
        # User ID.
        self.account_id = account_id

    def validate(self):
        self.validate_required(self.biz_info, 'biz_info')
        if self.biz_info:
            self.biz_info.validate()
        self.validate_required(self.scene_code, 'scene_code')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.audio_urls is not None:
            result['audio_urls'] = self.audio_urls
        if self.biz_info is not None:
            result['biz_info'] = self.biz_info.to_map()
        if self.link_urls is not None:
            result['link_urls'] = self.link_urls
        if self.picture_urls is not None:
            result['picture_urls'] = self.picture_urls
        if self.scene_code is not None:
            result['scene_code'] = self.scene_code
        if self.text is not None:
            result['text'] = self.text
        if self.video_urls is not None:
            result['video_urls'] = self.video_urls
        if self.account_id is not None:
            result['account_id'] = self.account_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('audio_urls') is not None:
            self.audio_urls = m.get('audio_urls')
        if m.get('biz_info') is not None:
            temp_model = BizInfo()
            self.biz_info = temp_model.from_map(m['biz_info'])
        if m.get('link_urls') is not None:
            self.link_urls = m.get('link_urls')
        if m.get('picture_urls') is not None:
            self.picture_urls = m.get('picture_urls')
        if m.get('scene_code') is not None:
            self.scene_code = m.get('scene_code')
        if m.get('text') is not None:
            self.text = m.get('text')
        if m.get('video_urls') is not None:
            self.video_urls = m.get('video_urls')
        if m.get('account_id') is not None:
            self.account_id = m.get('account_id')
        return self


class InitContentriskInternalResponse(TeaModel):
    """Response model for the synchronous part of a content-risk detection."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        app_scene_data_id: str = None,
        event_id: str = None,
        hit_detect_items: List[HitDetectItems] = None,
        need_query: str = None,
        result_action: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Content ID, used as the lookup ID when querying async detection results.
        self.app_scene_data_id = app_scene_data_id
        # Event ID returned by synchronous content-safety detection; used to
        # fetch the detection result asynchronously.
        self.event_id = event_id
        # Details of the matched detection items.
        self.hit_detect_items = hit_detect_items
        # Flag indicating whether an asynchronous query is required.
        # need: wait 60 seconds, then query asynchronously.
        # no_need: not required; the result was returned synchronously.
        self.need_query = need_query
        # PASSED: content recognized as acceptable; can be displayed normally.
        #
        # REJECTED: rejected content, e.g. it contains prohibited words; must
        # not appear on the site.
        #
        # CC: after posting, the author sees success and their own message, but
        # other users do not receive or see it.
        #
        # DELETE: delete the data; historical posts are removed to stop the
        # content from spreading further.
        #
        # REPLACE: replace part of the words with ***.
        #
        # WARNING: suspicious content; warn the user about the operation.
        #
        # RECOVER: restore content that was previously misjudged.
        self.result_action = result_action

    def validate(self):
        if self.hit_detect_items:
            for k in self.hit_detect_items:
                if k:
                    k.validate()

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.app_scene_data_id is not None:
            result['app_scene_data_id'] = self.app_scene_data_id
        if self.event_id is not None:
            result['event_id'] = self.event_id
        result['hit_detect_items'] = []
        if self.hit_detect_items is not None:
            for k in self.hit_detect_items:
                result['hit_detect_items'].append(k.to_map() if k else None)
        if self.need_query is not None:
            result['need_query'] = self.need_query
        if self.result_action is not None:
            result['result_action'] = self.result_action
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('app_scene_data_id') is not None:
            self.app_scene_data_id = m.get('app_scene_data_id')
        if m.get('event_id') is not None:
            self.event_id = m.get('event_id')
        self.hit_detect_items = []
        if m.get('hit_detect_items') is not None:
            for k in m.get('hit_detect_items'):
                temp_model = HitDetectItems()
                self.hit_detect_items.append(temp_model.from_map(k))
        if m.get('need_query') is not None:
            self.need_query = m.get('need_query')
        if m.get('result_action') is not None:
            self.result_action = m.get('result_action')
        return self


class QueryContentriskInternalRequest(TeaModel):
    """Request model: query the asynchronous content-risk detection result."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        scene_code: str = None,
        app_scene_data_id: str = None,
        biz_info: BizInfo = None,
        event_id: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Scene code.
        self.scene_code = scene_code
        # Content business ID, used as the index for the async result lookup.
        self.app_scene_data_id = app_scene_data_id
        # Internal parameter.
        self.biz_info = biz_info
        # Content-detection event ID; the async detection result is queried by
        # this ID.
        self.event_id = event_id

    def validate(self):
        self.validate_required(self.scene_code, 'scene_code')
        self.validate_required(self.app_scene_data_id, 'app_scene_data_id')
        self.validate_required(self.biz_info, 'biz_info')
        if self.biz_info:
            self.biz_info.validate()

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.scene_code is not None:
            result['scene_code'] = self.scene_code
        if self.app_scene_data_id is not None:
            result['app_scene_data_id'] = self.app_scene_data_id
        if self.biz_info is not None:
            result['biz_info'] = self.biz_info.to_map()
        if self.event_id is not None:
            result['event_id'] = self.event_id
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('scene_code') is not None:
            self.scene_code = m.get('scene_code')
        if m.get('app_scene_data_id') is not None:
            self.app_scene_data_id = m.get('app_scene_data_id')
        if m.get('biz_info') is not None:
            temp_model = BizInfo()
            self.biz_info = temp_model.from_map(m['biz_info'])
        if m.get('event_id') is not None:
            self.event_id = m.get('event_id')
        return self


class QueryContentriskInternalResponse(TeaModel):
    """Response model for the asynchronous content-risk detection query."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        hit_detect_items: List[HitDetectItems] = None,
        result_action: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Details of the matched detection items.
        self.hit_detect_items = hit_detect_items
        # PASSED: content recognized as acceptable; can be displayed normally.
        # REJECTED: rejected content, e.g. it contains prohibited words; must
        # not appear on the site.
        self.result_action = result_action

    def validate(self):
        if self.hit_detect_items:
            for k in self.hit_detect_items:
                if k:
                    k.validate()

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        result['hit_detect_items'] = []
        if self.hit_detect_items is not None:
            for k in self.hit_detect_items:
                result['hit_detect_items'].append(k.to_map() if k else None)
        if self.result_action is not None:
            result['result_action'] = self.result_action
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        self.hit_detect_items = []
        if m.get('hit_detect_items') is not None:
            for k in m.get('hit_detect_items'):
                temp_model = HitDetectItems()
                self.hit_detect_items.append(temp_model.from_map(k))
        if m.get('result_action') is not None:
            self.result_action = m.get('result_action')
        return self


class InitIndividualidImageauthRequest(TeaModel):
    """Request model: start an individual identity check with a facial photo."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        cert_name: str = None,
        cert_no: str = None,
        encoded_facial_picture_front: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Name.
        self.cert_name = cert_name
        # ID card number.
        self.cert_no = cert_no
        # Base64-encoded frontal facial photo.
        self.encoded_facial_picture_front = encoded_facial_picture_front

    def validate(self):
        self.validate_required(self.cert_name, 'cert_name')
        self.validate_required(self.cert_no, 'cert_no')
        self.validate_required(self.encoded_facial_picture_front, 'encoded_facial_picture_front')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.cert_name is not None:
            result['cert_name'] = self.cert_name
        if self.cert_no is not None:
            result['cert_no'] = self.cert_no
        if self.encoded_facial_picture_front is not None:
            result['encoded_facial_picture_front'] = self.encoded_facial_picture_front
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('cert_name') is not None:
            self.cert_name = m.get('cert_name')
        if m.get('cert_no') is not None:
            self.cert_no = m.get('cert_no')
        if m.get('encoded_facial_picture_front') is not None:
            self.encoded_facial_picture_front = m.get('encoded_facial_picture_front')
        return self


class InitIndividualidImageauthResponse(TeaModel):
    """Response model: outcome of the individual identity image check."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        certify_id: str = None,
        passed: bool = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Unique ID of the authentication.
        self.certify_id = certify_id
        # Whether the authentication succeeded.
        self.passed = passed

    def validate(self):
        pass

    def to_map(self):
        result = dict()
        if self.req_msg_id is not None:
            result['req_msg_id'] = self.req_msg_id
        if self.result_code is not None:
            result['result_code'] = self.result_code
        if self.result_msg is not None:
            result['result_msg'] = self.result_msg
        if self.certify_id is not None:
            result['certify_id'] = self.certify_id
        if self.passed is not None:
            result['passed'] = self.passed
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('req_msg_id') is not None:
            self.req_msg_id = m.get('req_msg_id')
        if m.get('result_code') is not None:
            self.result_code = m.get('result_code')
        if m.get('result_msg') is not None:
            self.result_msg = m.get('result_msg')
        if m.get('certify_id') is not None:
            self.certify_id = m.get('certify_id')
        if m.get('passed') is not None:
            self.passed = m.get('passed')
        return self


class AddIotcseAccountRequest(TeaModel):
    """Request model: add an IoT CSE account from a serialized JSON payload."""
    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        biz_content: str = None,
    ):
        # Authorization token under OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Serialized JSON string.
        self.biz_content = biz_content

    def validate(self):
        self.validate_required(self.biz_content, 'biz_content')

    def to_map(self):
        result = dict()
        if self.auth_token is not None:
            result['auth_token'] = self.auth_token
        if self.product_instance_id is not None:
            result['product_instance_id'] = self.product_instance_id
        if self.biz_content is not None:
            result['biz_content'] = self.biz_content
        return result

    def from_map(self, m: dict = None):
        m = m or dict()
        if m.get('auth_token') is not None:
            self.auth_token = m.get('auth_token')
        if m.get('product_instance_id') is not None:
            self.product_instance_id = m.get('product_instance_id')
        if m.get('biz_content') is not None:
            self.biz_content = m.get('biz_content')
        return self


class AddIotcseAccountResponse(TeaModel):
    """Response model for the IoT CSE account addition (continues past this chunk)."""
    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        raw_response: str = None,
    ):
        # Unique request ID, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Text description of the error, if any.
        self.result_msg = result_msg
        # Raw response payload.
        self.raw_response = raw_response

    def validate(self):
        pass

    def
to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class SaveIotcseEvidenceRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 序列化的json string self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class SaveIotcseEvidenceResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 返回 
self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class QueryIotcseEvidenceRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 序列化的json string self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class QueryIotcseEvidenceResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = 
result_code # 异常信息的文本描述 self.result_msg = result_msg # 返回 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class CreateDidCorporatedidagentRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, extension_info: str = None, indexs: List[str] = None, owner_name: str = None, owner_uid: str = None, services: List[DidDocServicesInfo] = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 扩展字段 # { "nation": "CN", //企业注册地址 "type": "LimitedCompany", //企业类型 "name": "演示用户名", //必选字段,企业名 "licenceNo": "1111", //营业执照 "address": "1111", //企业地址 "parentName": "", //<-必选字段 业务方名 需要提前协商 "linkType": "indirect", //<- 连接类型,direct直链企业, indirect间链企业 "certifyDate": "2019-1-1", //证书颁发时间 "licenceExpireDate": "2020-1-1", //证书到期时间 "businessScope": "1111", //企业经营范围 "businessAddress": "1111", //企业经营地址 "corporateBusinessType": 0, //<- 企业类型:0 一般企业, 1 个人商户 "channelName": "" //<- 必选字段 业务渠道 需要提前沟通 } self.extension_info = extension_info # 所有需要关联的外键,外键必须已did auth key controller的did作为前缀+“sidekey:”+外键 self.indexs = indexs # 企业名称 self.owner_name = owner_name # 自定义企业唯一id,企业在自有模式下的唯一号bid的hash值,调用者需要保证其唯一性 self.owner_uid = owner_uid # 携带自己定义的服务类型 self.services = services def validate(self): 
self.validate_required(self.extension_info, 'extension_info') self.validate_required(self.owner_uid, 'owner_uid') if self.services: for k in self.services: if k: k.validate() def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.extension_info is not None: result['extension_info'] = self.extension_info if self.indexs is not None: result['indexs'] = self.indexs if self.owner_name is not None: result['owner_name'] = self.owner_name if self.owner_uid is not None: result['owner_uid'] = self.owner_uid result['services'] = [] if self.services is not None: for k in self.services: result['services'].append(k.to_map() if k else None) return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('extension_info') is not None: self.extension_info = m.get('extension_info') if m.get('indexs') is not None: self.indexs = m.get('indexs') if m.get('owner_name') is not None: self.owner_name = m.get('owner_name') if m.get('owner_uid') is not None: self.owner_uid = m.get('owner_uid') self.services = [] if m.get('services') is not None: for k in m.get('services'): temp_model = DidDocServicesInfo() self.services.append(temp_model.from_map(k)) return self class CreateDidCorporatedidagentResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, did: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 生成的did字符串 self.did = did def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: 
result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.did is not None: result['did'] = self.did return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('did') is not None: self.did = m.get('did') return self class InitIndividualidFaceauthinternalRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_code: str = None, cert_name: str = None, cert_no: str = None, biz_info: BizInfo = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 认证方式,FACE表示在支付宝内进行认证,FACE_SDK表示在客户的应用中进行认证 默认为FACE self.biz_code = biz_code # 姓名 self.cert_name = cert_name # 身份证号 self.cert_no = cert_no # 内部字段 self.biz_info = biz_info def validate(self): self.validate_required(self.cert_name, 'cert_name') self.validate_required(self.cert_no, 'cert_no') self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_code is not None: result['biz_code'] = self.biz_code if self.cert_name is not None: result['cert_name'] = self.cert_name if self.cert_no is not None: result['cert_no'] = self.cert_no if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_code') is not None: self.biz_code = 
m.get('biz_code') if m.get('cert_name') is not None: self.cert_name = m.get('cert_name') if m.get('cert_no') is not None: self.cert_no = m.get('cert_no') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) return self class InitIndividualidFaceauthinternalResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, certify_id: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 认证的唯一性id self.certify_id = certify_id def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.certify_id is not None: result['certify_id'] = self.certify_id return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('certify_id') is not None: self.certify_id = m.get('certify_id') return self class CertifyIndividualidFaceauthinternalRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, callback_url: str = None, certify_id: str = None, redirect_url: str = None, biz_info: BizInfo = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 回调通知地址 self.callback_url = callback_url # 认证的唯一性id # self.certify_id = certify_id # 认证完成后回跳地址 self.redirect_url = redirect_url # 内部字段 self.biz_info = biz_info def validate(self): self.validate_required(self.certify_id, 'certify_id') self.validate_required(self.biz_info, 'biz_info') if self.biz_info: 
self.biz_info.validate() def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.callback_url is not None: result['callback_url'] = self.callback_url if self.certify_id is not None: result['certify_id'] = self.certify_id if self.redirect_url is not None: result['redirect_url'] = self.redirect_url if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('callback_url') is not None: self.callback_url = m.get('callback_url') if m.get('certify_id') is not None: self.certify_id = m.get('certify_id') if m.get('redirect_url') is not None: self.redirect_url = m.get('redirect_url') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) return self class CertifyIndividualidFaceauthinternalResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, certify_id: str = None, verify_url: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 认证的唯一性id self.certify_id = certify_id # 认证url self.verify_url = verify_url def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.certify_id is not None: result['certify_id'] = self.certify_id if self.verify_url is not None: result['verify_url'] = self.verify_url return result def from_map(self, m: 
dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('certify_id') is not None: self.certify_id = m.get('certify_id') if m.get('verify_url') is not None: self.verify_url = m.get('verify_url') return self class QueryIndividualidFaceauthinternalRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_info: BizInfo = None, certify_id: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 内部字段 self.biz_info = biz_info # 认证的唯一性id self.certify_id = certify_id def validate(self): self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() self.validate_required(self.certify_id, 'certify_id') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() if self.certify_id is not None: result['certify_id'] = self.certify_id return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) if m.get('certify_id') is not None: self.certify_id = m.get('certify_id') return self class QueryIndividualidFaceauthinternalResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, certify_id: str = None, passed: bool = None, finished: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 
结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 认证的唯一性id self.certify_id = certify_id # 是否认证通过 self.passed = passed # 用户是否完成刷脸 self.finished = finished def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.certify_id is not None: result['certify_id'] = self.certify_id if self.passed is not None: result['passed'] = self.passed if self.finished is not None: result['finished'] = self.finished return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('certify_id') is not None: self.certify_id = m.get('certify_id') if m.get('passed') is not None: self.passed = m.get('passed') if m.get('finished') is not None: self.finished = m.get('finished') return self class InitEnterpriseidFaceauthinternalRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, ep_cert_name: str = None, ep_cert_no: str = None, ep_cert_type: str = None, legal_person_cert_name: str = None, legal_person_cert_no: str = None, biz_info: BizInfo = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 企业名称 self.ep_cert_name = ep_cert_name # 企业证件号 self.ep_cert_no = ep_cert_no # 企业证件类型(NATIONAL_LEGAL(工商注册号)或 NATIONAL_LEGAL_MERGE ( 社会统一信用代码)) self.ep_cert_type = ep_cert_type # 企业法人姓名 self.legal_person_cert_name = legal_person_cert_name # 企业法人身份证号(目前只支持身份证号) # self.legal_person_cert_no = legal_person_cert_no # 内部字段 self.biz_info = biz_info def validate(self): self.validate_required(self.ep_cert_name, 'ep_cert_name') 
self.validate_required(self.ep_cert_no, 'ep_cert_no') self.validate_required(self.ep_cert_type, 'ep_cert_type') self.validate_required(self.legal_person_cert_name, 'legal_person_cert_name') self.validate_required(self.legal_person_cert_no, 'legal_person_cert_no') self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no if self.ep_cert_type is not None: result['ep_cert_type'] = self.ep_cert_type if self.legal_person_cert_name is not None: result['legal_person_cert_name'] = self.legal_person_cert_name if self.legal_person_cert_no is not None: result['legal_person_cert_no'] = self.legal_person_cert_no if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: self.ep_cert_no = m.get('ep_cert_no') if m.get('ep_cert_type') is not None: self.ep_cert_type = m.get('ep_cert_type') if m.get('legal_person_cert_name') is not None: self.legal_person_cert_name = m.get('legal_person_cert_name') if m.get('legal_person_cert_no') is not None: self.legal_person_cert_no = m.get('legal_person_cert_no') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) return self class InitEnterpriseidFaceauthinternalResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = 
None, result_msg: str = None, biz_no: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 本次认证的业务唯一性标示 # self.biz_no = biz_no def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.biz_no is not None: result['biz_no'] = self.biz_no return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('biz_no') is not None: self.biz_no = m.get('biz_no') return self class CertifyEnterpriseidFaceauthinternalRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_info: BizInfo = None, biz_no: str = None, callback_url: str = None, redirect_url: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 内部字段 self.biz_info = biz_info # 认证的唯一性标示 self.biz_no = biz_no # 回调通知地址 # self.callback_url = callback_url # https://www.example.com/redircet self.redirect_url = redirect_url def validate(self): self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() self.validate_required(self.biz_no, 'biz_no') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() if self.biz_no is not None: result['biz_no'] = self.biz_no if self.callback_url is not None: result['callback_url'] = 
self.callback_url if self.redirect_url is not None: result['redirect_url'] = self.redirect_url return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) if m.get('biz_no') is not None: self.biz_no = m.get('biz_no') if m.get('callback_url') is not None: self.callback_url = m.get('callback_url') if m.get('redirect_url') is not None: self.redirect_url = m.get('redirect_url') return self class CertifyEnterpriseidFaceauthinternalResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, biz_no: str = None, verify_url: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 认证的唯一性标示 self.biz_no = biz_no # 认证url self.verify_url = verify_url def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.biz_no is not None: result['biz_no'] = self.biz_no if self.verify_url is not None: result['verify_url'] = self.verify_url return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('biz_no') is not None: self.biz_no = m.get('biz_no') if m.get('verify_url') is not None: self.verify_url = m.get('verify_url') return self class QueryEverifyTwometainternalRequest(TeaModel): def __init__( 
self, auth_token: str = None, product_instance_id: str = None, ep_cert_name: str = None, ep_cert_no: str = None, biz_info: BizInfo = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 企业名称 # self.ep_cert_name = ep_cert_name # 企业证件号 # self.ep_cert_no = ep_cert_no # 内部字段 self.biz_info = biz_info def validate(self): self.validate_required(self.ep_cert_name, 'ep_cert_name') self.validate_required(self.ep_cert_no, 'ep_cert_no') self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: self.ep_cert_no = m.get('ep_cert_no') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) return self class QueryEverifyTwometainternalResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, code: str = None, enterprise_status: str = None, open_time: str = None, passed: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 0:核验成功 1:企业信息有误 2:企业非正常营业 self.code = code # 经营状态 # self.enterprise_status = enterprise_status # 营业期限 
# self.open_time = open_time # 认证是否通过 self.passed = passed def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.code is not None: result['code'] = self.code if self.enterprise_status is not None: result['enterprise_status'] = self.enterprise_status if self.open_time is not None: result['open_time'] = self.open_time if self.passed is not None: result['passed'] = self.passed return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('code') is not None: self.code = m.get('code') if m.get('enterprise_status') is not None: self.enterprise_status = m.get('enterprise_status') if m.get('open_time') is not None: self.open_time = m.get('open_time') if m.get('passed') is not None: self.passed = m.get('passed') return self class QueryEverifyThreemetainternalRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, ep_cert_name: str = None, ep_cert_no: str = None, legal_person_cert_name: str = None, biz_info: BizInfo = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 企业名称 # self.ep_cert_name = ep_cert_name # 企业证件号 # self.ep_cert_no = ep_cert_no # 法人姓名 self.legal_person_cert_name = legal_person_cert_name # 内部字段 self.biz_info = biz_info def validate(self): self.validate_required(self.ep_cert_name, 'ep_cert_name') self.validate_required(self.ep_cert_no, 'ep_cert_no') self.validate_required(self.legal_person_cert_name, 'legal_person_cert_name') self.validate_required(self.biz_info, 'biz_info') if self.biz_info: 
self.biz_info.validate() def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no if self.legal_person_cert_name is not None: result['legal_person_cert_name'] = self.legal_person_cert_name if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: self.ep_cert_no = m.get('ep_cert_no') if m.get('legal_person_cert_name') is not None: self.legal_person_cert_name = m.get('legal_person_cert_name') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) return self class QueryEverifyThreemetainternalResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, code: str = None, enterprise_status: str = None, open_time: str = None, passed: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 0:核验成功 1:企业信息有误 2:企业非正常营业 self.code = code # 经营状态 # self.enterprise_status = enterprise_status # 营业期限 # self.open_time = open_time # 认证是否通过 # self.passed = passed def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = 
self.result_msg if self.code is not None: result['code'] = self.code if self.enterprise_status is not None: result['enterprise_status'] = self.enterprise_status if self.open_time is not None: result['open_time'] = self.open_time if self.passed is not None: result['passed'] = self.passed return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('code') is not None: self.code = m.get('code') if m.get('enterprise_status') is not None: self.enterprise_status = m.get('enterprise_status') if m.get('open_time') is not None: self.open_time = m.get('open_time') if m.get('passed') is not None: self.passed = m.get('passed') return self class QueryEverifyFourmetainternalRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, ep_cert_name: str = None, ep_cert_no: str = None, legal_person_cert_name: str = None, legal_person_cert_no: str = None, biz_info: BizInfo = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 企业名称 self.ep_cert_name = ep_cert_name # 企业证件号 # self.ep_cert_no = ep_cert_no # 法人姓名 # self.legal_person_cert_name = legal_person_cert_name # 企业法人身份证号码 # self.legal_person_cert_no = legal_person_cert_no # 内部字段 self.biz_info = biz_info def validate(self): self.validate_required(self.ep_cert_name, 'ep_cert_name') self.validate_required(self.ep_cert_no, 'ep_cert_no') self.validate_required(self.legal_person_cert_name, 'legal_person_cert_name') self.validate_required(self.legal_person_cert_no, 'legal_person_cert_no') self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is 
not None: result['product_instance_id'] = self.product_instance_id if self.ep_cert_name is not None: result['ep_cert_name'] = self.ep_cert_name if self.ep_cert_no is not None: result['ep_cert_no'] = self.ep_cert_no if self.legal_person_cert_name is not None: result['legal_person_cert_name'] = self.legal_person_cert_name if self.legal_person_cert_no is not None: result['legal_person_cert_no'] = self.legal_person_cert_no if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('ep_cert_name') is not None: self.ep_cert_name = m.get('ep_cert_name') if m.get('ep_cert_no') is not None: self.ep_cert_no = m.get('ep_cert_no') if m.get('legal_person_cert_name') is not None: self.legal_person_cert_name = m.get('legal_person_cert_name') if m.get('legal_person_cert_no') is not None: self.legal_person_cert_no = m.get('legal_person_cert_no') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) return self class QueryEverifyFourmetainternalResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, code: str = None, enterprise_status: str = None, open_time: str = None, passed: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 0:核验成功 1:企业信息有误 2:企业非正常营业 # self.code = code # 企业经营状态 # self.enterprise_status = enterprise_status # 营业期限 # self.open_time = open_time # 认证是否通过 # self.passed = passed def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg 
is not None: result['result_msg'] = self.result_msg if self.code is not None: result['code'] = self.code if self.enterprise_status is not None: result['enterprise_status'] = self.enterprise_status if self.open_time is not None: result['open_time'] = self.open_time if self.passed is not None: result['passed'] = self.passed return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('code') is not None: self.code = m.get('code') if m.get('enterprise_status') is not None: self.enterprise_status = m.get('enterprise_status') if m.get('open_time') is not None: self.open_time = m.get('open_time') if m.get('passed') is not None: self.passed = m.get('passed') return self class QueryEnterpriseidFaceauthinternalRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_no: str = None, biz_info: BizInfo = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 认证的唯一性标示 self.biz_no = biz_no # 内部字段 self.biz_info = biz_info def validate(self): self.validate_required(self.biz_no, 'biz_no') self.validate_required(self.biz_info, 'biz_info') if self.biz_info: self.biz_info.validate() def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_no is not None: result['biz_no'] = self.biz_no if self.biz_info is not None: result['biz_info'] = self.biz_info.to_map() return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if 
m.get('biz_no') is not None: self.biz_no = m.get('biz_no') if m.get('biz_info') is not None: temp_model = BizInfo() self.biz_info = temp_model.from_map(m['biz_info']) return self class QueryEnterpriseidFaceauthinternalResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, biz_no: str = None, failed_code: str = None, failed_message: str = None, passed: bool = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # # 认证的唯一性标示 self.biz_no = biz_no # 认证失败错误码 # self.failed_code = failed_code # 认证失败原因信息 # self.failed_message = failed_message # 是否认证通过 self.passed = passed def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.biz_no is not None: result['biz_no'] = self.biz_no if self.failed_code is not None: result['failed_code'] = self.failed_code if self.failed_message is not None: result['failed_message'] = self.failed_message if self.passed is not None: result['passed'] = self.passed return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('biz_no') is not None: self.biz_no = m.get('biz_no') if m.get('failed_code') is not None: self.failed_code = m.get('failed_code') if m.get('failed_message') is not None: self.failed_message = m.get('failed_message') if m.get('passed') is not None: self.passed = m.get('passed') return self class AddIotcseThingsdidRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: 
str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 序列化的json string self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class AddIotcseThingsdidResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class UpdateIotcseThingsdidRequest(TeaModel): def __init__( self, auth_token: 
str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class UpdateIotcseThingsdidResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class 
QueryIotcseThingsdidRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class QueryIotcseThingsdidResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: 
self.raw_response = m.get('raw_response') return self class QueryIotcseAsyncprocessRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # {"key":"value"} self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class QueryIotcseAsyncprocessResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: 
self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class ExecIotcseGroupRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class ExecIotcseGroupResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') 
if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class QueryIotcseGroupdeviceRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class QueryIotcseGroupdeviceResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is 
not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class QueryIotcseDevicegroupRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class QueryIotcseDevicegroupResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id 
= m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class QueryIotcseTenantdeviceRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class QueryIotcseTenantdeviceResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() 
if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class UpdateIotcseDevicestatusRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class UpdateIotcseDevicestatusResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result 
def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class QueryIotcseDevicemodelRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class QueryIotcseDevicemodelResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: 
result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class UpdateIotcseDevicespaceRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class UpdateIotcseDevicespaceResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = 
self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class QueryIotcseEvidencebatchRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, biz_content: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 暂无 self.biz_content = biz_content def validate(self): self.validate_required(self.biz_content, 'biz_content') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.biz_content is not None: result['biz_content'] = self.biz_content return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('biz_content') is not None: self.biz_content = m.get('biz_content') return self class QueryIotcseEvidencebatchResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, raw_response: str = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 暂无 self.raw_response = raw_response def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if 
self.result_msg is not None: result['result_msg'] = self.result_msg if self.raw_response is not None: result['raw_response'] = self.raw_response return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('raw_response') is not None: self.raw_response = m.get('raw_response') return self class QueryBlocrBusinesslicenseRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, image_raw: str = None, image_url: str = None, source: str = None, trace_id: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 文件二进制内容 + base64 self.image_raw = image_raw # 图片下载url self.image_url = image_url # 服务调用来源(需要咨询服务提供方) self.source = source # 单次调用唯一标示,用于异常问题排查,调用方需要负责生成并且记录在调用日志里 self.trace_id = trace_id def validate(self): self.validate_required(self.source, 'source') self.validate_required(self.trace_id, 'trace_id') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.image_raw is not None: result['image_raw'] = self.image_raw if self.image_url is not None: result['image_url'] = self.image_url if self.source is not None: result['source'] = self.source if self.trace_id is not None: result['trace_id'] = self.trace_id return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('image_raw') is not None: self.image_raw = m.get('image_raw') if m.get('image_url') is not None: self.image_url = m.get('image_url') if 
m.get('source') is not None: self.source = m.get('source') if m.get('trace_id') is not None: self.trace_id = m.get('trace_id') return self class QueryBlocrBusinesslicenseResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, algo_msg: str = None, algo_ret: int = None, message: str = None, result: str = None, ret: int = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 算法错误信息 self.algo_msg = algo_msg # 算法异常错误码 self.algo_ret = algo_ret # 框架错误信息 self.message = message # 算法结果,JSON String self.result = result # 框架inference服务错误码,0为正常 self.ret = ret def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.algo_msg is not None: result['algo_msg'] = self.algo_msg if self.algo_ret is not None: result['algo_ret'] = self.algo_ret if self.message is not None: result['message'] = self.message if self.result is not None: result['result'] = self.result if self.ret is not None: result['ret'] = self.ret return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('algo_msg') is not None: self.algo_msg = m.get('algo_msg') if m.get('algo_ret') is not None: self.algo_ret = m.get('algo_ret') if m.get('message') is not None: self.message = m.get('message') if m.get('result') is not None: self.result = m.get('result') if m.get('ret') is not None: self.ret = m.get('ret') return self class QueryInvoicesocrVatinvoiceRequest(TeaModel): def __init__( self, auth_token: str = None, 
product_instance_id: str = None, image_raw: str = None, image_url: str = None, source: str = None, trace_id: str = None, file_type: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 文件二进制内容 + base64 self.image_raw = image_raw # 图片下载url self.image_url = image_url # 服务调用来源(需要咨询服务提供方) self.source = source # 单次调用唯一标示,用于异常问题排查,调用方需要负责生成并且记录在调用日志里 self.trace_id = trace_id # 目前只支持pdf、jpg两种file_type的识别能力,根据具体传入的发票的格式传入正确的值 self.file_type = file_type def validate(self): self.validate_required(self.trace_id, 'trace_id') self.validate_required(self.file_type, 'file_type') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.image_raw is not None: result['image_raw'] = self.image_raw if self.image_url is not None: result['image_url'] = self.image_url if self.source is not None: result['source'] = self.source if self.trace_id is not None: result['trace_id'] = self.trace_id if self.file_type is not None: result['file_type'] = self.file_type return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('image_raw') is not None: self.image_raw = m.get('image_raw') if m.get('image_url') is not None: self.image_url = m.get('image_url') if m.get('source') is not None: self.source = m.get('source') if m.get('trace_id') is not None: self.trace_id = m.get('trace_id') if m.get('file_type') is not None: self.file_type = m.get('file_type') return self class QueryInvoicesocrVatinvoiceResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, algo_msg: str = None, algo_ret: str = None, message: str = None, result: str = None, ret: str = 
None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 算法错误信息 self.algo_msg = algo_msg # 算法异常错误码 # self.algo_ret = algo_ret # 框架错误信息 # self.message = message # 算法结果,JSON String self.result = result # 框架inference服务错误码,0为正常 self.ret = ret def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.algo_msg is not None: result['algo_msg'] = self.algo_msg if self.algo_ret is not None: result['algo_ret'] = self.algo_ret if self.message is not None: result['message'] = self.message if self.result is not None: result['result'] = self.result if self.ret is not None: result['ret'] = self.ret return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('algo_msg') is not None: self.algo_msg = m.get('algo_msg') if m.get('algo_ret') is not None: self.algo_ret = m.get('algo_ret') if m.get('message') is not None: self.message = m.get('message') if m.get('result') is not None: self.result = m.get('result') if m.get('ret') is not None: self.ret = m.get('ret') return self class QueryBmpbrowserPrivilegeRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, bizid: str = None, phone_number: str = None, ): # OAuth模式下的授权token self.auth_token = auth_token self.product_instance_id = product_instance_id # 蚂蚁区块链的唯一链id # self.bizid = bizid # 查看权限的支付宝电话号码 # self.phone_number = phone_number def validate(self): self.validate_required(self.bizid, 'bizid') self.validate_required(self.phone_number, 
'phone_number') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.bizid is not None: result['bizid'] = self.bizid if self.phone_number is not None: result['phone_number'] = self.phone_number return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('bizid') is not None: self.bizid = m.get('bizid') if m.get('phone_number') is not None: self.phone_number = m.get('phone_number') return self class QueryBmpbrowserPrivilegeResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, status: int = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 权限成功与否 # self.status = status def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.status is not None: result['status'] = self.status return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('status') is not None: self.status = m.get('status') return self class CancelBmpbrowserPrivilegeRequest(TeaModel): def __init__( self, auth_token: str = None, product_instance_id: str = None, bizid: str = None, phone_numbers: str = None, ): # OAuth模式下的授权token self.auth_token = 
auth_token self.product_instance_id = product_instance_id # 蚂蚁区块链的唯一链id # self.bizid = bizid # 取消查看权限的支付宝电话号码集合 # self.phone_numbers = phone_numbers def validate(self): self.validate_required(self.bizid, 'bizid') self.validate_required(self.phone_numbers, 'phone_numbers') def to_map(self): result = dict() if self.auth_token is not None: result['auth_token'] = self.auth_token if self.product_instance_id is not None: result['product_instance_id'] = self.product_instance_id if self.bizid is not None: result['bizid'] = self.bizid if self.phone_numbers is not None: result['phone_numbers'] = self.phone_numbers return result def from_map(self, m: dict = None): m = m or dict() if m.get('auth_token') is not None: self.auth_token = m.get('auth_token') if m.get('product_instance_id') is not None: self.product_instance_id = m.get('product_instance_id') if m.get('bizid') is not None: self.bizid = m.get('bizid') if m.get('phone_numbers') is not None: self.phone_numbers = m.get('phone_numbers') return self class CancelBmpbrowserPrivilegeResponse(TeaModel): def __init__( self, req_msg_id: str = None, result_code: str = None, result_msg: str = None, status: int = None, ): # 请求唯一ID,用于链路跟踪和问题排查 self.req_msg_id = req_msg_id # 结果码,一般OK表示调用成功 self.result_code = result_code # 异常信息的文本描述 self.result_msg = result_msg # 批量取消权限成功与否 self.status = status def validate(self): pass def to_map(self): result = dict() if self.req_msg_id is not None: result['req_msg_id'] = self.req_msg_id if self.result_code is not None: result['result_code'] = self.result_code if self.result_msg is not None: result['result_msg'] = self.result_msg if self.status is not None: result['status'] = self.status return result def from_map(self, m: dict = None): m = m or dict() if m.get('req_msg_id') is not None: self.req_msg_id = m.get('req_msg_id') if m.get('result_code') is not None: self.result_code = m.get('result_code') if m.get('result_msg') is not None: self.result_msg = m.get('result_msg') if m.get('status') is not 
None: self.status = m.get('status') return self
34.0778
492
0.588089
42,640
316,685
4.116979
0.020943
0.050755
0.09136
0.075449
0.886897
0.853311
0.843513
0.839127
0.83437
0.830337
0
0.001192
0.314091
316,685
9,292
493
34.081468
0.806976
0.039835
0
0.882353
1
0
0.100299
0.003417
0
0
0
0
0
1
0.087048
false
0.026642
0.000264
0
0.152598
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
33b79fa53b7e8bf385009d4ca2b62f17e7849eee
2,886
py
Python
simplebitcoinfuncs/__init__.py
maxweisspoker/simplebitcoinfuncs
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
[ "MIT" ]
1
2017-03-18T06:00:51.000Z
2017-03-18T06:00:51.000Z
simplebitcoinfuncs/__init__.py
maxweisspoker/simplebitcoinfuncs
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
[ "MIT" ]
null
null
null
simplebitcoinfuncs/__init__.py
maxweisspoker/simplebitcoinfuncs
ad332433dfcc067e86d2e77fa0c8f1a27daffb63
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function, division, absolute_import try: from __builtin__ import bytes, str, open, super, range, zip, round, int, pow, object, input except ImportError: pass try: from __builtin__ import raw_input as input except: pass from codecs import decode try: ModuleNotFoundError except: ModuleNotFoundError = ImportError # Imports made explicit because some helper functions I made have common names try: #from .hexhashes import * # Can still be explicity imported separately #from .ecmath import * # but not including by default from .base58 import b58e, b58d from .bech32 import bech32encode, bech32decode from .miscfuncs import strlify, isitstring, isitint, hexstrlify, hexreverse, dechex, normalize_input from .miscbitcoinfuncs import genkeyhex, genkey, oppushdatalen, intfromoppushdatalen, tovarint, numvarintbytes, fromvarint, getandstrip_varintdata, inttoDER, inttoLEB128, LEB128toint from .bitcoin import uncompress, compress, privtopub, addprivkeys, subtractprivkeys, multiplypriv, multiplypub, addpubs, subtractpubs, pubtoaddress, pubtosegwit, validatepubkey, wiftohex, privtohex, Coin from .signandverify import sign, verify, checksigformat, signmsg, verifymsg, checkmsgsigformat from .stealth import paystealth, receivestealth, newstealthaddr from .bip32 import BIP32 from .bip39wordlists import BIP39ENGWORDLIST from .bip39 import BIP39 from .electrum1 import ELECTRUM_WORDLIST, Electrum1 from .electrum2 import Electrum2 from .rfc6979 import generate_k except Exception as e: if type(e) != ImportError and \ type(e) != ModuleNotFoundError and \ type(e) != ValueError and \ type(e) != SystemError: raise Exception("Unknown problem with imports.") #from hexhashes import * #from ecmath import * from base58 import b58e, b58d from bech32 import bech32encode, bech32decode from miscfuncs import strlify, isitstring, isitint, hexstrlify, hexreverse, dechex, normalize_input from miscbitcoinfuncs import genkeyhex, genkey, 
oppushdatalen, intfromoppushdatalen, tovarint, numvarintbytes, fromvarint, getandstrip_varintdata, inttoDER, inttoLEB128, LEB128toint from bitcoin import uncompress, compress, privtopub, addprivkeys, subtractprivkeys, multiplypriv, multiplypub, addpubs, subtractpubs, pubtoaddress, pubtosegwit, validatepubkey, wiftohex, privtohex, Coin from signandverify import sign, verify, checksigformat, signmsg, verifymsg, checkmsgsigformat from stealth import paystealth, receivestealth, newstealthaddr from bip32 import BIP32 from bip39wordlists import BIP39ENGWORDLIST from bip39 import BIP39 from electrum1 import ELECTRUM_WORDLIST, Electrum1 from electrum2 import Electrum2 from rfc6979 import generate_k
47.311475
207
0.77131
307
2,886
7.179153
0.442997
0.009074
0.010889
0.018149
0.715064
0.715064
0.715064
0.715064
0.715064
0.715064
0
0.032177
0.170825
2,886
60
208
48.1
0.888425
0.099099
0
0.088889
0
0
0.011197
0
0
0
0
0
0
0
null
null
0.044444
0.755556
null
null
0.022222
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
7
1d36743c1b76a4cfb5853a441b04df5728a9896e
4,680
py
Python
stage/configuration/test_databricks_job_launcher_executor.py
Sentienz/datacollector-tests
ca27988351dc3366488098b5db6c85a8be2f7b85
[ "Apache-2.0" ]
null
null
null
stage/configuration/test_databricks_job_launcher_executor.py
Sentienz/datacollector-tests
ca27988351dc3366488098b5db6c85a8be2f7b85
[ "Apache-2.0" ]
1
2019-04-24T11:06:38.000Z
2019-04-24T11:06:38.000Z
stage/configuration/test_databricks_job_launcher_executor.py
anubandhan/datacollector-tests
301c024c66d68353735256b262b681dd05ba16cc
[ "Apache-2.0" ]
2
2019-05-24T06:34:37.000Z
2020-03-30T11:48:18.000Z
"""Stubbed configuration tests for the Databricks Job Launcher executor.

Every test is a placeholder marked with ``@stub``; the parametrize
matrices document the configuration combinations to be exercised.
"""
import pytest
from streamsets.testframework.decorators import stub


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_default_cipher_suites': False, 'use_tls': True}])
def test_cipher_suites(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
def test_cluster_base_url(sdc_builder, sdc_executor):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'credential_type': 'PASSWORD'}, {'credential_type': 'TOKEN'}])
def test_credential_type(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
def test_job_id(sdc_builder, sdc_executor):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'job_type': 'JAR'}, {'job_type': 'NOTEBOOK'}])
def test_job_type(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_tls': True}])
def test_keystore_file(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_tls': True}])
def test_keystore_key_algorithm(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_tls': True}])
def test_keystore_password(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'keystore_type': 'JKS', 'use_tls': True}, {'keystore_type': 'PKCS12', 'use_tls': True}])
def test_keystore_type(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'}, {'on_record_error': 'STOP_PIPELINE'}, {'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'job_type': 'JAR'}, {'job_type': 'NOTEBOOK'}])
def test_parameters(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'credential_type': 'PASSWORD'}, {'use_proxy': True}])
def test_password(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
def test_preconditions(sdc_builder, sdc_executor):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_proxy': True}])
def test_proxy_uri(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
def test_required_fields(sdc_builder, sdc_executor):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'credential_type': 'TOKEN'}])
def test_token(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_default_protocols': False, 'use_tls': True}])
def test_transport_protocols(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_tls': True}])
def test_truststore_file(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_tls': True}])
def test_truststore_password(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_tls': True}])
def test_truststore_trust_algorithm(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'truststore_type': 'JKS', 'use_tls': True}, {'truststore_type': 'PKCS12', 'use_tls': True}])
def test_truststore_type(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_default_cipher_suites': False, 'use_tls': True}, {'use_default_cipher_suites': True, 'use_tls': True}])
def test_use_default_cipher_suites(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_default_protocols': False, 'use_tls': True}, {'use_default_protocols': True, 'use_tls': True}])
def test_use_default_protocols(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_proxy': False}, {'use_proxy': True}])
def test_use_proxy(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'use_tls': False}, {'use_tls': True}])
def test_use_tls(sdc_builder, sdc_executor, stage_attributes):
    pass


@stub
@pytest.mark.parametrize('stage_attributes', [{'credential_type': 'PASSWORD'}, {'use_proxy': True}])
def test_username(sdc_builder, sdc_executor, stage_attributes):
    pass
28.711656
109
0.714103
573
4,680
5.467714
0.102967
0.210661
0.107884
0.174274
0.896585
0.864028
0.839451
0.809448
0.789658
0.781679
0
0.001006
0.150427
4,680
162
110
28.888889
0.786972
0
0
0.574074
0
0
0.207523
0.029493
0
0
0
0
0
1
0.240741
false
0.296296
0.018519
0
0.259259
0
0
0
0
null
1
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
10
1d55e3b7b3c6d96997f0a4f42b585d19d24102d6
46,553
py
Python
parlai/scripts/interactive_web_test.py
nickim93/ParlAI
dd9ba6daed631706d88735ec069d36b36e511f76
[ "MIT" ]
null
null
null
parlai/scripts/interactive_web_test.py
nickim93/ParlAI
dd9ba6daed631706d88735ec069d36b36e511f76
[ "MIT" ]
null
null
null
parlai/scripts/interactive_web_test.py
nickim93/ParlAI
dd9ba6daed631706d88735ec069d36b36e511f76
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Talk with a model using a web UI. """ from http.server import BaseHTTPRequestHandler, HTTPServer from parlai.scripts.interactive import setup_args from parlai.core.agents import create_agent from parlai.core.worlds import create_task from typing import Dict, Any import json HOST_NAME = 'localhost' PORT = 8080 SHARED: Dict[Any, Any] = {} STYLE_SHEET = "//maxcdn.bootstrapcdn.com/bootstrap/4.1.1/css/bootstrap.min.css" FONT_AWESOME = "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.13.1/js/all.min.js" WEB_HTML = """ <html> <head> <title>Chat</title> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous"> <link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.5.0/css/all.css" integrity="sha384-B4dIYHKNBt8Bc12p+WXckhzcICo0wtJAoU8YZTY5qE0Id1GSseTk6S+L3BlXeVIU" crossorigin="anonymous"> <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script> <link rel="stylesheet" type="text/css" href="https://cdnjs.cloudflare.com/ajax/libs/malihu-custom-scrollbar-plugin/3.1.5/jquery.mCustomScrollbar.min.css"> <script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/malihu-custom-scrollbar-plugin/3.1.5/jquery.mCustomScrollbar.min.js"></script> </head> <!--Coded With Love By Mutiullah Samim--> <body> <div class="container-fluid h-100"> <div class="row justify-content-center h-100"> <div class="col-md-4 col-xl-3 chat"><div class="card mb-sm-3 mb-md-0 contacts_card"> <div class="card-header"> <div class="input-group"> <input type="text" placeholder="Search..." 
name="" class="form-control search"> <div class="input-group-prepend"> <span class="input-group-text search_btn"><i class="fas fa-search"></i></span> </div> </div> </div> <div class="card-body contacts_body"> <ui class="contacts"> <li class="active"> <div class="d-flex bd-highlight"> <div class="img_cont"> <img src="https://static.turbosquid.com/Preview/001292/481/WV/_D.jpg" class="rounded-circle user_img"> <span class="online_icon"></span> </div> <div class="user_info"> <span>Khalid</span> <p>Kalid is online</p> </div> </div> </li> <li> <div class="d-flex bd-highlight"> <div class="img_cont"> <img src="https://2.bp.blogspot.com/-8ytYF7cfPkQ/WkPe1-rtrcI/AAAAAAAAGqU/FGfTDVgkcIwmOTtjLka51vineFBExJuSACLcBGAs/s320/31.jpg" class="rounded-circle user_img"> <span class="online_icon offline"></span> </div> <div class="user_info"> <span>Taherah Big</span> <p>Taherah left 7 mins ago</p> </div> </div> </li> <li> <div class="d-flex bd-highlight"> <div class="img_cont"> <img src="https://i.pinimg.com/originals/ac/b9/90/acb990190ca1ddbb9b20db303375bb58.jpg" class="rounded-circle user_img"> <span class="online_icon"></span> </div> <div class="user_info"> <span>Sami Rafi</span> <p>Sami is online</p> </div> </div> </li> <li> <div class="d-flex bd-highlight"> <div class="img_cont"> <img src="http://profilepicturesdp.com/wp-content/uploads/2018/07/sweet-girl-profile-pictures-9.jpg" class="rounded-circle user_img"> <span class="online_icon offline"></span> </div> <div class="user_info"> <span>Nargis Hawa</span> <p>Nargis left 30 mins ago</p> </div> </div> </li> <li> <div class="d-flex bd-highlight"> <div class="img_cont"> <img src="https://static.turbosquid.com/Preview/001214/650/2V/boy-cartoon-3D-model_D.jpg" class="rounded-circle user_img"> <span class="online_icon offline"></span> </div> <div class="user_info"> <span>Rashid Samim</span> <p>Rashid left 50 mins ago</p> </div> </div> </li> </ui> </div> <div class="card-footer"></div> </div></div> <div class="col-md-8 col-xl-6 chat"> 
<div class="card"> <div class="card-header msg_head"> <div class="d-flex bd-highlight"> <div class="img_cont"> <img src="https://static.turbosquid.com/Preview/001292/481/WV/_D.jpg" class="rounded-circle user_img"> <span class="online_icon"></span> </div> <div class="user_info"> <span>Chat with Khalid</span> <p>1767 Messages</p> </div> <div class="video_cam"> <span><i class="fas fa-video"></i></span> <span><i class="fas fa-phone"></i></span> </div> </div> <span id="action_menu_btn"><i class="fas fa-ellipsis-v"></i></span> <div class="action_menu"> <ul> <li><i class="fas fa-user-circle"></i> View profile</li> <li><i class="fas fa-users"></i> Add to close friends</li> <li><i class="fas fa-plus"></i> Add to group</li> <li><i class="fas fa-ban"></i> Block</li> </ul> </div> </div> <div class="card-body msg_card_body"> <div class="d-flex justify-content-start mb-4"> <div class="img_cont_msg"> <img src="https://static.turbosquid.com/Preview/001292/481/WV/_D.jpg" class="rounded-circle user_img_msg"> </div> <div class="msg_cotainer"> Hi, how are you samim? <span class="msg_time">8:40 AM, Today</span> </div> </div> <div class="d-flex justify-content-end mb-4"> <div class="msg_cotainer_send"> Hi Khalid i am good tnx how about you? 
<span class="msg_time_send">8:55 AM, Today</span> </div> <div class="img_cont_msg"> <img src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMTEhUTExMWFhUXGBgaGBcXFxgXFxgdFxcXHRcXGhcYHSggGBolHRYVITEhJSkrLi4uFx8zODMtNygtLisBCgoKDg0OGhAQGyslHyUtLS0tLS0tLS0tLS0tLS0tKy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLf/AABEIAOEA4QMBIgACEQEDEQH/xAAcAAACAgMBAQAAAAAAAAAAAAAEBQMGAAECBwj/xABBEAABAwIDBAgFAgQDCAMAAAABAAIRAyEEEjEFQVFhBhMicYGRobEyUsHR8ELhBxQjcjOS8RUWNENigqKyU3OE/8QAGgEAAgMBAQAAAAAAAAAAAAAAAAECAwQFBv/EACkRAAICAgEDBAIBBQAAAAAAAAABAhEDIRIEMUEFEyJRMjNhFCNxgeH/2gAMAwEAAhEDEQA/APQq9PM0gbxz13bxv5rxnb+c13kVKbXNOjw4VALACNXtOY24CbCFfNodLX1AAwZZY4MiILhEDsiZtxOnNUfpPiKdWMQwAO/W0FrSdTeXkuIJ1DRKmpplfktf8JRWbWc0gPptY64uWkuZbX4SQ8Rujgrr0hq5g5rg46RbSI08JXjnQzpg7AVn1GsFRtRkOYXEGxkEG97nirmOmxxbbYd1KSXSarR2QZzBoEmADyKhKq2aceRxkn9CnpoGOytboAdL8PVeYVWQTxk+69E6SuljKosHkgCZMa35gqnfy7alSoC6L25ws+J6o09TJTlyiAbNDutaBqTaPP6FfSGHORoDKNAWF8gM21nevn2hT6pzX5hLTLeIt7EWK9i6KdJ6NejTaajBVa0Nc0mDYRInWVZRkZYcViC6jVD202gscIAjUEQTwuV4rtDFmlZph2s2ieHeYPkrx0yxpnqmk6SeF5i2h3a8V57jgKjoiQDabkxIJ9ChLdhekK62KrPu6XRxmY8NVJRqPGkDvGikoVQHQ2bbtN+hBKkq12zDjra14n2UhA7sQ/iJ7kNXa48STyR73AGMpka6QPX2XHWuvkbpvQAJR2c49pxDe8wpHspCwOY8hbzNvJRV6jnGCQY3WP3RWGwObUwOIiPZAA7WuJ+ENHEwPr9F0eqGsuPJGHZzeTuRdr5iPVY3BHcyOcA2QAOGM1aw+Q+yyriQ21xyCIds8zJdHfI9ioqzLRDT4n6piAztFs2D/MLn/aLdAHDmY+i5xNEHS35yQVQXj2ugAmrip3+32UZfOoEodzY/dFYBgLmtcQGkgE6wCdbJMaLD0XpZi93/AEtHq5XTZdd7aNekwCHgF17wJFvOVHT6EOwbBVZVbWpuaM0AgCYggzDhrw1Rez6WZ7WBoGYEbxaDvJNrBVWpIspxkgF1Wp1bXNdEDqrawO0IUOCpONWmTJOdtzzMazzRbXZqVOoAMjqj2tcB2C4NiBxPZPktU6rmkOBAgg6Dcpxk3BoJv5II6p/4FiYZhwH+YLazcWavcRRMZWL6DS1zHQ5pgAghzjqXT2b2jW66pUDTqt6xzmU3uAqgSS0kw+wNxFwE/Z0bZQw1VjSXFwkk6y27dLSOO9C7ZdSp0qbxmJqMl3a5g+hOvNW7iUuSyulGkC4rolhTX/o4sClBJfVYdRfLDIInSTwKS4+pToNFNpeRmJMOJI+UAmzeMDdxXdSg+R1dCoC+MhcDlkn4pbYnS5Qm1cFUa91OqYLYOgNuMjXvUqfllUYtSobYOp12zze9Ct3nLUE+5cPBVnEnK5zhx99U02DULevpXAdlMR8mb6EeSW7Sw8XBsTB74lCVSY0viAuxBO9S4SscwvvHuhHs1Cn2ZhnVKjWtmSR7q0rXcve0qzi0OBmAbkk5i0WEndoq2Kx
hgBu2JO8edjonm1HhoYNcr2g33EO95Vb2thSycrszSRB4gnsn1jkQUgbJWiS4kuMncDpxJ3IhhaBZs905fGBB8Uqp5mkBxOXi7NB36jcmrMKCAWl27Ql7b8HC48QUMDH1HE2ad3ICO7cozh3v+J2UcCHZfT6roNM2mf8AMPAb/Bcio4T2r30BHoQhAdEOb8JbHIkTy4FDV8XV+b1j6LVSTfKTzAj7BDVqZFiT5T6oA76walrhxgz9Qo31mfMfI/usFASIDr6TZcnD8u8Xn90xHXXu/TVt4j3ChqYmof1zHMe0LsYPh9Quzg5bb4gfNMAR2Kdo8X5k/VcdZ+H/AEU1Si4W1HCELk5JAd5xvH55rttY/N9FzRaN65dT5oAunRPpDXaw0nF1ShGWILuqJktI+VskhN9hbQZWYGzq3QmJ3Ec1RNj7UdQcbWcC0931V1wWzMNlo56mRpaDnB7YeIm8HW27eq+NOyxytIe1qAIaCJDPhBJIbaLDQGCRbitdW1dUNs0TI7FjDSaYl/MDjx04wisLj84MQI1GVoPLQJ2iptg+bmfNYmHXu+ZbRofJm34tjy+mwgwSL6xuMdyX4XDU6tFrXMJLDUpmCNA4OYYmR39youN2vWbU+UzIAaBodIA/JVo2X0hey7WjLUOZ5y/CYguEjukKMvslhbuh9VZUDIYwEiwzvyxzsDKp3Txz6Zp1HBmaIImQQ0ggEaibju7lc8VtKWtcKmbfaBA0Lrd6VYrZ7Kjy97Gvfxd2tNNbJwp7J5OcHT0VKkzrNotL2ZG1mN1duqUhlcI3Zgg9v4J1Bz2OhwDtRvtAPfBE9xXoFPDgQAxttwjdyjcl/TijRqYUtZlFSm4OgCDrDt28EeSjJvkirk4nlsk6qy9EMOAXPOoAA4wTJ8TB8lX3U7wrL0StTe/i7xhrTHuVaM52hL3PEwC6e+IhQMYAMr3jS2hmdYMme5d16TnPBBtz019DCzEbNgdkzG4ye+27dfmgQXiMGAyC3M03BGt50/N5QGHpgNcGkggAtPhlMnhZp8Udg2viHOtltqY5X0i3mhamIAbMSGkhwGoG+O783IAHbXmzoPM39QuXPINiSP7gR5ESFDXe2QRZcPqcDu8x3IGgym0QTbdu/OakETOn5uSqlULjM2Redw3juKVjoLquabfngoyRvv6eaFNcb9O9a60agwmFHdTXswe68hDurHh9Fy6sDqR9udly4zo4JkaN1KkqBx5Lqowje36+iGqNQBM4W/Ch3HcugDExC0XygDnMmGCxHZyybacI/YpY8QpcHUyvadwISYy9bD2cSDUqPJefhE/4Y5DcT+yZMrOpuzDUeo4LWy8Z1jN2YRPO1lDjKxNyq72DHP8AvCz5XeixVnOeI8lidioA22JZRrttoTH/AFgfVpRmxsVTDRmIubk35g8d672cHOw5Y4dumXCHfNTIqNB43BHmpul+AZVZTxmHhrakGo0GMj7g27wQecHQp6Yskfo72ltUNqANLSXtyyHANgyB2pgapps3bNMsaHn+pEFrRcETzgyLrzetLSI4+Ewp6zWudnzTETxdbgfhO7XcklXYlxuF32Lk7bxbUDgDbNIlpIBEaC5Jsgsfif5lmZz221AMuJFwHchuVeqU5pF7Nzhm4jh4SoamJOjSQN/AfdR4u7NePPCUHCa/wb6v4nHhDfG0+qsHR8ZGjlc+31Crzqjv1HUWn3VnwQy0QADMEkxeT+eytMjVMIxOEksiwOU8+X1UWJaBUmBJEbwHC+vOwHgjesDerJ32+vpMJRtPEwQJ+ExutB177HzQImosgkgmC05ZAkcjx3Ku7SrFry5sQ6ZHH95BUzcduFiJ3eDlwXZwQRv7Q3jmEAAhwO6VtrDrp9E32FsttR+V099h6pjjOjNQXAkHeBZQeRJ0WxxNqyt0pGg+yLdSc0STrwCa4PZTmH+qwjh9xx/dHuwAqA+kyoc9k1AqbHiYi/fCytVn9IHqm2J2WZj9XDcf2QGJ2ec
pI+Iat/ZTUiEosXOj9rfVazf9IjjP4FzUYdCPRQlo3lTINEtV9vyB3LimT+fnNaMLXWwmRZskb5UzAO5D9aN3nClw9KSPdAWcV6JHcoaeo7wisceem5QMYZE8QkMsOBxbqbtfDiE7NTMA4FVuZHPci8BjA0w423qADXPyWKXrqfzNW0goYYih1eIOhFSnSqyNHFvYeR36+KHr0mEPY6A3IMpgZg5vwjSS0iAY79ykJLm0XOEFtR9IxoG1RLY/7gpMTh8xFrkD9x5hS8g+wqFGm2k+mWglxJDyzQQLAm/E6pJU2VUy5mjM4n9JtHH3VrZRa3W0rYc1o7MeWnehRSJyyylBR+ivYLZ5aHsdJz5A6JganUcyEyw/R/qnh7agaGmwc2SRvB4zJTDryeHr9VrUi5OlkMqS2Mto9H6GKwjqjaeWqztBwsSGuOZpA1loPmqtiqxa6ZsHNB4WFx6r0LowTlcAwhs753x9ivN9qkFlRukkkeIHt9FK7H5GBrB+sgSCBEHTgqxteuesdfX33+onxTn+Zzm2uUEbpgR+eKS7TdmJPE/X/RAxYa51lN6WFL8r2zLvtdJmtJMC5XrfQ/osamGa4dh4uCQdY0PEGyryTUUW4ocmKth7HeDJkcwB7Qr9hGMpMmpJaBv38gAEXsjAEDttylpMiZHIzvslvSPESYmwtAke3csblyezYo0qRXeke2hUJaGBjByuefLeqnUx7m/C4z3nTn+ye4ykXGzB3fgSnF7JzauAHAx9YWiLVFMouxbi9sgwHCSOBvK3T2sHESRPke6d6kd0dbudPcfooH7Ey8VLlEgoSCH4Ev7TYnvHCygqbLzSHABw3wT6tRuBokEXsrFSwwcBI+6i8jRasSZ5xi8MWmCO5QCmNfZejYrZ7SCHCe9V7HbHpCTA8LKay2VzwVsrTQ0b/VSmraw8dSia1Om3T6oGrB0KtTszyVEtKk0XcbcDvWqcOqC28IGUx2aJe2x3+x+6TEhvisP+oaKNl9yaYSmSNJChxGEymQIURgvVt+UeSxT5DyWIFZZaTi9r25Y7Ie3jmY4EW8XKXHgENcORB39oT9V3haRzAgGNDusQQfdbbQinJ/S4tM78jvsQm+4l2FpudZKJw+znuvZvfPsnGVrdIHcFo1+amRB6Wymj4jPmPqp20GNiAB7qN1fmohXkwL9wJSY+4aK+XQkdxhUrpJs8sbTdBh5f5NcWa8wPRW4U3ndHejdq7NFWjhGkA/4gkf3k+8qqc+NM04MLnJxZ5U6vle08onvEj3RtbDNcy36rjz09Crd026D9W1houGYN7TTa8buCp+zmvkMIM3gExfh5j1TjkUlojLFKJnQ7Yz6+Mota2QHB5/tae1r3r6QweDDWwAvK/wCE2EnF1HgdkUzHe4tkd+q9fpLPmdyLYfGAs2nhDEtsVTNsYXEC7aXWdy9JcyUPXozoqePkshl8M8J2vsvaDv0FjeAtbiYuVVttbMrUXSc5aQIcR2TxuLW719E4vCvM5Y7iqVtXZtRriere2ZnJdp8NFbDLXgcsakeebA2C+tRdUJLb9g7jAuYi4mLrnC4h9N5pvM+oPcTcK21mO0l/+SPZQ0+j4e4l1N2szJHoFNzsI4mvILs+iCYg+SteD2ZLZ1WYPZMQY5c45q17JwkNjwVLey9aR57tqlkkqn45zn74Xp/TjZ56p5Gv7hePbSznPEjLoDqb3MK3FspzS0RVdm5tH34IOts+o3UeKI2aKlSo1gPxGNAQBvOiOr1alF/V1e0DoRMH7FaLaMtJiF9NM9hsmXRpYc51+iK21hQKZeN8R4kJ90X6MF+EZUDu255ME9nKJHnaU7tEJLiQuwcgDMRy3JtsrZDqrgHDsjU7iBu709wWwKbbvOd3/iPDf4pqwAaeSVMrcyL+TZ8jViJlaTpkLEr6v4EHiapDnN3Oh+vzCHeqnxG18HSs+sHkbm9r/wBB7lVzbPSxj3TTpmIiXBree4E+qbRNDykHOaIbuud
BbW5WqrQ0S54A5fc2VGrbaq3h+UHWPufFQ4fD1axGVr3kmxgn/wAjb1T7CouGI2xh2NzSXgmBFwSNRuHugh0tq5XCi1rGgSc5kcoAiXa+RQ+zujIe3+piW0yLBgp1KrvPstHmn2ydjUKbSHYek8n9dTO49+Quyj996hyvsiSXkrTa+OxQn+sWXu2m8U/NggDmZXqnQ/BBuEw7TUZV6uo4Zm5o7ZLsvaANs0TySfD0crcgc/L8uZ2XuyzpyVg6PEFr6Wmjm+Fj46KrLGTjs09POpgXSoOqYk02yZ3JlszoyynhKjXNBe7M4E6i1iOGhTDZ+DFV4qOtUAIncUXQDv6jXTEfgWVWjbll8eC8UVP+GWHLHYmREuaWndBB3+Sv1J91UeiRLX1aZEGQdLEDQ+UWVow773Tm/kZ3HQxaJXfV2UNJyKDVOOzLK0AV8PyS6tRBT+qBF0DXa1SlEuxZGI3bNadQu2YFo3fRGPAUTsU0b1DsaVb7GmbPndZHsw0aKLDYxrj2U2pMEJxSZny5JR0ys9JcFmpPHFpXljtnTff9l7VtenLSvM304qOHNJtxZfi+cNlUrbPIJLew46kNFxwNpQGJ2L1rgXOJPdEfl16A3Dyo62BH5+c/RP3WN4kUTaezIw75vka5w7wx0eVz4K7dHsJ1WFoUyILabZ7yAT6kpdtLZhqtcwODZBud+7L3mYVhzbgtOF2jJ1daSOw1aAWg52gC5yk8VaYyWyxDzyWkCs8iobKr1BmZSeWzBMWHfKs2zejNNrQ59UuqX7LaM5ZbHxOdFpO5OaWDawuG+e0ZBBPG1j3o2myyi02WWV/BdH8r87hTLpkEh9u8TkP7p71T3j+pUdU4AuIbH9unhopoXYHNPihWyNlENFgB3QFknNEWiff9vNS2WZwmI6auX7WGGisZAYRI+adWjmbrh9YQqh00rE5Gk2vA+vMwk1qiUW0z3vZ4pVmNq0iMr2ggjeCEW7J8A1j8914D0D2/jabhhqP9RjiZaZ7Em7g4aC+i9c2FQqtxAdUdOambbhdp+ixT+Lo2cOUXKwtmygypnae9TFMKoQtVirmiUJ33Mo1IRZxPNAFCV8RCSlRL2lJjGvjgRqltfGgXlLMXtCN6R43afNPk2XQxRiOMftbh5pRQxRrVW09zj57/AKJXQc+u+JsrKMB1bWupjtMuOdrz5lBdaSLTg9n9WBDbJrhhbW3svKsV/ECvTqZXYdxA/LcVZdg9NmVwYOVw1a4QR4cOasi1HZiy4py8ln2m6AvN9rkiqHNEtE5o571ZNo7caTBIVRqbWYKpk2hRlLk9F+GHCOxphHAomqyyr+G2oMwYGuuTDgLDhKcHESFCiwXVoFRp5/Qo2d6U7Sq9pscfoVaqOwHm5e0DlJWrC6Rg6yO0xX1vNa61PG9HG73nwACKpbDpDUE95+yv5GLiVjruSxWz/ZNH5B5n7rEuQcTzylhwBEWUzaaJ6orfVKwAbIVmUKbqlnVlICOyhczXginNsoYJQBC1iQdK8AXtY4C7XHduIVl6k8fBdHCzu+6GxoB/hDg4rVXxHZDTzvP2XsXVDXf+fYqodEdmikC4CC8kxppafRXGdCsE3cjVK1FERqiY3gSoarluqYMISpVvCrky2ESKtVhLMZWOqJxL0txrxEWlQNC0VvauOINknDXVDeQ1PcVhQZ3lIsc98EMaSeXsrYUE2/BY9jvpsgD8KstOuNF5lgDiqRzVcO87wWFro8LFPaPSGoIy4Sq7vEeyk0RSkx5tPCNfqO8wvPtsgMqdiQ4WDhY+is9XpnVFn0AwcHBzfUhLauOw9R2ZwyHUzcHiBCaJODoRVMZW/UdURsnD9vM7tO5390VjcZQJAaR36e6ifUDNDbjZNkfxGWcB0oh2LgWSOtipDYN1JnmBxSonF8nSDTXzGSi8J/FLqqjsNXpXY7I2oDYgRllsTMQlzLFVfbGyajsfnynq3Fjy7d2WtzCeMhW4mth6hi4uMD2P/beIcJa
xoHcT7lDu2vWd+sjugR6JXh8aYaWyOSOqVJJMeSug7jZycq4ypEn8/V/+R/mVihz8liZXbNRzWNbOgnuurY3Z9IERTbrOkm3fzR1mjcB5JcyagU1mz6p0pu8o91O3YVY/KO932BVjq4+kNXt859kJV25SHzHuH1MI5MdIAp9HCR2qnk37lEUujlIal7vGB6KM9IgbNpn/ALiPoFC7blQ7mjwJ90bYWkNaeyaQjsDxv7optFrbBoHcAq0cfWefjN9ALeUKwbKwjmiXklx4kmOWqqyy4osgrJadP0KYUnWhDxD43ELb+yY8ljUi6W9A2IeTNrj1QdV8gXU+PcYzDdqk1bGtEGeyfRQk9mjHHRlerG9LcRWkc1PiKoIsljgZjXmmiyiG5KY7JwQmXBRUqE7k5wDALEFOwJ34dutkBiMc2me0zxCcGnASTadKQbT4JpjTIa21aNQEFzXDg4D2KruO2Vh3mQ2P7HFo8hZA7V2OSTAcPNJqmGrMtmeB4q5bJc68Bu0NkURZofPN5Stmx3kf4jomwm37o/DYZ5d2nE+aaNZlanyorklIX7PwkCXGY0XbNSfJaqvJOUeN1I0WSbOj6d0/KfN9kdjVO8PgBVoEyBDrTNjzMWFykgNuYKf7BeS1zZMET5qzEk5bMnq1xm2Q7NbJN5DQb6ab12cQePqtj+m10/q7LePM9yCLgtjSSpHAtydsL61bQfWd6xQoKLPV2hVdq93gY9kM9zjqSZ4rkrbjCdEjIUVeGgucQ0DUkmB5ITaW1W098u+UfU7lXNq7VqVRBDQAZiT6ooEhq/pFSaTlDndwgeZKjp9L6Vg5j2ibnsujjYESqpVqG8geBQdWDofBOiR730UxGEqtzYeq2o79W57eRYbt8k9c6F8x4fEvpPD2Pcx7dHNJBHiPZeldDv4lZy2hjCA42bXEAE7hUGjSfmFuQWLNhl3WzRBp6PTa3aFtRopGv61nBw9CELTqQVlXsnO3Q6/dZbLXD/gOatyDYjUfm5INtYLV1PxbPsrFi2CoJFnDTnyKQ4qobgiCNRvUGaYOypjaZbIM242I8OC23aYF/rKl2vgw4ToeI/Lqo4oVKZI/PJWximEnR6DgdpNO9N8NigYgryKjtgtNyU0w/SUjfbw/Ap+0/BX7iPW6VYHepKjgvOcH0rB1cEcOkgj4rd6XBj5IsmKcOR+iV18K06gJTU22HWkqF+1rWT4MfJB1TCtboAkm0qt8rRJ5LjE7YJs2Z90EcPUccxdlUuNdzV03TyzPS0TUKETvJ1Uj2wJ0Cjq41rBAh7uWg70BUxj3fEfBFHoI8MUeMQ0vhjnk2Any3qLZ/SNhd1bJk6OsBYEnW/BTNoZqTm73NI8wiOiWw8NTB605nuBE6FunwjcfdX4pRim33OH6tilOSS+jf8yXXMk7vt7LtkqWtgQx0TPAjQjcQugAtC2ebcWnRFB5raIy96xS4sfty+hcemdMH4CR/cJ/9Sgcd0qc+Q1xY3gNf832Shz1C4ooYX/MsM9onxhQ1Hc3Dxn6oR4HALWfiPJMDp73jQz3qE1mk3kFF02tdo6DwdceChr4Wd0cxogCMttxCgq0vJcua5uhWCvxSHZf+hf8QTQaKGKDn022ZUHaewfK4E9sDjr3r1LZe1aNdmejUbUZocpmORBu08iF84SDyU+z9oVaDxUovdTeN7TE8iNHDkZWXJ0yltF8M3hn0U7s93tyQG0abag4OGjhu+45KldH/wCJjXxTxbch06xgJYf7mat7xIVlq4xrgHscHNcJDmkEHxCxThKHc245KW0JsWHNOV4h27e13cUlxjAZkQVZcVUa9uV1/dV/GNLde03jv8eaIkmVvGYG5MfZLamEhWaqEDXp8lojIzyiI+qK6ax24nzTLqwugwKfJkOIHTY/5iisPh3G5eY8l3lGgRLIAhJyNvR4FlyJPsjuiMvw256nzK25gPxEu71H1i11nC/ddVnpYRUVSVBNKk0aDwC6bgQ42ssoYJx
u45G89VvEbUYwZGGeaCUnFL5BnWhrmtB018FHtekDUD26uHr/AKITBjPe95uiKtMtLWA9qQWuNm3GhPHVTSOP10uU0/4GuzmtqMFMuDngSzeXZjdo8iQOSFdjmB5YIn6jUDut6qHC1+rDqhDWVqUuzD9fJvEadyRUQQesce3Np75Lj7rd0qb2zBHDHnzLNmfw9Fi56w/IPMra6Vm3X0UZ3eo3PK25RuK57PMGdZO+65f5LlwBUZkcx6pBZJ1sa+YRdKvzS7NP2XTDwQMPcAUHVocFIyqui+e9AABCwVeKIqDj5qB9JAjZMiyJ2VtuthnTTd2Tqx0ljvDceYulzgQszg6qLSemSjJp6PUNk7fp4hsskOHxMPxN9O0OYU1erqvKqVV1NwewlrhoR+aK67E242u0g9mqBdvzRvb9tyxZMDjtdjbjzqWn3Cq7OFkBVJBuj3c1FUYoIsaACVy2k5MWUgbR5olmEHBTsjxFgDWCXeKnO06B1YfBp+ylLWzBI7lt+GJ0UW7PQ+m4ZQx8l3YM7a2HH6D4grR6QCP6bPIQphs4C51W/wCVA0CWjc/d+0LnY59X43OA4AI3C4BpuDPepjmHwhqGqU3ON3X5KRU/jt7G2AIFRrYsJ07k0pbPFWWPJIHw3Ai2vKJKW7GwhgvO4Hx4lNWOH3V0YXE8513Wf3xZt+GMZScBIMyNHAAAH19EgfXa1rqj/hFo48kXtyoOtyt3Q3x3+pVT6RY2SKbfhHut+NcIEnl4w5Dn/e1vyu/zLSpmZYn7rKf6qQ+cSFznHcUQSoKjJVTOWcvXBK4JIsdFjikBp48CuRU46rCVG47j4IAIa9d50LTqbjquy5AwkPWi2NFCHLptRAGyVDUoBESCuXUjqECAbhdU3kEOYS1zTIO8KZzuIXBY06GEDRZMH0mYQBVaWu3uaJb3xqO66cse17Q5jg5u4gyO7v5KgGmeRUmAxtSg/My4/U3c4c+fNZp4F3RohnfaR6FQoyQTu9EXX7LSUDsjaVOqzMw94OrTwP5ddbaxwZRqO4NPqLesLLT5UbLVWQ4qg5jsr27gQZGjgCDz1WU6R/S49yTbF2uP5Uda4g0SGTBJyPksBgbiHgeCKb0iww/5jj3McrZQd9ju9L1mF4lckv8AY0a928HyWOSWp0oobhUd/wBoHuUtxXSs/wDLpBvNxJ9BAQsUvonk9T6aC/KyykTxPJQ0qwfUFGmQ6odYuGDe5x+nFUnFbVrVPiqGODeyPIJt/D+sW4sCJzNI7ogyFbHD9nH6n1dzTWNV/J6hSZlsP06fn0W3PABO6JOgsB+y6ul+3KmSi8zd0NHjr7FXxWziQuc0vsqONxPx1TxJHedFSazyTJVh6R1srWsHefRVtyun9G3qJbUfo5WLcrFEzbLDh1p2q2sSZSD1lDTW1iQGlDWWLECNvXe5YsQM6K2sWIGbRDFixAEWKQK2sQB1TWjqsWIGO+hf+O7/AOs+6b9MP8MfnBYsWR/sRqh+oRbM/wCDxv8A+b/2qJA7VbWLV4Mi7s2FzU1W1iGJHIVk6Af8a3+130WliQz1j90m6Tf4bf7x7OWLFLH+RZ037Eeb9Jf8XwSRYsU5dy7P+bOVixYokD//2Q==" class="rounded-circle user_img_msg"> </div> </div> <div class="d-flex justify-content-start mb-4"> <div class="img_cont_msg"> <img src="https://static.turbosquid.com/Preview/001292/481/WV/_D.jpg" class="rounded-circle user_img_msg"> </div> <div class="msg_cotainer"> I am good too, thank you for your chat template <span class="msg_time">9:00 AM, Today</span> </div> </div> 
<div class="d-flex justify-content-end mb-4"> <div class="msg_cotainer_send"> You are welcome <span class="msg_time_send">9:05 AM, Today</span> </div> <div class="img_cont_msg"> <img src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMTEhUTExMWFhUXGBgaGBcXFxgXFxgdFxcXHRcXGhcYHSggGBolHRYVITEhJSkrLi4uFx8zODMtNygtLisBCgoKDg0OGhAQGyslHyUtLS0tLS0tLS0tLS0tLS0tKy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLf/AABEIAOEA4QMBIgACEQEDEQH/xAAcAAACAgMBAQAAAAAAAAAAAAAEBQMGAAECBwj/xABBEAABAwIDBAgFAgQDCAMAAAABAAIRAyEEEjEFQVFhBhMicYGRobEyUsHR8ELhBxQjcjOS8RUWNENigqKyU3OE/8QAGgEAAgMBAQAAAAAAAAAAAAAAAAECAwQFBv/EACkRAAICAgEDBAIBBQAAAAAAAAABAhEDIRIEMUEFEyJRMjNhFCNxgeH/2gAMAwEAAhEDEQA/APQq9PM0gbxz13bxv5rxnb+c13kVKbXNOjw4VALACNXtOY24CbCFfNodLX1AAwZZY4MiILhEDsiZtxOnNUfpPiKdWMQwAO/W0FrSdTeXkuIJ1DRKmpplfktf8JRWbWc0gPptY64uWkuZbX4SQ8Rujgrr0hq5g5rg46RbSI08JXjnQzpg7AVn1GsFRtRkOYXEGxkEG97nirmOmxxbbYd1KSXSarR2QZzBoEmADyKhKq2aceRxkn9CnpoGOytboAdL8PVeYVWQTxk+69E6SuljKosHkgCZMa35gqnfy7alSoC6L25ws+J6o09TJTlyiAbNDutaBqTaPP6FfSGHORoDKNAWF8gM21nevn2hT6pzX5hLTLeIt7EWK9i6KdJ6NejTaajBVa0Nc0mDYRInWVZRkZYcViC6jVD202gscIAjUEQTwuV4rtDFmlZph2s2ieHeYPkrx0yxpnqmk6SeF5i2h3a8V57jgKjoiQDabkxIJ9ChLdhekK62KrPu6XRxmY8NVJRqPGkDvGikoVQHQ2bbtN+hBKkq12zDjra14n2UhA7sQ/iJ7kNXa48STyR73AGMpka6QPX2XHWuvkbpvQAJR2c49pxDe8wpHspCwOY8hbzNvJRV6jnGCQY3WP3RWGwObUwOIiPZAA7WuJ+ENHEwPr9F0eqGsuPJGHZzeTuRdr5iPVY3BHcyOcA2QAOGM1aw+Q+yyriQ21xyCIds8zJdHfI9ioqzLRDT4n6piAztFs2D/MLn/aLdAHDmY+i5xNEHS35yQVQXj2ugAmrip3+32UZfOoEodzY/dFYBgLmtcQGkgE6wCdbJMaLD0XpZi93/AEtHq5XTZdd7aNekwCHgF17wJFvOVHT6EOwbBVZVbWpuaM0AgCYggzDhrw1Rez6WZ7WBoGYEbxaDvJNrBVWpIspxkgF1Wp1bXNdEDqrawO0IUOCpONWmTJOdtzzMazzRbXZqVOoAMjqj2tcB2C4NiBxPZPktU6rmkOBAgg6Dcpxk3BoJv5II6p/4FiYZhwH+YLazcWavcRRMZWL6DS1zHQ5pgAghzjqXT2b2jW66pUDTqt6xzmU3uAqgSS0kw+wNxFwE/Z0bZQw1VjSXFwkk6y27dLSOO9C7ZdSp0qbxmJqMl3a5g+hOvNW7iUuSyulGkC4rolhTX/o4sClBJfVYdRfLDIInSTwKS4+pToNFNpeRmJMOJI+UAmzeMDdxXdSg+R1dCoC+MhcDlkn4pbYnS5Qm1cFUa91OqYLYOgNuMjXvUqfllUYtSobYOp12zze9Ct3nLUE+5cPBVnEnK5zhx99U02DULevpXAdlM
R8mb6EeSW7Sw8XBsTB74lCVSY0viAuxBO9S4SscwvvHuhHs1Cn2ZhnVKjWtmSR7q0rXcve0qzi0OBmAbkk5i0WEndoq2KxhgBu2JO8edjonm1HhoYNcr2g33EO95Vb2thSycrszSRB4gnsn1jkQUgbJWiS4kuMncDpxJ3IhhaBZs905fGBB8Uqp5mkBxOXi7NB36jcmrMKCAWl27Ql7b8HC48QUMDH1HE2ad3ICO7cozh3v+J2UcCHZfT6roNM2mf8AMPAb/Bcio4T2r30BHoQhAdEOb8JbHIkTy4FDV8XV+b1j6LVSTfKTzAj7BDVqZFiT5T6oA76walrhxgz9Qo31mfMfI/usFASIDr6TZcnD8u8Xn90xHXXu/TVt4j3ChqYmof1zHMe0LsYPh9Quzg5bb4gfNMAR2Kdo8X5k/VcdZ+H/AEU1Si4W1HCELk5JAd5xvH55rttY/N9FzRaN65dT5oAunRPpDXaw0nF1ShGWILuqJktI+VskhN9hbQZWYGzq3QmJ3Ec1RNj7UdQcbWcC0931V1wWzMNlo56mRpaDnB7YeIm8HW27eq+NOyxytIe1qAIaCJDPhBJIbaLDQGCRbitdW1dUNs0TI7FjDSaYl/MDjx04wisLj84MQI1GVoPLQJ2iptg+bmfNYmHXu+ZbRofJm34tjy+mwgwSL6xuMdyX4XDU6tFrXMJLDUpmCNA4OYYmR39youN2vWbU+UzIAaBodIA/JVo2X0hey7WjLUOZ5y/CYguEjukKMvslhbuh9VZUDIYwEiwzvyxzsDKp3Txz6Zp1HBmaIImQQ0ggEaibju7lc8VtKWtcKmbfaBA0Lrd6VYrZ7Kjy97Gvfxd2tNNbJwp7J5OcHT0VKkzrNotL2ZG1mN1duqUhlcI3Zgg9v4J1Bz2OhwDtRvtAPfBE9xXoFPDgQAxttwjdyjcl/TijRqYUtZlFSm4OgCDrDt28EeSjJvkirk4nlsk6qy9EMOAXPOoAA4wTJ8TB8lX3U7wrL0StTe/i7xhrTHuVaM52hL3PEwC6e+IhQMYAMr3jS2hmdYMme5d16TnPBBtz019DCzEbNgdkzG4ye+27dfmgQXiMGAyC3M03BGt50/N5QGHpgNcGkggAtPhlMnhZp8Udg2viHOtltqY5X0i3mhamIAbMSGkhwGoG+O783IAHbXmzoPM39QuXPINiSP7gR5ESFDXe2QRZcPqcDu8x3IGgym0QTbdu/OakETOn5uSqlULjM2Redw3juKVjoLquabfngoyRvv6eaFNcb9O9a60agwmFHdTXswe68hDurHh9Fy6sDqR9udly4zo4JkaN1KkqBx5Lqowje36+iGqNQBM4W/Ch3HcugDExC0XygDnMmGCxHZyybacI/YpY8QpcHUyvadwISYy9bD2cSDUqPJefhE/4Y5DcT+yZMrOpuzDUeo4LWy8Z1jN2YRPO1lDjKxNyq72DHP8AvCz5XeixVnOeI8lidioA22JZRrttoTH/AFgfVpRmxsVTDRmIubk35g8d672cHOw5Y4dumXCHfNTIqNB43BHmpul+AZVZTxmHhrakGo0GMj7g27wQecHQp6Yskfo72ltUNqANLSXtyyHANgyB2pgapps3bNMsaHn+pEFrRcETzgyLrzetLSI4+Ewp6zWudnzTETxdbgfhO7XcklXYlxuF32Lk7bxbUDgDbNIlpIBEaC5Jsgsfif5lmZz221AMuJFwHchuVeqU5pF7Nzhm4jh4SoamJOjSQN/AfdR4u7NePPCUHCa/wb6v4nHhDfG0+qsHR8ZGjlc+31Crzqjv1HUWn3VnwQy0QADMEkxeT+eytMjVMIxOEksiwOU8+X1UWJaBUmBJEbwHC+vOwHgjesDerJ32+vpMJRtPEwQJ+ExutB177HzQImosgkgmC05ZAkcjx3Ku7SrFry5sQ6ZHH95BUzcduFiJ3eDlwXZwQRv7Q3jmEAAhwO6VtrDrp9E32FsttR+V099h6pjjOjNQXAkHeBZQe
RJ0WxxNqyt0pGg+yLdSc0STrwCa4PZTmH+qwjh9xx/dHuwAqA+kyoc9k1AqbHiYi/fCytVn9IHqm2J2WZj9XDcf2QGJ2ecpI+Iat/ZTUiEosXOj9rfVazf9IjjP4FzUYdCPRQlo3lTINEtV9vyB3LimT+fnNaMLXWwmRZskb5UzAO5D9aN3nClw9KSPdAWcV6JHcoaeo7wisceem5QMYZE8QkMsOBxbqbtfDiE7NTMA4FVuZHPci8BjA0w423qADXPyWKXrqfzNW0goYYih1eIOhFSnSqyNHFvYeR36+KHr0mEPY6A3IMpgZg5vwjSS0iAY79ykJLm0XOEFtR9IxoG1RLY/7gpMTh8xFrkD9x5hS8g+wqFGm2k+mWglxJDyzQQLAm/E6pJU2VUy5mjM4n9JtHH3VrZRa3W0rYc1o7MeWnehRSJyyylBR+ivYLZ5aHsdJz5A6JganUcyEyw/R/qnh7agaGmwc2SRvB4zJTDryeHr9VrUi5OlkMqS2Mto9H6GKwjqjaeWqztBwsSGuOZpA1loPmqtiqxa6ZsHNB4WFx6r0LowTlcAwhs753x9ivN9qkFlRukkkeIHt9FK7H5GBrB+sgSCBEHTgqxteuesdfX33+onxTn+Zzm2uUEbpgR+eKS7TdmJPE/X/RAxYa51lN6WFL8r2zLvtdJmtJMC5XrfQ/osamGa4dh4uCQdY0PEGyryTUUW4ocmKth7HeDJkcwB7Qr9hGMpMmpJaBv38gAEXsjAEDttylpMiZHIzvslvSPESYmwtAke3csblyezYo0qRXeke2hUJaGBjByuefLeqnUx7m/C4z3nTn+ye4ykXGzB3fgSnF7JzauAHAx9YWiLVFMouxbi9sgwHCSOBvK3T2sHESRPke6d6kd0dbudPcfooH7Ey8VLlEgoSCH4Ev7TYnvHCygqbLzSHABw3wT6tRuBokEXsrFSwwcBI+6i8jRasSZ5xi8MWmCO5QCmNfZejYrZ7SCHCe9V7HbHpCTA8LKay2VzwVsrTQ0b/VSmraw8dSia1Om3T6oGrB0KtTszyVEtKk0XcbcDvWqcOqC28IGUx2aJe2x3+x+6TEhvisP+oaKNl9yaYSmSNJChxGEymQIURgvVt+UeSxT5DyWIFZZaTi9r25Y7Ie3jmY4EW8XKXHgENcORB39oT9V3haRzAgGNDusQQfdbbQinJ/S4tM78jvsQm+4l2FpudZKJw+znuvZvfPsnGVrdIHcFo1+amRB6Wymj4jPmPqp20GNiAB7qN1fmohXkwL9wJSY+4aK+XQkdxhUrpJs8sbTdBh5f5NcWa8wPRW4U3ndHejdq7NFWjhGkA/4gkf3k+8qqc+NM04MLnJxZ5U6vle08onvEj3RtbDNcy36rjz09Crd026D9W1houGYN7TTa8buCp+zmvkMIM3gExfh5j1TjkUlojLFKJnQ7Yz6+Mota2QHB5/tae1r3r6QweDDWwAvK/wCE2EnF1HgdkUzHe4tkd+q9fpLPmdyLYfGAs2nhDEtsVTNsYXEC7aXWdy9JcyUPXozoqePkshl8M8J2vsvaDv0FjeAtbiYuVVttbMrUXSc5aQIcR2TxuLW719E4vCvM5Y7iqVtXZtRriere2ZnJdp8NFbDLXgcsakeebA2C+tRdUJLb9g7jAuYi4mLrnC4h9N5pvM+oPcTcK21mO0l/+SPZQ0+j4e4l1N2szJHoFNzsI4mvILs+iCYg+SteD2ZLZ1WYPZMQY5c45q17JwkNjwVLey9aR57tqlkkqn45zn74Xp/TjZ56p5Gv7hePbSznPEjLoDqb3MK3FspzS0RVdm5tH34IOts+o3UeKI2aKlSo1gPxGNAQBvOiOr1alF/V1e0DoRMH7FaLaMtJiF9NM9hsmXRpYc51+iK21hQKZeN8R4kJ90X6MF+EZUDu255ME9nKJHnaU7tEJLiQuwcgDMRy3JtsrZDqrgHDsjU7iBu709wWwKbbvOd3/iPDf4pqwAaeSVMrcyL+T
Z8jViJlaTpkLEr6v4EHiapDnN3Oh+vzCHeqnxG18HSs+sHkbm9r/wBB7lVzbPSxj3TTpmIiXBree4E+qbRNDykHOaIbuudBbW5WqrQ0S54A5fc2VGrbaq3h+UHWPufFQ4fD1axGVr3kmxgn/wAjb1T7CouGI2xh2NzSXgmBFwSNRuHugh0tq5XCi1rGgSc5kcoAiXa+RQ+zujIe3+piW0yLBgp1KrvPstHmn2ydjUKbSHYek8n9dTO49+Quyj996hyvsiSXkrTa+OxQn+sWXu2m8U/NggDmZXqnQ/BBuEw7TUZV6uo4Zm5o7ZLsvaANs0TySfD0crcgc/L8uZ2XuyzpyVg6PEFr6Wmjm+Fj46KrLGTjs09POpgXSoOqYk02yZ3JlszoyynhKjXNBe7M4E6i1iOGhTDZ+DFV4qOtUAIncUXQDv6jXTEfgWVWjbll8eC8UVP+GWHLHYmREuaWndBB3+Sv1J91UeiRLX1aZEGQdLEDQ+UWVow773Tm/kZ3HQxaJXfV2UNJyKDVOOzLK0AV8PyS6tRBT+qBF0DXa1SlEuxZGI3bNadQu2YFo3fRGPAUTsU0b1DsaVb7GmbPndZHsw0aKLDYxrj2U2pMEJxSZny5JR0ys9JcFmpPHFpXljtnTff9l7VtenLSvM304qOHNJtxZfi+cNlUrbPIJLew46kNFxwNpQGJ2L1rgXOJPdEfl16A3Dyo62BH5+c/RP3WN4kUTaezIw75vka5w7wx0eVz4K7dHsJ1WFoUyILabZ7yAT6kpdtLZhqtcwODZBud+7L3mYVhzbgtOF2jJ1daSOw1aAWg52gC5yk8VaYyWyxDzyWkCs8iobKr1BmZSeWzBMWHfKs2zejNNrQ59UuqX7LaM5ZbHxOdFpO5OaWDawuG+e0ZBBPG1j3o2myyi02WWV/BdH8r87hTLpkEh9u8TkP7p71T3j+pUdU4AuIbH9unhopoXYHNPihWyNlENFgB3QFknNEWiff9vNS2WZwmI6auX7WGGisZAYRI+adWjmbrh9YQqh00rE5Gk2vA+vMwk1qiUW0z3vZ4pVmNq0iMr2ggjeCEW7J8A1j8914D0D2/jabhhqP9RjiZaZ7Em7g4aC+i9c2FQqtxAdUdOambbhdp+ixT+Lo2cOUXKwtmygypnae9TFMKoQtVirmiUJ33Mo1IRZxPNAFCV8RCSlRL2lJjGvjgRqltfGgXlLMXtCN6R43afNPk2XQxRiOMftbh5pRQxRrVW09zj57/AKJXQc+u+JsrKMB1bWupjtMuOdrz5lBdaSLTg9n9WBDbJrhhbW3svKsV/ECvTqZXYdxA/LcVZdg9NmVwYOVw1a4QR4cOasi1HZiy4py8ln2m6AvN9rkiqHNEtE5o571ZNo7caTBIVRqbWYKpk2hRlLk9F+GHCOxphHAomqyyr+G2oMwYGuuTDgLDhKcHESFCiwXVoFRp5/Qo2d6U7Sq9pscfoVaqOwHm5e0DlJWrC6Rg6yO0xX1vNa61PG9HG73nwACKpbDpDUE95+yv5GLiVjruSxWz/ZNH5B5n7rEuQcTzylhwBEWUzaaJ6orfVKwAbIVmUKbqlnVlICOyhczXginNsoYJQBC1iQdK8AXtY4C7XHduIVl6k8fBdHCzu+6GxoB/hDg4rVXxHZDTzvP2XsXVDXf+fYqodEdmikC4CC8kxppafRXGdCsE3cjVK1FERqiY3gSoarluqYMISpVvCrky2ESKtVhLMZWOqJxL0txrxEWlQNC0VvauOINknDXVDeQ1PcVhQZ3lIsc98EMaSeXsrYUE2/BY9jvpsgD8KstOuNF5lgDiqRzVcO87wWFro8LFPaPSGoIy4Sq7vEeyk0RSkx5tPCNfqO8wvPtsgMqdiQ4WDhY+is9XpnVFn0AwcHBzfUhLauOw9R2ZwyHUzcHiBCaJODoRVMZW/UdURsnD9vM7tO5390VjcZQJAaR36e6ifUDNDbjZNkfxGWcB0oh2LgWSOtipDYN1JnmBx
SonF8nSDTXzGSi8J/FLqqjsNXpXY7I2oDYgRllsTMQlzLFVfbGyajsfnynq3Fjy7d2WtzCeMhW4mth6hi4uMD2P/beIcJaxoHcT7lDu2vWd+sjugR6JXh8aYaWyOSOqVJJMeSug7jZycq4ypEn8/V/+R/mVihz8liZXbNRzWNbOgnuurY3Z9IERTbrOkm3fzR1mjcB5JcyagU1mz6p0pu8o91O3YVY/KO932BVjq4+kNXt859kJV25SHzHuH1MI5MdIAp9HCR2qnk37lEUujlIal7vGB6KM9IgbNpn/ALiPoFC7blQ7mjwJ90bYWkNaeyaQjsDxv7optFrbBoHcAq0cfWefjN9ALeUKwbKwjmiXklx4kmOWqqyy4osgrJadP0KYUnWhDxD43ELb+yY8ljUi6W9A2IeTNrj1QdV8gXU+PcYzDdqk1bGtEGeyfRQk9mjHHRlerG9LcRWkc1PiKoIsljgZjXmmiyiG5KY7JwQmXBRUqE7k5wDALEFOwJ34dutkBiMc2me0zxCcGnASTadKQbT4JpjTIa21aNQEFzXDg4D2KruO2Vh3mQ2P7HFo8hZA7V2OSTAcPNJqmGrMtmeB4q5bJc68Bu0NkURZofPN5Stmx3kf4jomwm37o/DYZ5d2nE+aaNZlanyorklIX7PwkCXGY0XbNSfJaqvJOUeN1I0WSbOj6d0/KfN9kdjVO8PgBVoEyBDrTNjzMWFykgNuYKf7BeS1zZMET5qzEk5bMnq1xm2Q7NbJN5DQb6ab12cQePqtj+m10/q7LePM9yCLgtjSSpHAtydsL61bQfWd6xQoKLPV2hVdq93gY9kM9zjqSZ4rkrbjCdEjIUVeGgucQ0DUkmB5ITaW1W098u+UfU7lXNq7VqVRBDQAZiT6ooEhq/pFSaTlDndwgeZKjp9L6Vg5j2ibnsujjYESqpVqG8geBQdWDofBOiR730UxGEqtzYeq2o79W57eRYbt8k9c6F8x4fEvpPD2Pcx7dHNJBHiPZeldDv4lZy2hjCA42bXEAE7hUGjSfmFuQWLNhl3WzRBp6PTa3aFtRopGv61nBw9CELTqQVlXsnO3Q6/dZbLXD/gOatyDYjUfm5INtYLV1PxbPsrFi2CoJFnDTnyKQ4qobgiCNRvUGaYOypjaZbIM242I8OC23aYF/rKl2vgw4ToeI/Lqo4oVKZI/PJWximEnR6DgdpNO9N8NigYgryKjtgtNyU0w/SUjfbw/Ap+0/BX7iPW6VYHepKjgvOcH0rB1cEcOkgj4rd6XBj5IsmKcOR+iV18K06gJTU22HWkqF+1rWT4MfJB1TCtboAkm0qt8rRJ5LjE7YJs2Z90EcPUccxdlUuNdzV03TyzPS0TUKETvJ1Uj2wJ0Cjq41rBAh7uWg70BUxj3fEfBFHoI8MUeMQ0vhjnk2Any3qLZ/SNhd1bJk6OsBYEnW/BTNoZqTm73NI8wiOiWw8NTB605nuBE6FunwjcfdX4pRim33OH6tilOSS+jf8yXXMk7vt7LtkqWtgQx0TPAjQjcQugAtC2ebcWnRFB5raIy96xS4sfty+hcemdMH4CR/cJ/9Sgcd0qc+Q1xY3gNf832Shz1C4ooYX/MsM9onxhQ1Hc3Dxn6oR4HALWfiPJMDp73jQz3qE1mk3kFF02tdo6DwdceChr4Wd0cxogCMttxCgq0vJcua5uhWCvxSHZf+hf8QTQaKGKDn022ZUHaewfK4E9sDjr3r1LZe1aNdmejUbUZocpmORBu08iF84SDyU+z9oVaDxUovdTeN7TE8iNHDkZWXJ0yltF8M3hn0U7s93tyQG0abag4OGjhu+45KldH/wCJjXxTxbch06xgJYf7mat7xIVlq4xrgHscHNcJDmkEHxCxThKHc245KW0JsWHNOV4h27e13cUlxjAZkQVZcVUa9uV1/dV/GNLde03jv8eaIkmVvGYG5MfZLamEhWaqEDXp8lojIzyiI+qK6ax24nzTLqwugwKfJkOIHTY/5
iisPh3G5eY8l3lGgRLIAhJyNvR4FlyJPsjuiMvw256nzK25gPxEu71H1i11nC/ddVnpYRUVSVBNKk0aDwC6bgQ42ssoYJxu45G89VvEbUYwZGGeaCUnFL5BnWhrmtB018FHtekDUD26uHr/AKITBjPe95uiKtMtLWA9qQWuNm3GhPHVTSOP10uU0/4GuzmtqMFMuDngSzeXZjdo8iQOSFdjmB5YIn6jUDut6qHC1+rDqhDWVqUuzD9fJvEadyRUQQesce3Np75Lj7rd0qb2zBHDHnzLNmfw9Fi56w/IPMra6Vm3X0UZ3eo3PK25RuK57PMGdZO+65f5LlwBUZkcx6pBZJ1sa+YRdKvzS7NP2XTDwQMPcAUHVocFIyqui+e9AABCwVeKIqDj5qB9JAjZMiyJ2VtuthnTTd2Tqx0ljvDceYulzgQszg6qLSemSjJp6PUNk7fp4hsskOHxMPxN9O0OYU1erqvKqVV1NwewlrhoR+aK67E242u0g9mqBdvzRvb9tyxZMDjtdjbjzqWn3Cq7OFkBVJBuj3c1FUYoIsaACVy2k5MWUgbR5olmEHBTsjxFgDWCXeKnO06B1YfBp+ylLWzBI7lt+GJ0UW7PQ+m4ZQx8l3YM7a2HH6D4grR6QCP6bPIQphs4C51W/wCVA0CWjc/d+0LnY59X43OA4AI3C4BpuDPepjmHwhqGqU3ON3X5KRU/jt7G2AIFRrYsJ07k0pbPFWWPJIHw3Ai2vKJKW7GwhgvO4Hx4lNWOH3V0YXE8513Wf3xZt+GMZScBIMyNHAAAH19EgfXa1rqj/hFo48kXtyoOtyt3Q3x3+pVT6RY2SKbfhHut+NcIEnl4w5Dn/e1vyu/zLSpmZYn7rKf6qQ+cSFznHcUQSoKjJVTOWcvXBK4JIsdFjikBp48CuRU46rCVG47j4IAIa9d50LTqbjquy5AwkPWi2NFCHLptRAGyVDUoBESCuXUjqECAbhdU3kEOYS1zTIO8KZzuIXBY06GEDRZMH0mYQBVaWu3uaJb3xqO66cse17Q5jg5u4gyO7v5KgGmeRUmAxtSg/My4/U3c4c+fNZp4F3RohnfaR6FQoyQTu9EXX7LSUDsjaVOqzMw94OrTwP5ddbaxwZRqO4NPqLesLLT5UbLVWQ4qg5jsr27gQZGjgCDz1WU6R/S49yTbF2uP5Uda4g0SGTBJyPksBgbiHgeCKb0iww/5jj3McrZQd9ju9L1mF4lckv8AY0a928HyWOSWp0oobhUd/wBoHuUtxXSs/wDLpBvNxJ9BAQsUvonk9T6aC/KyykTxPJQ0qwfUFGmQ6odYuGDe5x+nFUnFbVrVPiqGODeyPIJt/D+sW4sCJzNI7ogyFbHD9nH6n1dzTWNV/J6hSZlsP06fn0W3PABO6JOgsB+y6ul+3KmSi8zd0NHjr7FXxWziQuc0vsqONxPx1TxJHedFSazyTJVh6R1srWsHefRVtyun9G3qJbUfo5WLcrFEzbLDh1p2q2sSZSD1lDTW1iQGlDWWLECNvXe5YsQM6K2sWIGbRDFixAEWKQK2sQB1TWjqsWIGO+hf+O7/AOs+6b9MP8MfnBYsWR/sRqh+oRbM/wCDxv8A+b/2qJA7VbWLV4Mi7s2FzU1W1iGJHIVk6Af8a3+130WliQz1j90m6Tf4bf7x7OWLFLH+RZ037Eeb9Jf8XwSRYsU5dy7P+bOVixYokD//2Q==" class="rounded-circle user_img_msg"> </div> </div> <div class="d-flex justify-content-start mb-4"> <div class="img_cont_msg"> <img src="https://static.turbosquid.com/Preview/001292/481/WV/_D.jpg" class="rounded-circle user_img_msg"> </div> <div class="msg_cotainer"> I am looking 
for your next templates <span class="msg_time">9:07 AM, Today</span> </div> </div> <div class="d-flex justify-content-end mb-4"> <div class="msg_cotainer_send"> Ok, thank you have a good day <span class="msg_time_send">9:10 AM, Today</span> </div> <div class="img_cont_msg"> <img src="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxMTEhUTExMWFhUXGBgaGBcXFxgXFxgdFxcXHRcXGhcYHSggGBolHRYVITEhJSkrLi4uFx8zODMtNygtLisBCgoKDg0OGhAQGyslHyUtLS0tLS0tLS0tLS0tLS0tKy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLf/AABEIAOEA4QMBIgACEQEDEQH/xAAcAAACAgMBAQAAAAAAAAAAAAAEBQMGAAECBwj/xABBEAABAwIDBAgFAgQDCAMAAAABAAIRAyEEEjEFQVFhBhMicYGRobEyUsHR8ELhBxQjcjOS8RUWNENigqKyU3OE/8QAGgEAAgMBAQAAAAAAAAAAAAAAAAECAwQFBv/EACkRAAICAgEDBAIBBQAAAAAAAAABAhEDIRIEMUEFEyJRMjNhFCNxgeH/2gAMAwEAAhEDEQA/APQq9PM0gbxz13bxv5rxnb+c13kVKbXNOjw4VALACNXtOY24CbCFfNodLX1AAwZZY4MiILhEDsiZtxOnNUfpPiKdWMQwAO/W0FrSdTeXkuIJ1DRKmpplfktf8JRWbWc0gPptY64uWkuZbX4SQ8Rujgrr0hq5g5rg46RbSI08JXjnQzpg7AVn1GsFRtRkOYXEGxkEG97nirmOmxxbbYd1KSXSarR2QZzBoEmADyKhKq2aceRxkn9CnpoGOytboAdL8PVeYVWQTxk+69E6SuljKosHkgCZMa35gqnfy7alSoC6L25ws+J6o09TJTlyiAbNDutaBqTaPP6FfSGHORoDKNAWF8gM21nevn2hT6pzX5hLTLeIt7EWK9i6KdJ6NejTaajBVa0Nc0mDYRInWVZRkZYcViC6jVD202gscIAjUEQTwuV4rtDFmlZph2s2ieHeYPkrx0yxpnqmk6SeF5i2h3a8V57jgKjoiQDabkxIJ9ChLdhekK62KrPu6XRxmY8NVJRqPGkDvGikoVQHQ2bbtN+hBKkq12zDjra14n2UhA7sQ/iJ7kNXa48STyR73AGMpka6QPX2XHWuvkbpvQAJR2c49pxDe8wpHspCwOY8hbzNvJRV6jnGCQY3WP3RWGwObUwOIiPZAA7WuJ+ENHEwPr9F0eqGsuPJGHZzeTuRdr5iPVY3BHcyOcA2QAOGM1aw+Q+yyriQ21xyCIds8zJdHfI9ioqzLRDT4n6piAztFs2D/MLn/aLdAHDmY+i5xNEHS35yQVQXj2ugAmrip3+32UZfOoEodzY/dFYBgLmtcQGkgE6wCdbJMaLD0XpZi93/AEtHq5XTZdd7aNekwCHgF17wJFvOVHT6EOwbBVZVbWpuaM0AgCYggzDhrw1Rez6WZ7WBoGYEbxaDvJNrBVWpIspxkgF1Wp1bXNdEDqrawO0IUOCpONWmTJOdtzzMazzRbXZqVOoAMjqj2tcB2C4NiBxPZPktU6rmkOBAgg6Dcpxk3BoJv5II6p/4FiYZhwH+YLazcWavcRRMZWL6DS1zHQ5pgAghzjqXT2b2jW66pUDTqt6xzmU3uAqgSS0kw+wNxFwE/Z0bZQw1VjSXFwkk6y27dLSOO9C7ZdSp0qbxmJqMl3a5g+hOvNW7iUuSyulGkC4rolhTX/o4sClBJfVYdRfLDIInSTwKS4+pToNFNpeRmJMOJI+UAmzeMDdxXdSg+R1d
CoC+MhcDlkn4pbYnS5Qm1cFUa91OqYLYOgNuMjXvUqfllUYtSobYOp12zze9Ct3nLUE+5cPBVnEnK5zhx99U02DULevpXAdlMR8mb6EeSW7Sw8XBsTB74lCVSY0viAuxBO9S4SscwvvHuhHs1Cn2ZhnVKjWtmSR7q0rXcve0qzi0OBmAbkk5i0WEndoq2KxhgBu2JO8edjonm1HhoYNcr2g33EO95Vb2thSycrszSRB4gnsn1jkQUgbJWiS4kuMncDpxJ3IhhaBZs905fGBB8Uqp5mkBxOXi7NB36jcmrMKCAWl27Ql7b8HC48QUMDH1HE2ad3ICO7cozh3v+J2UcCHZfT6roNM2mf8AMPAb/Bcio4T2r30BHoQhAdEOb8JbHIkTy4FDV8XV+b1j6LVSTfKTzAj7BDVqZFiT5T6oA76walrhxgz9Qo31mfMfI/usFASIDr6TZcnD8u8Xn90xHXXu/TVt4j3ChqYmof1zHMe0LsYPh9Quzg5bb4gfNMAR2Kdo8X5k/VcdZ+H/AEU1Si4W1HCELk5JAd5xvH55rttY/N9FzRaN65dT5oAunRPpDXaw0nF1ShGWILuqJktI+VskhN9hbQZWYGzq3QmJ3Ec1RNj7UdQcbWcC0931V1wWzMNlo56mRpaDnB7YeIm8HW27eq+NOyxytIe1qAIaCJDPhBJIbaLDQGCRbitdW1dUNs0TI7FjDSaYl/MDjx04wisLj84MQI1GVoPLQJ2iptg+bmfNYmHXu+ZbRofJm34tjy+mwgwSL6xuMdyX4XDU6tFrXMJLDUpmCNA4OYYmR39youN2vWbU+UzIAaBodIA/JVo2X0hey7WjLUOZ5y/CYguEjukKMvslhbuh9VZUDIYwEiwzvyxzsDKp3Txz6Zp1HBmaIImQQ0ggEaibju7lc8VtKWtcKmbfaBA0Lrd6VYrZ7Kjy97Gvfxd2tNNbJwp7J5OcHT0VKkzrNotL2ZG1mN1duqUhlcI3Zgg9v4J1Bz2OhwDtRvtAPfBE9xXoFPDgQAxttwjdyjcl/TijRqYUtZlFSm4OgCDrDt28EeSjJvkirk4nlsk6qy9EMOAXPOoAA4wTJ8TB8lX3U7wrL0StTe/i7xhrTHuVaM52hL3PEwC6e+IhQMYAMr3jS2hmdYMme5d16TnPBBtz019DCzEbNgdkzG4ye+27dfmgQXiMGAyC3M03BGt50/N5QGHpgNcGkggAtPhlMnhZp8Udg2viHOtltqY5X0i3mhamIAbMSGkhwGoG+O783IAHbXmzoPM39QuXPINiSP7gR5ESFDXe2QRZcPqcDu8x3IGgym0QTbdu/OakETOn5uSqlULjM2Redw3juKVjoLquabfngoyRvv6eaFNcb9O9a60agwmFHdTXswe68hDurHh9Fy6sDqR9udly4zo4JkaN1KkqBx5Lqowje36+iGqNQBM4W/Ch3HcugDExC0XygDnMmGCxHZyybacI/YpY8QpcHUyvadwISYy9bD2cSDUqPJefhE/4Y5DcT+yZMrOpuzDUeo4LWy8Z1jN2YRPO1lDjKxNyq72DHP8AvCz5XeixVnOeI8lidioA22JZRrttoTH/AFgfVpRmxsVTDRmIubk35g8d672cHOw5Y4dumXCHfNTIqNB43BHmpul+AZVZTxmHhrakGo0GMj7g27wQecHQp6Yskfo72ltUNqANLSXtyyHANgyB2pgapps3bNMsaHn+pEFrRcETzgyLrzetLSI4+Ewp6zWudnzTETxdbgfhO7XcklXYlxuF32Lk7bxbUDgDbNIlpIBEaC5Jsgsfif5lmZz221AMuJFwHchuVeqU5pF7Nzhm4jh4SoamJOjSQN/AfdR4u7NePPCUHCa/wb6v4nHhDfG0+qsHR8ZGjlc+31Crzqjv1HUWn3VnwQy0QADMEkxeT+eytMjVMIxOEksiwOU8+X1UWJaBUmBJEbwHC+vOwHgjesDerJ32+vpMJRtPEwQJ+ExutB177HzQImosgkgm
C05ZAkcjx3Ku7SrFry5sQ6ZHH95BUzcduFiJ3eDlwXZwQRv7Q3jmEAAhwO6VtrDrp9E32FsttR+V099h6pjjOjNQXAkHeBZQeRJ0WxxNqyt0pGg+yLdSc0STrwCa4PZTmH+qwjh9xx/dHuwAqA+kyoc9k1AqbHiYi/fCytVn9IHqm2J2WZj9XDcf2QGJ2ecpI+Iat/ZTUiEosXOj9rfVazf9IjjP4FzUYdCPRQlo3lTINEtV9vyB3LimT+fnNaMLXWwmRZskb5UzAO5D9aN3nClw9KSPdAWcV6JHcoaeo7wisceem5QMYZE8QkMsOBxbqbtfDiE7NTMA4FVuZHPci8BjA0w423qADXPyWKXrqfzNW0goYYih1eIOhFSnSqyNHFvYeR36+KHr0mEPY6A3IMpgZg5vwjSS0iAY79ykJLm0XOEFtR9IxoG1RLY/7gpMTh8xFrkD9x5hS8g+wqFGm2k+mWglxJDyzQQLAm/E6pJU2VUy5mjM4n9JtHH3VrZRa3W0rYc1o7MeWnehRSJyyylBR+ivYLZ5aHsdJz5A6JganUcyEyw/R/qnh7agaGmwc2SRvB4zJTDryeHr9VrUi5OlkMqS2Mto9H6GKwjqjaeWqztBwsSGuOZpA1loPmqtiqxa6ZsHNB4WFx6r0LowTlcAwhs753x9ivN9qkFlRukkkeIHt9FK7H5GBrB+sgSCBEHTgqxteuesdfX33+onxTn+Zzm2uUEbpgR+eKS7TdmJPE/X/RAxYa51lN6WFL8r2zLvtdJmtJMC5XrfQ/osamGa4dh4uCQdY0PEGyryTUUW4ocmKth7HeDJkcwB7Qr9hGMpMmpJaBv38gAEXsjAEDttylpMiZHIzvslvSPESYmwtAke3csblyezYo0qRXeke2hUJaGBjByuefLeqnUx7m/C4z3nTn+ye4ykXGzB3fgSnF7JzauAHAx9YWiLVFMouxbi9sgwHCSOBvK3T2sHESRPke6d6kd0dbudPcfooH7Ey8VLlEgoSCH4Ev7TYnvHCygqbLzSHABw3wT6tRuBokEXsrFSwwcBI+6i8jRasSZ5xi8MWmCO5QCmNfZejYrZ7SCHCe9V7HbHpCTA8LKay2VzwVsrTQ0b/VSmraw8dSia1Om3T6oGrB0KtTszyVEtKk0XcbcDvWqcOqC28IGUx2aJe2x3+x+6TEhvisP+oaKNl9yaYSmSNJChxGEymQIURgvVt+UeSxT5DyWIFZZaTi9r25Y7Ie3jmY4EW8XKXHgENcORB39oT9V3haRzAgGNDusQQfdbbQinJ/S4tM78jvsQm+4l2FpudZKJw+znuvZvfPsnGVrdIHcFo1+amRB6Wymj4jPmPqp20GNiAB7qN1fmohXkwL9wJSY+4aK+XQkdxhUrpJs8sbTdBh5f5NcWa8wPRW4U3ndHejdq7NFWjhGkA/4gkf3k+8qqc+NM04MLnJxZ5U6vle08onvEj3RtbDNcy36rjz09Crd026D9W1houGYN7TTa8buCp+zmvkMIM3gExfh5j1TjkUlojLFKJnQ7Yz6+Mota2QHB5/tae1r3r6QweDDWwAvK/wCE2EnF1HgdkUzHe4tkd+q9fpLPmdyLYfGAs2nhDEtsVTNsYXEC7aXWdy9JcyUPXozoqePkshl8M8J2vsvaDv0FjeAtbiYuVVttbMrUXSc5aQIcR2TxuLW719E4vCvM5Y7iqVtXZtRriere2ZnJdp8NFbDLXgcsakeebA2C+tRdUJLb9g7jAuYi4mLrnC4h9N5pvM+oPcTcK21mO0l/+SPZQ0+j4e4l1N2szJHoFNzsI4mvILs+iCYg+SteD2ZLZ1WYPZMQY5c45q17JwkNjwVLey9aR57tqlkkqn45zn74Xp/TjZ56p5Gv7hePbSznPEjLoDqb3MK3FspzS0RVdm5tH34IOts+o3UeKI2aKlSo1gPxGNAQBvOiOr1alF/V1e0DoRMH7FaLaMtJiF9NM9hsmXRpYc51+iK21hQKZeN8
R4kJ90X6MF+EZUDu255ME9nKJHnaU7tEJLiQuwcgDMRy3JtsrZDqrgHDsjU7iBu709wWwKbbvOd3/iPDf4pqwAaeSVMrcyL+TZ8jViJlaTpkLEr6v4EHiapDnN3Oh+vzCHeqnxG18HSs+sHkbm9r/wBB7lVzbPSxj3TTpmIiXBree4E+qbRNDykHOaIbuudBbW5WqrQ0S54A5fc2VGrbaq3h+UHWPufFQ4fD1axGVr3kmxgn/wAjb1T7CouGI2xh2NzSXgmBFwSNRuHugh0tq5XCi1rGgSc5kcoAiXa+RQ+zujIe3+piW0yLBgp1KrvPstHmn2ydjUKbSHYek8n9dTO49+Quyj996hyvsiSXkrTa+OxQn+sWXu2m8U/NggDmZXqnQ/BBuEw7TUZV6uo4Zm5o7ZLsvaANs0TySfD0crcgc/L8uZ2XuyzpyVg6PEFr6Wmjm+Fj46KrLGTjs09POpgXSoOqYk02yZ3JlszoyynhKjXNBe7M4E6i1iOGhTDZ+DFV4qOtUAIncUXQDv6jXTEfgWVWjbll8eC8UVP+GWHLHYmREuaWndBB3+Sv1J91UeiRLX1aZEGQdLEDQ+UWVow773Tm/kZ3HQxaJXfV2UNJyKDVOOzLK0AV8PyS6tRBT+qBF0DXa1SlEuxZGI3bNadQu2YFo3fRGPAUTsU0b1DsaVb7GmbPndZHsw0aKLDYxrj2U2pMEJxSZny5JR0ys9JcFmpPHFpXljtnTff9l7VtenLSvM304qOHNJtxZfi+cNlUrbPIJLew46kNFxwNpQGJ2L1rgXOJPdEfl16A3Dyo62BH5+c/RP3WN4kUTaezIw75vka5w7wx0eVz4K7dHsJ1WFoUyILabZ7yAT6kpdtLZhqtcwODZBud+7L3mYVhzbgtOF2jJ1daSOw1aAWg52gC5yk8VaYyWyxDzyWkCs8iobKr1BmZSeWzBMWHfKs2zejNNrQ59UuqX7LaM5ZbHxOdFpO5OaWDawuG+e0ZBBPG1j3o2myyi02WWV/BdH8r87hTLpkEh9u8TkP7p71T3j+pUdU4AuIbH9unhopoXYHNPihWyNlENFgB3QFknNEWiff9vNS2WZwmI6auX7WGGisZAYRI+adWjmbrh9YQqh00rE5Gk2vA+vMwk1qiUW0z3vZ4pVmNq0iMr2ggjeCEW7J8A1j8914D0D2/jabhhqP9RjiZaZ7Em7g4aC+i9c2FQqtxAdUdOambbhdp+ixT+Lo2cOUXKwtmygypnae9TFMKoQtVirmiUJ33Mo1IRZxPNAFCV8RCSlRL2lJjGvjgRqltfGgXlLMXtCN6R43afNPk2XQxRiOMftbh5pRQxRrVW09zj57/AKJXQc+u+JsrKMB1bWupjtMuOdrz5lBdaSLTg9n9WBDbJrhhbW3svKsV/ECvTqZXYdxA/LcVZdg9NmVwYOVw1a4QR4cOasi1HZiy4py8ln2m6AvN9rkiqHNEtE5o571ZNo7caTBIVRqbWYKpk2hRlLk9F+GHCOxphHAomqyyr+G2oMwYGuuTDgLDhKcHESFCiwXVoFRp5/Qo2d6U7Sq9pscfoVaqOwHm5e0DlJWrC6Rg6yO0xX1vNa61PG9HG73nwACKpbDpDUE95+yv5GLiVjruSxWz/ZNH5B5n7rEuQcTzylhwBEWUzaaJ6orfVKwAbIVmUKbqlnVlICOyhczXginNsoYJQBC1iQdK8AXtY4C7XHduIVl6k8fBdHCzu+6GxoB/hDg4rVXxHZDTzvP2XsXVDXf+fYqodEdmikC4CC8kxppafRXGdCsE3cjVK1FERqiY3gSoarluqYMISpVvCrky2ESKtVhLMZWOqJxL0txrxEWlQNC0VvauOINknDXVDeQ1PcVhQZ3lIsc98EMaSeXsrYUE2/BY9jvpsgD8KstOuNF5lgDiqRzVcO87wWFro8LFPaPSGoIy4Sq7vEeyk0RSkx5tPCNfqO8wvPtsgMqdiQ4WDhY+is9XpnVFn0AwcHBzfUhL
auOw9R2ZwyHUzcHiBCaJODoRVMZW/UdURsnD9vM7tO5390VjcZQJAaR36e6ifUDNDbjZNkfxGWcB0oh2LgWSOtipDYN1JnmBxSonF8nSDTXzGSi8J/FLqqjsNXpXY7I2oDYgRllsTMQlzLFVfbGyajsfnynq3Fjy7d2WtzCeMhW4mth6hi4uMD2P/beIcJaxoHcT7lDu2vWd+sjugR6JXh8aYaWyOSOqVJJMeSug7jZycq4ypEn8/V/+R/mVihz8liZXbNRzWNbOgnuurY3Z9IERTbrOkm3fzR1mjcB5JcyagU1mz6p0pu8o91O3YVY/KO932BVjq4+kNXt859kJV25SHzHuH1MI5MdIAp9HCR2qnk37lEUujlIal7vGB6KM9IgbNpn/ALiPoFC7blQ7mjwJ90bYWkNaeyaQjsDxv7optFrbBoHcAq0cfWefjN9ALeUKwbKwjmiXklx4kmOWqqyy4osgrJadP0KYUnWhDxD43ELb+yY8ljUi6W9A2IeTNrj1QdV8gXU+PcYzDdqk1bGtEGeyfRQk9mjHHRlerG9LcRWkc1PiKoIsljgZjXmmiyiG5KY7JwQmXBRUqE7k5wDALEFOwJ34dutkBiMc2me0zxCcGnASTadKQbT4JpjTIa21aNQEFzXDg4D2KruO2Vh3mQ2P7HFo8hZA7V2OSTAcPNJqmGrMtmeB4q5bJc68Bu0NkURZofPN5Stmx3kf4jomwm37o/DYZ5d2nE+aaNZlanyorklIX7PwkCXGY0XbNSfJaqvJOUeN1I0WSbOj6d0/KfN9kdjVO8PgBVoEyBDrTNjzMWFykgNuYKf7BeS1zZMET5qzEk5bMnq1xm2Q7NbJN5DQb6ab12cQePqtj+m10/q7LePM9yCLgtjSSpHAtydsL61bQfWd6xQoKLPV2hVdq93gY9kM9zjqSZ4rkrbjCdEjIUVeGgucQ0DUkmB5ITaW1W098u+UfU7lXNq7VqVRBDQAZiT6ooEhq/pFSaTlDndwgeZKjp9L6Vg5j2ibnsujjYESqpVqG8geBQdWDofBOiR730UxGEqtzYeq2o79W57eRYbt8k9c6F8x4fEvpPD2Pcx7dHNJBHiPZeldDv4lZy2hjCA42bXEAE7hUGjSfmFuQWLNhl3WzRBp6PTa3aFtRopGv61nBw9CELTqQVlXsnO3Q6/dZbLXD/gOatyDYjUfm5INtYLV1PxbPsrFi2CoJFnDTnyKQ4qobgiCNRvUGaYOypjaZbIM242I8OC23aYF/rKl2vgw4ToeI/Lqo4oVKZI/PJWximEnR6DgdpNO9N8NigYgryKjtgtNyU0w/SUjfbw/Ap+0/BX7iPW6VYHepKjgvOcH0rB1cEcOkgj4rd6XBj5IsmKcOR+iV18K06gJTU22HWkqF+1rWT4MfJB1TCtboAkm0qt8rRJ5LjE7YJs2Z90EcPUccxdlUuNdzV03TyzPS0TUKETvJ1Uj2wJ0Cjq41rBAh7uWg70BUxj3fEfBFHoI8MUeMQ0vhjnk2Any3qLZ/SNhd1bJk6OsBYEnW/BTNoZqTm73NI8wiOiWw8NTB605nuBE6FunwjcfdX4pRim33OH6tilOSS+jf8yXXMk7vt7LtkqWtgQx0TPAjQjcQugAtC2ebcWnRFB5raIy96xS4sfty+hcemdMH4CR/cJ/9Sgcd0qc+Q1xY3gNf832Shz1C4ooYX/MsM9onxhQ1Hc3Dxn6oR4HALWfiPJMDp73jQz3qE1mk3kFF02tdo6DwdceChr4Wd0cxogCMttxCgq0vJcua5uhWCvxSHZf+hf8QTQaKGKDn022ZUHaewfK4E9sDjr3r1LZe1aNdmejUbUZocpmORBu08iF84SDyU+z9oVaDxUovdTeN7TE8iNHDkZWXJ0yltF8M3hn0U7s93tyQG0abag4OGjhu+45KldH/wCJjXxTxbch06xgJYf7mat7xIVlq4xrgHscHNcJDmkEHxCxThKHc245KW0JsWHNOV4h27e1
3cUlxjAZkQVZcVUa9uV1/dV/GNLde03jv8eaIkmVvGYG5MfZLamEhWaqEDXp8lojIzyiI+qK6ax24nzTLqwugwKfJkOIHTY/5iisPh3G5eY8l3lGgRLIAhJyNvR4FlyJPsjuiMvw256nzK25gPxEu71H1i11nC/ddVnpYRUVSVBNKk0aDwC6bgQ42ssoYJxu45G89VvEbUYwZGGeaCUnFL5BnWhrmtB018FHtekDUD26uHr/AKITBjPe95uiKtMtLWA9qQWuNm3GhPHVTSOP10uU0/4GuzmtqMFMuDngSzeXZjdo8iQOSFdjmB5YIn6jUDut6qHC1+rDqhDWVqUuzD9fJvEadyRUQQesce3Np75Lj7rd0qb2zBHDHnzLNmfw9Fi56w/IPMra6Vm3X0UZ3eo3PK25RuK57PMGdZO+65f5LlwBUZkcx6pBZJ1sa+YRdKvzS7NP2XTDwQMPcAUHVocFIyqui+e9AABCwVeKIqDj5qB9JAjZMiyJ2VtuthnTTd2Tqx0ljvDceYulzgQszg6qLSemSjJp6PUNk7fp4hsskOHxMPxN9O0OYU1erqvKqVV1NwewlrhoR+aK67E242u0g9mqBdvzRvb9tyxZMDjtdjbjzqWn3Cq7OFkBVJBuj3c1FUYoIsaACVy2k5MWUgbR5olmEHBTsjxFgDWCXeKnO06B1YfBp+ylLWzBI7lt+GJ0UW7PQ+m4ZQx8l3YM7a2HH6D4grR6QCP6bPIQphs4C51W/wCVA0CWjc/d+0LnY59X43OA4AI3C4BpuDPepjmHwhqGqU3ON3X5KRU/jt7G2AIFRrYsJ07k0pbPFWWPJIHw3Ai2vKJKW7GwhgvO4Hx4lNWOH3V0YXE8513Wf3xZt+GMZScBIMyNHAAAH19EgfXa1rqj/hFo48kXtyoOtyt3Q3x3+pVT6RY2SKbfhHut+NcIEnl4w5Dn/e1vyu/zLSpmZYn7rKf6qQ+cSFznHcUQSoKjJVTOWcvXBK4JIsdFjikBp48CuRU46rCVG47j4IAIa9d50LTqbjquy5AwkPWi2NFCHLptRAGyVDUoBESCuXUjqECAbhdU3kEOYS1zTIO8KZzuIXBY06GEDRZMH0mYQBVaWu3uaJb3xqO66cse17Q5jg5u4gyO7v5KgGmeRUmAxtSg/My4/U3c4c+fNZp4F3RohnfaR6FQoyQTu9EXX7LSUDsjaVOqzMw94OrTwP5ddbaxwZRqO4NPqLesLLT5UbLVWQ4qg5jsr27gQZGjgCDz1WU6R/S49yTbF2uP5Uda4g0SGTBJyPksBgbiHgeCKb0iww/5jj3McrZQd9ju9L1mF4lckv8AY0a928HyWOSWp0oobhUd/wBoHuUtxXSs/wDLpBvNxJ9BAQsUvonk9T6aC/KyykTxPJQ0qwfUFGmQ6odYuGDe5x+nFUnFbVrVPiqGODeyPIJt/D+sW4sCJzNI7ogyFbHD9nH6n1dzTWNV/J6hSZlsP06fn0W3PABO6JOgsB+y6ul+3KmSi8zd0NHjr7FXxWziQuc0vsqONxPx1TxJHedFSazyTJVh6R1srWsHefRVtyun9G3qJbUfo5WLcrFEzbLDh1p2q2sSZSD1lDTW1iQGlDWWLECNvXe5YsQM6K2sWIGbRDFixAEWKQK2sQB1TWjqsWIGO+hf+O7/AOs+6b9MP8MfnBYsWR/sRqh+oRbM/wCDxv8A+b/2qJA7VbWLV4Mi7s2FzU1W1iGJHIVk6Af8a3+130WliQz1j90m6Tf4bf7x7OWLFLH+RZ037Eeb9Jf8XwSRYsU5dy7P+bOVixYokD//2Q==" class="rounded-circle user_img_msg"> </div> </div> <div class="d-flex justify-content-start mb-4"> <div class="img_cont_msg"> <img 
src="https://static.turbosquid.com/Preview/001292/481/WV/_D.jpg" class="rounded-circle user_img_msg"> </div> <div class="msg_cotainer"> Bye, see you <span class="msg_time">9:12 AM, Today</span> </div> </div> </div> <div class="card-footer"> <div class="input-group"> <div class="input-group-append"> <span class="input-group-text attach_btn"><i class="fas fa-paperclip"></i></span> </div> <textarea name="" class="form-control type_msg" placeholder="Type your message..."></textarea> <div class="input-group-append"> <span class="input-group-text send_btn"><i class="fas fa-location-arrow"></i></span> </div> </div> </div> </div> </div> </div> </div> </body> </html> """ # noqa: E501 class MyHandler(BaseHTTPRequestHandler): """ Handle HTTP requests. """ def _interactive_running(self, opt, reply_text): reply = {'episode_done': False, 'text': reply_text} SHARED['agent'].observe(reply) model_res = SHARED['agent'].act() return model_res def do_HEAD(self): """ Handle HEAD requests. """ self.send_response(200) self.send_header('Content-type', 'text/html') self.end_headers() def do_POST(self): """ Handle POST request, especially replying to a chat message. """ if self.path == '/interact': content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) model_response = self._interactive_running( SHARED.get('opt'), body.decode('utf-8') ) self.send_response(200) self.send_header('Content-type', 'application/json') self.end_headers() json_str = json.dumps(model_response) self.wfile.write(bytes(json_str, 'utf-8')) elif self.path == '/reset': self.send_response(200) self.send_header('Content-type', 'application/json') self.end_headers() SHARED['agent'].reset() self.wfile.write(bytes("{}", 'utf-8')) else: return self._respond({'status': 500}) def do_GET(self): """ Respond to GET request, especially the initial load. 
""" paths = { '/': {'status': 200}, '/favicon.ico': {'status': 202}, # Need for chrome } if self.path in paths: self._respond(paths[self.path]) else: self._respond({'status': 500}) def _handle_http(self, status_code, path, text=None): self.send_response(status_code) self.send_header('Content-type', 'text/html') self.end_headers() content = WEB_HTML.format(STYLE_SHEET, FONT_AWESOME) return bytes(content, 'UTF-8') def _respond(self, opts): response = self._handle_http(opts['status'], self.path) self.wfile.write(response) def setup_interactive(shared): """ Build and parse CLI opts. """ parser = setup_args() parser.add_argument('--port', type=int, default=PORT, help='Port to listen on.') parser.add_argument( '--host', default=HOST_NAME, type=str, help='Host from which allow requests, use 0.0.0.0 to allow all IPs', ) SHARED['opt'] = parser.parse_args(print_args=False) SHARED['opt']['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent' # Create model and assign it to the specified task agent = create_agent(SHARED.get('opt'), requireModelExists=True) SHARED['agent'] = agent SHARED['world'] = create_task(SHARED.get('opt'), SHARED['agent']) # show args after loading model parser.opt = agent.opt parser.print_args() return agent.opt if __name__ == '__main__': opt = setup_interactive(SHARED) MyHandler.protocol_version = 'HTTP/1.0' httpd = HTTPServer((opt['host'], opt['port']), MyHandler) print('http://{}:{}/'.format(opt['host'], opt['port'])) try: httpd.serve_forever() except KeyboardInterrupt: pass httpd.server_close()
138.964179
11,582
0.877215
2,443
46,553
16.647974
0.270978
0.011409
0.006762
0.004155
0.907502
0.897495
0.891913
0.890684
0.890684
0.890217
0
0.128162
0.062574
46,553
334
11,583
139.38024
0.803804
0.01102
0
0.473684
0
0.084211
0.93618
0.790293
0
1
0
0
0
1
0.024561
false
0.003509
0.021053
0
0.063158
0.010526
0
0
1
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
1
0
0
0
0
0
0
0
0
0
0
0
0
12
d550a124d2a11a6145335f8717128bdf8f835bbf
108
py
Python
omtool/analysis/utils/__init__.py
Kraysent/Galactic-archeology
51ab18f4bdfc75c1c9eebd745f841b02c57d2d64
[ "Apache-2.0" ]
1
2021-11-27T16:24:07.000Z
2021-11-27T16:24:07.000Z
omtool/analysis/utils/__init__.py
Kraysent/Galactic-archeology
51ab18f4bdfc75c1c9eebd745f841b02c57d2d64
[ "Apache-2.0" ]
32
2021-09-12T16:57:03.000Z
2021-12-04T09:06:54.000Z
omtool/analysis/utils/__init__.py
Kraysent/Galactic-archeology
51ab18f4bdfc75c1c9eebd745f841b02c57d2d64
[ "Apache-2.0" ]
null
null
null
from omtool.analysis.utils.galactic_utils import get_galactic_basis from omtool.analysis.utils.math import *
54
67
0.87037
16
108
5.6875
0.5625
0.21978
0.395604
0.505495
0
0
0
0
0
0
0
0
0.064815
108
2
68
54
0.90099
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
d5949d4125d9cad1045c86f4e4ba4b3eacc732b5
176
py
Python
Experiments/ExperimentModule.py
joelliusp/SpaceHabit
5656ef4d9c57f3e58d0ed756a3aa754c8a7dd6a5
[ "MIT" ]
null
null
null
Experiments/ExperimentModule.py
joelliusp/SpaceHabit
5656ef4d9c57f3e58d0ed756a3aa754c8a7dd6a5
[ "MIT" ]
13
2016-07-19T04:13:20.000Z
2016-08-17T06:06:47.000Z
Experiments/ExperimentModule.py
joelliusp/SpaceHabit
5656ef4d9c57f3e58d0ed756a3aa754c8a7dd6a5
[ "MIT" ]
null
null
null
jvar = 0 def return_something(): return "Hi world" def change_jvar(): jvar = 18 print(jvar) def print_jvar(): print(jvar) print("Called on import! Oops!")
11.733333
32
0.630682
25
176
4.32
0.56
0.25
0.259259
0
0
0
0
0
0
0
0
0.022388
0.238636
176
14
33
12.571429
0.783582
0
0
0.222222
0
0
0.177143
0
0
0
0
0
0
1
0.333333
false
0
0.111111
0.111111
0.555556
0.444444
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
1
0
7
637e13c1f0be7001046ed119921e792f36c837e4
129
py
Python
djadmin2/tests/__init__.py
beezz/django-admin2
4aec1a3836011cd46e5eb8b6375590bf5a76c044
[ "BSD-3-Clause" ]
1
2015-04-30T13:34:03.000Z
2015-04-30T13:34:03.000Z
djadmin2/tests/__init__.py
taxido/django-admin2
6a6b3d5f790b8289b0dd0f9194d80799af8804dc
[ "BSD-3-Clause" ]
1
2021-03-19T23:57:09.000Z
2021-03-19T23:57:09.000Z
djadmin2/tests/__init__.py
RyanBalfanz/django-admin2
e7f0611eea22370bb3418e25e9cd10ddbac4fd6d
[ "BSD-3-Clause" ]
null
null
null
from test_admin2tags import * from test_types import * from test_utils import * from test_views import * from test_core import *
21.5
29
0.806202
20
129
4.95
0.4
0.40404
0.565657
0
0
0
0
0
0
0
0
0.009174
0.155039
129
5
30
25.8
0.899083
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
8950f1dbce5a1cca3b764a84f30cdec97fe076d8
189
py
Python
om-backend/project/routes/user/__init__.py
SOFIE-project/SMAUG-Marketplace
404b6caa7c5ea58c27c20d716dffa60904fb7f46
[ "Apache-2.0" ]
1
2021-03-29T15:11:46.000Z
2021-03-29T15:11:46.000Z
om-backend/project/routes/user/__init__.py
SOFIE-project/SMAUG-Marketplace
404b6caa7c5ea58c27c20d716dffa60904fb7f46
[ "Apache-2.0" ]
null
null
null
om-backend/project/routes/user/__init__.py
SOFIE-project/SMAUG-Marketplace
404b6caa7c5ea58c27c20d716dffa60904fb7f46
[ "Apache-2.0" ]
1
2021-01-30T02:49:38.000Z
2021-01-30T02:49:38.000Z
from flask import Blueprint blueprint = Blueprint("user", "__name__") from project.routes.user import locker from project.routes.user import request from project.routes.user import token
23.625
41
0.809524
26
189
5.730769
0.423077
0.221477
0.342282
0.422819
0.543624
0
0
0
0
0
0
0
0.116402
189
7
42
27
0.892216
0
0
0
0
0
0.063492
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0.4
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
7
896f0f92cf687b44e7bc9f9992f6e740e72eb59b
136
py
Python
py/src/datacentric/platform/logging/__init__.py
datacentricorg/datacentric
b9e2dedfac35759ea09bb5653095daba5861512e
[ "Apache-2.0" ]
1
2019-08-08T01:27:47.000Z
2019-08-08T01:27:47.000Z
py/src/datacentric/platform/logging/__init__.py
datacentricorg/datacentric
b9e2dedfac35759ea09bb5653095daba5861512e
[ "Apache-2.0" ]
null
null
null
py/src/datacentric/platform/logging/__init__.py
datacentricorg/datacentric
b9e2dedfac35759ea09bb5653095daba5861512e
[ "Apache-2.0" ]
null
null
null
from datacentric.platform.logging.in_memory_log import InMemoryLog from datacentric.platform.logging.log_entry_type import LogEntryType
45.333333
68
0.897059
18
136
6.555556
0.666667
0.254237
0.389831
0.508475
0
0
0
0
0
0
0
0
0.058824
136
2
69
68
0.921875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
89b80d2d0945cd2fe106c40304f9a151d5220963
216
py
Python
04_flask_intro/test/test_util.py
coryandrewtaylor/IntroToPython
26eb6bbd8811aeb9181054162227638b19e254f1
[ "MIT" ]
null
null
null
04_flask_intro/test/test_util.py
coryandrewtaylor/IntroToPython
26eb6bbd8811aeb9181054162227638b19e254f1
[ "MIT" ]
5
2020-04-17T17:12:28.000Z
2021-03-07T23:47:13.000Z
04_flask_intro/test/test_util.py
coryandrewtaylor/IntroToPython
26eb6bbd8811aeb9181054162227638b19e254f1
[ "MIT" ]
7
2020-04-23T21:46:38.000Z
2021-03-09T00:05:40.000Z
from flask_intro.util import _is_number def test_int_is_number(): assert _is_number("1") def test_float_is_number(): assert _is_number("1.0") def test_str_is_not_number(): assert not _is_number("a")
16.615385
39
0.736111
37
216
3.783784
0.459459
0.342857
0.2
0.228571
0.328571
0.328571
0
0
0
0
0
0.016484
0.157407
216
13
40
16.615385
0.752747
0
0
0
0
0
0.023041
0
0
0
0
0
0.428571
1
0.428571
true
0
0.142857
0
0.571429
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
0
0
1
0
0
7
985539e7fb785af3eceed48de87959b8ff00c9d9
274
py
Python
api/util.py
rozap/aircooledrescue
95c0fec2a8452205019db7e721a7da1fcc4d5507
[ "MIT" ]
null
null
null
api/util.py
rozap/aircooledrescue
95c0fec2a8452205019db7e721a7da1fcc4d5507
[ "MIT" ]
2
2017-04-22T23:23:58.000Z
2017-04-22T23:24:19.000Z
api/util.py
rozap/aircooledrescue
95c0fec2a8452205019db7e721a7da1fcc4d5507
[ "MIT" ]
null
null
null
import re def is_email(val, allow_none = True): if allow_none and len(val) == 0: return True return re.match(r"[^@]+@[^@]+\.[^@]+", val) def is_phone(val, allow_none = True): if allow_none and len(val) == 0: return True return re.match(r"\d{3}-\d{3}-\d{3}", val)
21.076923
44
0.613139
50
274
3.24
0.38
0.222222
0.148148
0.197531
0.753086
0.753086
0.753086
0.753086
0.753086
0.753086
0
0.021834
0.164234
274
13
45
21.076923
0.68559
0
0
0.444444
0
0
0.127273
0
0
0
0
0
0
1
0.222222
false
0
0.111111
0
0.777778
0
0
0
0
null
1
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
9
986835c45d72d8d29f3d02c80e1dbc32a84fd253
6,320
py
Python
tests/utils/test_datesToPeriods.py
ExesiosPB/libm
09c2638d895a4ba69e0d7f4f0e353f27d4b7911f
[ "MIT" ]
25
2018-12-14T22:17:09.000Z
2020-04-14T16:14:11.000Z
tests/utils/test_datesToPeriods.py
davidlenz/pygrams
8cb05bd9704c77e700fd2462e032cefd9a3ef475
[ "MIT" ]
250
2018-12-14T09:52:58.000Z
2020-05-13T08:33:45.000Z
tests/utils/test_datesToPeriods.py
davidlenz/pygrams
8cb05bd9704c77e700fd2462e032cefd9a3ef475
[ "MIT" ]
13
2018-12-12T10:51:59.000Z
2020-04-20T11:35:58.000Z
import unittest import numpy as np import numpy.testing as npt from scripts.utils.date_utils import tfidf_with_dates_to_weekly_term_counts class test_usptoDatesToPeriods(unittest.TestCase): @staticmethod def run_test_with_conversion(combined_array): numpy_matrix = np.array(combined_array) tfidf_matrix = np.array(numpy_matrix[:, 1:]) publication_week_dates = np.array(numpy_matrix[:, 0]) return tfidf_with_dates_to_weekly_term_counts(tfidf_matrix, publication_week_dates) def test_week_single_entry(self): tfidf = [ [200801, 0, 0.3, 0], ] expected_term_counts = np.array([ [0, 1, 0], ]) expected_term_totals = [1] expected_week_dates = [200801] actual_term_counts, actual_term_totals, actual_week_dates = self.run_test_with_conversion(tfidf) npt.assert_array_equal(expected_term_counts, actual_term_counts.todense()) npt.assert_array_equal(expected_term_totals, actual_term_totals) npt.assert_array_equal(expected_week_dates, actual_week_dates) def test_week_combining_and_gaps(self): combined_array = [ [200801, 0, 0.3, 0], [200801, 0, 0, 0], [200802, 0, 0, 2.3], [200804, 0.1, 0.3, 0], [200804, 0.2, 0, 0.1], [200806, 0, 0.3, 0], ] expected_term_counts = np.array([ [0, 1, 0], [0, 0, 1], [0, 0, 0], [2, 1, 1], [0, 0, 0], [0, 1, 0] ]) expected_term_totals = [2, 1, 0, 2, 0, 1] expected_week_dates = [200801, 200802, 200803, 200804, 200805, 200806] actual_term_counts, actual_term_totals, actual_week_dates = self.run_test_with_conversion(combined_array) actual_term_counts_dense = actual_term_counts.todense() npt.assert_array_equal(expected_term_counts, actual_term_counts_dense) npt.assert_array_equal(expected_term_totals, actual_term_totals) npt.assert_array_equal(expected_week_dates, actual_week_dates) def test_week_combining_and_split_across_years(self): combined_array = [ [200850, 0, 0.3, 0], [200852, 0, 0, 0], [200852, 0, 0, 2.3], [200902, 0.1, 0.3, 0], [200902, 0.2, 0, 0.1], [200904, 0, 0.3, 0], ] expected_term_counts = np.array([ [0, 1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [2, 1, 
1], [0, 0, 0], [0, 1, 0] ]) expected_week_dates = [200850, 200851, 200852, 200901, 200902, 200903, 200904] expected_term_totals = [1, 0, 2, 0, 2, 0, 1] actual_term_counts, actual_term_totals, actual_week_dates = self.run_test_with_conversion(combined_array) actual_term_counts_dense = actual_term_counts.todense() npt.assert_array_equal(expected_term_counts, actual_term_counts_dense) npt.assert_array_equal(expected_term_totals, actual_term_totals) npt.assert_array_equal(expected_week_dates, actual_week_dates) def test_week_combining_and_split_across_years_create_up_to_week_52(self): combined_array = [ [200850, 0, 0.3, 0], [200850, 0, 0, 0], [200850, 0, 0, 2.3], [200902, 0.1, 0.3, 0], [200902, 0.2, 0, 0.1], [200904, 0, 0.3, 0], ] expected_term_counts = np.array([ [0, 1, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0], [2, 1, 1], [0, 0, 0], [0, 1, 0] ]) expected_week_dates = [200850, 200851, 200852, 200901, 200902, 200903, 200904] expected_term_totals = [3, 0, 0, 0, 2, 0, 1] actual_term_counts, actual_term_totals, actual_week_dates = self.run_test_with_conversion(combined_array) npt.assert_array_equal(expected_term_counts, actual_term_counts.todense()) npt.assert_array_equal(expected_term_totals, actual_term_totals) npt.assert_array_equal(expected_week_dates, actual_week_dates) def test_week_combining_and_split_across_years_include_empty_week_53(self): combined_array = [ [200850, 0, 0.3, 0], [200852, 0, 0, 0], [200852, 0, 0, 2.3], [200853, 0, 0, 0], [200902, 0.1, 0.3, 0], [200902, 0.2, 0, 0.1], [200904, 0, 0.3, 0], ] expected_term_counts = np.array([ [0, 1, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, 0], [2, 1, 1], [0, 0, 0], [0, 1, 0] ]) expected_week_dates = [200850, 200851, 200852, 200853, 200901, 200902, 200903, 200904] expected_term_totals = [1, 0, 2, 1, 0, 2, 0, 1] actual_term_counts, actual_term_totals, actual_week_dates = self.run_test_with_conversion(combined_array) npt.assert_array_equal(expected_term_counts, actual_term_counts.todense()) 
npt.assert_array_equal(expected_term_totals, actual_term_totals) npt.assert_array_equal(expected_week_dates, actual_week_dates) def test_week_combining_and_split_across_years_include_non_zero_week_53(self): combined_array = [ [200850, 0, 0.3, 0], [200852, 0, 0, 0], [200852, 0, 0, 2.3], [200853, 0, 1, 0], [200902, 0.1, 0.3, 0], [200902, 0.2, 0, 0.1], [200904, 0, 0.3, 0], ] expected_term_counts = np.array([ [0, 1, 0], [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 0], [2, 1, 1], [0, 0, 0], [0, 1, 0] ]) expected_week_dates = [200850, 200851, 200852, 200853, 200901, 200902, 200903, 200904] expected_term_totals = [1, 0, 2, 1, 0, 2, 0, 1] actual_term_counts, actual_term_totals, actual_week_dates = self.run_test_with_conversion(combined_array) npt.assert_array_equal(expected_term_counts, actual_term_counts.todense()) npt.assert_array_equal(expected_term_totals, actual_term_totals) npt.assert_array_equal(expected_week_dates, actual_week_dates)
39.254658
113
0.578481
861
6,320
3.905923
0.085947
0.05174
0.040143
0.028546
0.871841
0.844187
0.823669
0.804341
0.795421
0.795421
0
0.161481
0.303323
6,320
160
114
39.5
0.602317
0
0
0.724832
0
0
0
0
0
0
0
0
0.120805
1
0.04698
false
0
0.026846
0
0.087248
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
98915e0cdc2b6a1745a7517a718e1b0e727e3df1
125
py
Python
pscweb2/get_random_secret_key.py
satamame/pscweb2
f15f6e2594a7339e4e964f2cb4d7363743b8cbd6
[ "MIT" ]
null
null
null
pscweb2/get_random_secret_key.py
satamame/pscweb2
f15f6e2594a7339e4e964f2cb4d7363743b8cbd6
[ "MIT" ]
1
2021-05-03T04:01:40.000Z
2021-05-03T04:01:40.000Z
pscutil/get_random_secret_key.py
satamame/pscutil
fd7786a142c0691296013e49c139a6104e8cbadf
[ "MIT" ]
null
null
null
from django.core.management.utils import get_random_secret_key print('SECRET_KEY = \'{0}\''.format(get_random_secret_key()))
41.666667
62
0.792
19
125
4.842105
0.684211
0.293478
0.326087
0.391304
0
0
0
0
0
0
0
0.008475
0.056
125
2
63
62.5
0.771186
0
0
0
0
0
0.112
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
8
7f906736df55894c3dae7e7df031a180cc485213
187
py
Python
Noisy_exp/HF_wo_Fire/TurtleBot_v0/envs/__init__.py
tufts-ai-robotics-group/ACuTE
fb3fa7a6f7e8c32b408717a5b938ff5e793eebc0
[ "MIT" ]
null
null
null
Noisy_exp/HF_wo_Fire/TurtleBot_v0/envs/__init__.py
tufts-ai-robotics-group/ACuTE
fb3fa7a6f7e8c32b408717a5b938ff5e793eebc0
[ "MIT" ]
null
null
null
Noisy_exp/HF_wo_Fire/TurtleBot_v0/envs/__init__.py
tufts-ai-robotics-group/ACuTE
fb3fa7a6f7e8c32b408717a5b938ff5e793eebc0
[ "MIT" ]
null
null
null
from TurtleBot_v0.envs.turtlebot_v0_env import TurtleBotV0Env from TurtleBot_v0.envs.turtlebot_v1_env import TurtleBotV1Env from TurtleBot_v0.envs.turtlebot_v2_env import TurtleBotV2Env
37.4
61
0.898396
27
187
5.888889
0.407407
0.27673
0.283019
0.358491
0.528302
0
0
0
0
0
0
0.051724
0.069519
187
4
62
46.75
0.862069
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
7fb0c1208f6e86af06088f6c7fb0f792482c4fa3
178
py
Python
python/testData/formatter/fromImportWrappingChopDownIfLong_after.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/formatter/fromImportWrappingChopDownIfLong_after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/formatter/fromImportWrappingChopDownIfLong_after.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
from module import foo, bar from module import foo, \ bar, \ baz from module import (foo, bar) from module import (foo, bar, baz)
19.777778
29
0.52809
22
178
4.272727
0.272727
0.425532
0.680851
0.808511
1
1
1
1
1
1
0
0
0.404494
178
8
30
22.25
0.886792
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
13
f69f91ac6b87c322a697c63fce7f1e51681e20b2
162
py
Python
mayday/objects/__init__.py
codacy-badger/mayday-ticketing-bot
7cbb1d201ececd2eb879c047e2cf7588862eb89f
[ "MIT" ]
null
null
null
mayday/objects/__init__.py
codacy-badger/mayday-ticketing-bot
7cbb1d201ececd2eb879c047e2cf7588862eb89f
[ "MIT" ]
null
null
null
mayday/objects/__init__.py
codacy-badger/mayday-ticketing-bot
7cbb1d201ececd2eb879c047e2cf7588862eb89f
[ "MIT" ]
null
null
null
from mayday.objects.query import Query from mayday.objects.ticket import Ticket from mayday.objects.user import User from mayday.objects.wishlist import Wishlist
32.4
44
0.851852
24
162
5.75
0.333333
0.289855
0.492754
0
0
0
0
0
0
0
0
0
0.098765
162
4
45
40.5
0.945205
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
f6c78a7ddf71cd65762f26d013d6291bdc16ca9a
6,035
py
Python
tests/test_get_log_offset.py
dkduo/duo_log_sync
03ed63983f593a8ddce6742313b90a2d0aa4122f
[ "MIT" ]
null
null
null
tests/test_get_log_offset.py
dkduo/duo_log_sync
03ed63983f593a8ddce6742313b90a2d0aa4122f
[ "MIT" ]
null
null
null
tests/test_get_log_offset.py
dkduo/duo_log_sync
03ed63983f593a8ddce6742313b90a2d0aa4122f
[ "MIT" ]
null
null
null
from unittest import TestCase from unittest.mock import patch from duologsync.producer.producer import Producer class TestGetLogOffset(TestCase): def test_authlog_offset_value_producer(self): sample_authlog_response = {'authlogs': [{'access_device': {'browser': 'Chrome', 'browser_version': '84.0.4147.125', 'flash_version': 'uninstalled', 'hostname': None, 'ip': '107.137.171.62', 'is_encryption_enabled': 'unknown', 'is_firewall_enabled': 'unknown', 'is_password_set': 'unknown', 'java_version': 'uninstalled', 'location': {'city': 'Ann Arbor', 'country': 'United States', 'state': 'Michigan'}, 'os': 'Mac OS X', 'os_version': '10.15.6'}, 'alias': '', 'application': {'key': 'DINALEC345G8XZDFP7EP', 'name': 'Web SDK'}, 'auth_device': {'ip': None, 'location': {'city': None, 'country': None, 'state': None}, 'name': 'WAQWPO8MD9PPHPW2HPCI'}, 'email': '', 'event_type': 'authentication', 'factor': None, 'isotimestamp': '2020-08-17T13:43:58.335969+00:00', 'ood_software': None, 'reason': 'user_approved', 'result': 'success', 'timestamp': 1597671838, 'trusted_endpoint_status': 'not trusted', 'txid': '1f6e1807-1732-49aa-8068-a973e6144e5e', 'user': {'groups': [], 'key': 'DU50VRIGM3ELGSN0XAA3', 'name': 'hi'}, 'eventtype': 'authentication', 'host': 'api-first.test.duosecurity.com'}], 'metadata': {'next_offset': ['1597671838335', '1f6e1807-1732-49aa-8068-a973e6144e5e'], 'total_objects': 94}} offset_in_metadata = sample_authlog_response['metadata']['next_offset'] producer_offset = Producer.get_log_offset(sample_authlog_response) self.assertEqual(offset_in_metadata, producer_offset) def test_authlog_offset_value_consumer(self): sample_authlog_response = {'authlogs': [{'access_device': {'browser': 'Chrome', 'browser_version': '84.0.4147.125', 'flash_version': 'uninstalled', 'hostname': None, 'ip': '107.137.171.62', 'is_encryption_enabled': 'unknown', 'is_firewall_enabled': 'unknown', 'is_password_set': 'unknown', 'java_version': 'uninstalled', 'location': {'city': 'Ann Arbor', 'country': 
'United States', 'state': 'Michigan'}, 'os': 'Mac OS X', 'os_version': '10.15.6'}, 'alias': '', 'application': {'key': 'DINALEC345G8XZDFP7EP', 'name': 'Web SDK'}, 'auth_device': {'ip': None, 'location': {'city': None, 'country': None, 'state': None}, 'name': 'WAQWPO8MD9PPHPW2HPCI'}, 'email': '', 'event_type': 'authentication', 'factor': None, 'isotimestamp': '2020-08-17T13:43:58.335969+00:00', 'ood_software': None, 'reason': 'user_approved', 'result': 'success', 'timestamp': 1597671838, 'trusted_endpoint_status': 'not trusted', 'txid': '1f6e1807-1732-49aa-8068-a973e6144e5e', 'user': {'groups': [], 'key': 'DU50VRIGM3ELGSN0XAA3', 'name': 'hi'}, 'eventtype': 'authentication', 'host': 'api-first.test.duosecurity.com'}], 'metadata': {'next_offset': ['1597671838335', '1f6e1807-1732-49aa-8068-a973e6144e5e'], 'total_objects': 94}} offset_in_metadata = sample_authlog_response['metadata']['next_offset'] producer_offset = Producer.get_log_offset(sample_authlog_response.get('authlogs')[-1]) self.assertEqual(offset_in_metadata, producer_offset) def test_adminaction_offset_value_producer(self): adminaction_response = [{'action': 'admin_login', 'description': '{"ip_address": "72.35.40.116", "device": "248-971-9157", "primary_auth_method": "Password", "factor": "push"}', 'isotimestamp': '2020-02-10T14:41:22+00:00', 'object': None, 'timestamp': 1581345682, 'username': 'CJ Na', 'eventtype': 'administrator', 'host': 'api-first.test.duosecurity.com'}] adminaction_current_offset = adminaction_response[-1]['timestamp'] + 1 adminaction_offset_to_set = Producer.get_log_offset(adminaction_response) self.assertEqual(adminaction_current_offset, adminaction_offset_to_set) def test_adminaction_offset_value_consumer(self): adminaction_response = [{'action': 'admin_login', 'description': '{"ip_address": "72.35.40.116", "device": "248-971-9157", "primary_auth_method": "Password", "factor": "push"}', 'isotimestamp': '2020-02-10T14:41:22+00:00', 'object': None, 'timestamp': 1581345682, 'username': 
'CJ Na', 'eventtype': 'administrator', 'host': 'api-first.test.duosecurity.com'}] adminaction_current_offset = adminaction_response[0]['timestamp'] + 1 adminaction_offset_to_set = Producer.get_log_offset(adminaction_response[0]) self.assertEqual(adminaction_current_offset, adminaction_offset_to_set) def test_telephony_offset_value_producer(self): telephony_response = [{'context': 'authentication', 'credits': 2, 'isotimestamp': '2020-05-18T11:32:53+00:00', 'phone': '+13135105356', 'timestamp': 1589801573, 'type': 'phone', 'eventtype': 'telephony', 'host': 'api-first.test.duosecurity.com'}] telephony_current_offset = telephony_response[-1]['timestamp'] + 1 telephony_offset_to_set = Producer.get_log_offset(telephony_response) self.assertEqual(telephony_current_offset, telephony_offset_to_set) def test_telephony_offset_value_consumer(self): telephony_response = [{'action': 'admin_login', 'description': '{"ip_address": "72.35.40.116", "device": "248-971-9157", "primary_auth_method": "Password", "factor": "push"}', 'isotimestamp': '2020-02-10T14:41:22+00:00', 'object': None, 'timestamp': 1581345682, 'username': 'CJ Na', 'eventtype': 'administrator', 'host': 'api-first.test.duosecurity.com'}] telephony_current_offset = telephony_response[0]['timestamp'] + 1 telephony_offset_to_set = Producer.get_log_offset(telephony_response[0]) self.assertEqual(telephony_current_offset, telephony_offset_to_set) def test_offset_is_retained_when_no_logs(self): sample_authlog_response = {'authlogs': [], 'metadata': {'next_offset': None, 'total_objects': 94}} current_log_offset = ['1596815692352', 'aecef809-a026-464f-9ba6-cc88920cd55d'] new_log_offset = Producer.get_log_offset(sample_authlog_response, current_log_offset) self.assertEqual(current_log_offset, new_log_offset)
123.163265
1,208
0.712013
714
6,035
5.754902
0.240896
0.026284
0.040886
0.034072
0.869555
0.84035
0.84035
0.84035
0.819177
0.793867
0
0.092293
0.107705
6,035
48
1,209
125.729167
0.670752
0
0
0.307692
0
0.076923
0.441591
0.112345
0
0
0
0
0.179487
1
0.179487
false
0.128205
0.076923
0
0.282051
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
9
1010424fc3081fd21420748cdde663e867adf670
564
py
Python
sfaira_extension/versions/topology_versions/human/embedding/__init__.py
theislab/sfaira_extension
22910c7f20e48defbcb5b82c2137e97ee7ed428f
[ "BSD-3-Clause" ]
null
null
null
sfaira_extension/versions/topology_versions/human/embedding/__init__.py
theislab/sfaira_extension
22910c7f20e48defbcb5b82c2137e97ee7ed428f
[ "BSD-3-Clause" ]
3
2020-11-03T17:37:37.000Z
2021-02-15T12:47:52.000Z
sfaira_extension/versions/topology_versions/human/embedding/__init__.py
theislab/sfaira_extension
22910c7f20e48defbcb5b82c2137e97ee7ed428f
[ "BSD-3-Clause" ]
1
2022-03-03T15:11:14.000Z
2022-03-03T15:11:14.000Z
from sfaira_extension.versions.topology_versions.human.embedding.ae import AE_TOPOLOGIES from sfaira_extension.versions.topology_versions.human.embedding.linear import LINEAR_TOPOLOGIES from sfaira_extension.versions.topology_versions.human.embedding.nmf import NMF_TOPOLOGIES from sfaira_extension.versions.topology_versions.human.embedding.vae import VAE_TOPOLOGIES from sfaira_extension.versions.topology_versions.human.embedding.vaeiaf import VAEIAF_TOPOLOGIES from sfaira_extension.versions.topology_versions.human.embedding.vaevamp import VAEVAMP_TOPOLOGIES
80.571429
98
0.904255
72
564
6.833333
0.208333
0.121951
0.231707
0.329268
0.796748
0.796748
0.796748
0.796748
0.680894
0
0
0
0.042553
564
6
99
94
0.911111
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
1
1
1
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
9
126623f80ff7d390cf0d288a5cb02794013ce9d8
9,684
py
Python
models.py
cemac/SWIFTDB
5c6bc0ae4ff674c2eede44783ca1738630d97ebb
[ "MIT" ]
2
2020-07-14T14:14:45.000Z
2021-05-13T13:01:51.000Z
models.py
cemac-tech/SWIFTDB
5c6bc0ae4ff674c2eede44783ca1738630d97ebb
[ "MIT" ]
18
2019-02-07T10:28:19.000Z
2020-06-18T18:31:41.000Z
models.py
cemac-tech/SWIFTDB
5c6bc0ae4ff674c2eede44783ca1738630d97ebb
[ "MIT" ]
1
2019-03-25T14:54:26.000Z
2019-03-25T14:54:26.000Z
from SWIFTDBApp import db class Partners(db.Model): __tablename__ = 'partners' id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(), nullable=False, unique=True) country = db.Column(db.String()) role = db.Column(db.String()) Deliverables_Rel = db.relationship('Deliverables') Tasks_Rel = db.relationship('Tasks') Users2Partners_Rel = db.relationship('Users2Partners') def __init__(self, name, country, role): self.name = name self.country = country self.role = role def __repr__(self): return '<name {}>'.format(self.name) class Work_Packages(db.Model): __tablename__ = 'work_packages' id = db.Column(db.Integer, primary_key=True, autoincrement=True) code = db.Column(db.String(), nullable=False, unique=True) name = db.Column(db.String(), nullable=False) previous_report = db.Column(db.String(())) status = db.Column(db.String()) issues = db.Column(db.String()) next_deliverable = db.Column(db.String()) date_edited = db.Column(db.Date()) Deliverables_Rel = db.relationship('Deliverables') Tasks_Rel = db.relationship('Tasks') Users2Work_Packages_Rel = db.relationship('Users2Work_Packages') def __init__(self, code, name, previous_report, status, issues, next_deliverable, date_edited): self.code = code self.name = name self.previous_report = previous_report self.status = status self.issues = issues self.next_deliverable = next_deliverable self.date_edited = date_edited def __repr__(self): return '<id {}>'.format(self.id) class Work_Packages_Archive(db.Model): __tablename__ = 'work_packages_archive' id = db.Column(db.Integer, primary_key=True, autoincrement=True) date_edited = db.Column(db.Date()) code = db.Column(db.String(), nullable=False, unique=True) status = db.Column(db.String()) issues = db.Column(db.String()) next_deliverable = db.Column(db.String()) def __init__(self, date_edited, code, status, issues, next_deliverable): self.date_edited = date_edited self.code = code self.status = status self.issues = issues self.next_deliverable 
= next_deliverable def __repr__(self): return '<id {}>'.format(self.id) class Deliverables(db.Model): __tablename__ = 'deliverables' id = db.Column(db.Integer, primary_key=True, autoincrement=True) code = db.Column(db.String(), nullable=False, unique=True) work_package = db.Column(db.String(), db.ForeignKey('work_packages.code'), nullable=False) description = db.Column(db.String(), nullable=False) partner = db.Column(db.String(), db.ForeignKey('partners.name'), nullable=False) person_responsible = db.Column(db.String()) month_due = db.Column(db.Date, nullable=False) previous_report = db.Column(db.String()) progress = db.Column(db.String()) percent = db.Column(db.Integer, nullable=False) papers = db.Column(db.String()) paper_submission_date = db.Column(db.Date()) date_edited = db.Column(db.Date()) def __init__(self, code, work_package, description, partner, person_responsible, month_due, previous_report, progress, percent, papers, paper_submission_date, date_edited): self.code = code self.work_package = work_package self.description = description self.partner = partner self.person_responsible = person_responsible self.month_due = month_due self.previous_report = previous_report self.progress = progress self.percent = percent self.papers = papers self.paper_submission_date = paper_submission_date self.date_edited = date_edited def __repr__(self): return '<id {}>'.format(self.id) class Deliverables_Archive(db.Model): __tablename__ = 'deliverables_archive' id = db.Column(db.Integer, primary_key=True, autoincrement=True) date_edited = db.Column(db.Date()) code = db.Column(db.String(), nullable=False, unique=True) person_responsible = db.Column(db.String()) progress = db.Column(db.String()) percent = db.Column(db.Integer) papers = db.Column(db.String()) paper_submission_date = db.Column(db.Date()) def __init__(self, date_edited, code, person_responsible, progress, percent, papers, paper_submission_date): self.date_edited = date_edited self.code = code 
self.person_responsible = person_responsible self.progress = progress self.percent = percent self.papers = papers self.paper_submission_date = paper_submission_date def __repr__(self): return '<id {}>'.format(self.id) class Users(db.Model): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(), unique=True) password = db.Column(db.String()) Users2Work_Packages_Rel = db.relationship('Users2Work_Packages') Users2Partners_Rel = db.relationship('Users2Partners') def __init__(self, username, password): self.username = username self.password = password def __repr__(self): return '<id {}>'.format(self.id) class Users2Work_Packages(db.Model): __tablename__ = 'users2work_packages' id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(), db.ForeignKey('users.username'), nullable=False) work_package = db.Column(db.String(), db.ForeignKey('work_packages.code'), nullable=False) __table_args__ = (db.UniqueConstraint('username', 'work_package', name='_username_work_package_uc'),) def __init__(self, username, work_package): self.username = username self.work_package = work_package def __repr__(self): return '<id {}>'.format(self.id) class Tasks(db.Model): __tablename__ = 'tasks' id = db.Column(db.Integer, primary_key=True, autoincrement=True) code = db.Column(db.String(), nullable=False, unique=True) work_package = db.Column(db.String(), db.ForeignKey('work_packages.code'), nullable=False) description = db.Column(db.String(), nullable=False) partner = db.Column(db.String(), db.ForeignKey('partners.name'), nullable=False) person_responsible = db.Column(db.String()) month_due = db.Column(db.Date, nullable=False) previous_report = db.Column(db.String()) progress = db.Column(db.String()) percent = db.Column(db.Integer, nullable=False) papers = db.Column(db.String()) paper_submission_date = db.Column(db.Date()) date_edited = db.Column(db.Date()) def __init__(self, code, work_package, description, partner, 
person_responsible, month_due, previous_report, progress, percent, papers, paper_submission_date, date_edited): self.code = code self.work_package = work_package self.description = description self.partner = partner self.person_responsible = person_responsible self.month_due = month_due self.previous_report = previous_report self.progress = progress self.percent = percent self.papers = papers self.paper_submission_date = paper_submission_date self.date_edited = date_edited def __repr__(self): return '<id {}>'.format(self.id) class Tasks_Archive(db.Model): __tablename__ = 'tasks_archive' id = db.Column(db.Integer, primary_key=True, autoincrement=True) date_edited = db.Column(db.Date()) code = db.Column(db.String(), nullable=False, unique=True) person_responsible = db.Column(db.String()) progress = db.Column(db.String()) percent = db.Column(db.Integer) papers = db.Column(db.String()) paper_submission_date = db.Column(db.Date()) def __init__(self, date_edited, code, person_responsible, progress, percent, papers, paper_submission_date): self.date_edited = date_edited self.code = code self.person_responsible = person_responsible self.progress = progress self.percent = percent self.papers = papers self.paper_submission_date = paper_submission_date def __repr__(self): return '<id {}>'.format(self.id) class Users2Partners(db.Model): __tablename__ = 'users2partners' id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(), db.ForeignKey('users.username'), nullable=False) partner = db.Column(db.String(), db.ForeignKey('partners.name'), nullable=False) __table_args__ = (db.UniqueConstraint('username', 'partner', name='_username_partner_uc'),) def __init__(self, username, partner): self.username = username self.partner = partner def __repr__(self): return '<id {}>'.format(self.id) class Counts(db.Model): __tablename__ = 'counts' id = db.Column(db.Integer, primary_key=True, autoincrement=True) code = db.Column(db.String(), nullable=False, unique=True) count 
= db.Column(db.Integer, nullable=False) def __init__(self, code, count): self.code = code self.count = count def __repr__(self): return '<id {}>'.format(self.id)
35.214545
78
0.649835
1,135
9,684
5.280176
0.059912
0.096112
0.12014
0.11747
0.85967
0.840314
0.817454
0.785583
0.744035
0.723344
0
0.001613
0.231826
9,684
274
79
35.343066
0.804006
0
0
0.776256
0
0
0.053284
0.00475
0
0
0
0
0
1
0.100457
false
0.013699
0.004566
0.050228
0.630137
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
8
89e7f2541dbcb0921455a954b33f601750189df3
219
py
Python
slam_recognition/util/math/almost_equal.py
SimLeek/pySILEnT
feec2d1fb654d7c8dc25f610916f4e9b202a1092
[ "Apache-2.0", "MIT" ]
5
2018-11-18T17:35:59.000Z
2019-02-13T20:25:58.000Z
slam_recognition/util/math/almost_equal.py
SimLeek/slam_recognition
feec2d1fb654d7c8dc25f610916f4e9b202a1092
[ "Apache-2.0", "MIT" ]
12
2018-10-31T01:57:55.000Z
2019-02-07T05:49:36.000Z
slam_recognition/util/math/almost_equal.py
SimLeek/pySILEnT
feec2d1fb654d7c8dc25f610916f4e9b202a1092
[ "Apache-2.0", "MIT" ]
null
null
null
import tensorflow as tf def almost_equal(tensor1, tensor2, diff=0.51): return tf.math.less_equal(tensor1 - tensor2 + diff, diff * 2) def equality_distance(tensor1, tensor2): return tf.abs(tensor1 - tensor2)
21.9
65
0.730594
32
219
4.90625
0.59375
0.356688
0.242038
0.292994
0
0
0
0
0
0
0
0.065574
0.164384
219
9
66
24.333333
0.79235
0
0
0
0
0
0
0
0
0
0
0
0
1
0.4
false
0
0.2
0.4
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
7
d60e8115260e553470be4fe369a7dd592a41e9e3
158
py
Python
bitmovin_api_sdk/encoding/encodings/captions/__init__.py
jaythecaesarean/bitmovin-api-sdk-python
48166511fcb9082041c552ace55a9b66cc59b794
[ "MIT" ]
11
2019-07-03T10:41:16.000Z
2022-02-25T21:48:06.000Z
bitmovin_api_sdk/encoding/encodings/captions/__init__.py
jaythecaesarean/bitmovin-api-sdk-python
48166511fcb9082041c552ace55a9b66cc59b794
[ "MIT" ]
8
2019-11-23T00:01:25.000Z
2021-04-29T12:30:31.000Z
bitmovin_api_sdk/encoding/encodings/captions/__init__.py
jaythecaesarean/bitmovin-api-sdk-python
48166511fcb9082041c552ace55a9b66cc59b794
[ "MIT" ]
13
2020-01-02T14:58:18.000Z
2022-03-26T12:10:30.000Z
from bitmovin_api_sdk.encoding.encodings.captions.captions_api import CaptionsApi from bitmovin_api_sdk.encoding.encodings.captions.scc.scc_api import SccApi
52.666667
81
0.892405
23
158
5.869565
0.478261
0.177778
0.222222
0.266667
0.637037
0.637037
0.637037
0
0
0
0
0
0.050633
158
2
82
79
0.9
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
d626f105a4c9e55f8a4cfa8901a52075b253fdbe
2,656
py
Python
pelote/classes/traversal.py
medialab/pelote
cef80daeb19ef2fef73f8a1fcfc8477aa11bfb9a
[ "MIT" ]
2
2022-03-07T20:00:10.000Z
2022-03-21T12:36:58.000Z
pelote/classes/traversal.py
medialab/pelote
cef80daeb19ef2fef73f8a1fcfc8477aa11bfb9a
[ "MIT" ]
55
2022-03-02T16:19:30.000Z
2022-03-31T12:44:05.000Z
pelote/classes/traversal.py
medialab/pelote
cef80daeb19ef2fef73f8a1fcfc8477aa11bfb9a
[ "MIT" ]
null
null
null
# ============================================================================= # Pelote DFS Stack Class # ============================================================================= # from typing import Generic, Optional, List, Set, TypeVar, Generator, Deque, cast from pelote.types import AnyGraph K = TypeVar("K") V = TypeVar("V") class DFSStack(Generic[K, V]): """ Specialized stack structure tailored to perform memory-efficient DFS traversal in graphs. """ def __init__(self, graph: AnyGraph): self.__graph = graph self.__stack: List[V] = [] self.__seen: Set[K] = set() def __len__(self) -> int: return len(self.__stack) def __contains__(self, node: K) -> bool: return node in self.__seen def has_already_seen_everything(self) -> bool: return len(self.__seen) == len(self.__graph) def nodes_yet_unseen(self) -> Generator[K, None, None]: for node in self.__graph: if len(self.__seen) == len(self.__graph): break if node in self.__seen: continue yield node def append(self, node: K, item: Optional[V] = None) -> bool: size_before = len(self.__seen) self.__seen.add(node) if size_before == len(self.__seen): return False self.__stack.append(cast(V, node) if item is None else item) return True def pop(self) -> V: return self.__stack.pop() class BFSQueue(Generic[K, V]): """ Specialized queue structure tailored to perform memory-efficient BFS traversal in graphs. 
""" def __init__(self, graph: AnyGraph): self.__graph = graph self.__queue: Deque[V] = Deque() self.__seen: Set[K] = set() def __len__(self) -> int: return len(self.__queue) def __contains__(self, node: K) -> bool: return node in self.__seen def has_already_seen_everything(self) -> bool: return len(self.__seen) == len(self.__graph) def nodes_yet_unseen(self) -> Generator[K, None, None]: for node in self.__graph: if len(self.__seen) == len(self.__graph): break if node in self.__seen: continue yield node def append(self, node: K, item: Optional[V] = None) -> bool: size_before = len(self.__seen) self.__seen.add(node) if size_before == len(self.__seen): return False self.__queue.append(cast(V, node) if item is None else item) return True def popleft(self) -> V: return self.__queue.popleft()
25.786408
80
0.556852
319
2,656
4.316614
0.210031
0.092956
0.063907
0.040668
0.769789
0.769789
0.71024
0.71024
0.71024
0.71024
0
0
0.280497
2,656
102
81
26.039216
0.720565
0.135166
0
0.724138
0
0
0.000887
0
0
0
0
0
0
1
0.241379
false
0
0.034483
0.137931
0.517241
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
8
c3bdf9c083ceca20d25857a6145ca0d5aad65bc7
16,497
py
Python
23-letters.py
hashbangstudio/Python-Minecraft-Examples
5e4632022a99a7ccc130972d4e8da9d09572492d
[ "BSD-3-Clause" ]
4
2016-06-07T15:30:52.000Z
2020-04-13T15:16:28.000Z
23-letters.py
hashbangstudio/Python-Minecraft-Examples
5e4632022a99a7ccc130972d4e8da9d09572492d
[ "BSD-3-Clause" ]
null
null
null
23-letters.py
hashbangstudio/Python-Minecraft-Examples
5e4632022a99a7ccc130972d4e8da9d09572492d
[ "BSD-3-Clause" ]
3
2016-11-27T22:27:16.000Z
2021-12-12T14:53:11.000Z
#!/usr/bin/env python import mcpi.minecraft as minecraft import mcpi.block as mc_block import sys A = [" 111 ", "1 1", "1 1", "11111", "11111", "1 1", "1 1"] B = ["1111 ", "1 1", "1 1", "1111 ", "1 1", "1 1", "1111 "] C = [" 111 ", "1 1", "1 ", "1 ", "1 ", "1 1", " 111 "] D = ["111 ", "1 1 ", "1 1", "1 1", "1 1", "1 1 ", "111 "] E = ["11111", "1 ", "1 ", "11111", "1 ", "1 ", "11111"] F = ["11111", "1 ", "1 ", "11111", "1 ", "1 ", "1 "] G = [" 111 ", "1 1", "1 ", "1 ", "1 111", "1 1", " 111 "] H = ["1 1", "1 1", "1 1", "11111", "1 1", "1 1", "1 1"] I = ["11111", " 1 ", " 1 ", " 1 ", " 1 ", " 1 ", "11111"] J = ["11111", " 1 ", " 1 ", " 1 ", "1 1 ", "1 1 ", "111 "] K = ["1 1", "1 1 ", "1 1 ", "11 ", "1 1 ", "1 1 ", "1 1"] L = ["1 ", "1 ", "1 ", "1 ", "1 ", "1 ", "11111"] M = ["1 1", "11 11", "1 1 1", "1 1", "1 1", "1 1", "1 1"] N = ["1 1", "11 1", "1 1 1", "1 11", "1 1", "1 1", "1 1"] O = [" 111 ", "1 1", "1 1", "1 1", "1 1", "1 1", " 111 "] P = ["1111 ", "1 1", "1 1", "1111 ", "1 ", "1 ", "1 "] Q = [" 11 ", "1 1 ", "1 1 ", "1 1 ", "1 1 ", "1 1 ", " 1111"] R = ["1111 ", "1 1", "1 1", "1111 ", "1 1 ", "1 1 ", "1 1"] S = [" 111 ", "1 1", "1 ", " 111 ", " 1", "1 1", " 111 "] T = ["11111", " 1 ", " 1 ", " 1 ", " 1 ", " 1 ", " 1 "] U = ["1 1", "1 1", "1 1", "1 1", "1 1", "1 1", " 111 "] V = ["1 1", "1 1", "1 1", "1 1", "1 1", " 1 1 ", " 1 "] W = ["1 1", "1 1", "1 1", "1 1", "1 1 1", "11 11", "1 1"] X = ["1 1", "1 1", " 1 1 ", " 1 ", " 1 1 ", "1 1", "1 1"] Y = ["1 1", "1 1", " 1 1 ", " 1 ", " 1 ", " 1 ", " 1 "] Z = ["11111", " 1", " 1 ", " 1 ", " 1 ", "1 ", "11111"] a = [" ", " ", " 11 ", " 1 ", "1111 ", "1 1 ", "1111 "] b = [" ", " ", "1 ", "1 ", "1111 ", "1 1 ", "1111 "] c = [" ", " ", " 11 ", "1 1 ", "1 ", "1 1 ", " 11 "] d = [" ", " ", " 1 ", " 1 ", " 111 ", "1 1 ", " 1111"] e = [" ", " ", " 111 ", "1 1", "1111 ", "1 ", " 1111"] f = [" ", " ", " 11 ", "1 1 ", "11 ", "1 ", "1 "] g = [" ", " ", " 111 ", "1 1 ", " 111 ", " 1 ", "111 "] h = [" ", " ", "1 ", "1 ", "1111 ", 
"1 1 ", "1 1 "] i = [" ", " ", " 1 ", " ", " 1 ", " 1 ", " 1 "] j = [" ", " ", " 1 ", " ", " 1 ", "1 1 ", " 11 "] k = [" ", " ", "1 ", "1 1 ", "11 ", "1 1 ", "1 1 "] l = [" ", " ", "1 ", "1 ", "1 ", "1 1 ", " 11 "] m = [" ", " ", " 111 ", "1 1 1", "1 1 1", "1 1", "1 1"] n = [" ", " ", " 111 ", "1 1", "1 1", "1 1", "1 1"] o = [" ", " ", " 111 ", "1 1", "1 1", "1 1", " 111 "] p = [" ", " ", "111 ", "1 1 ", "111 ", "1 ", "1 "] q = [" ", " ", " 111 ", "1 1 ", " 111 ", " 1 ", " 11"] r = [" ", " ", " 11 ", "1 1 ", "1 ", "1 ", "1 "] s = [" ", " ", " 111 ", "1 ", " 11 ", " 1 ", "111 "] t = [" ", " ", " 1 ", "111 ", " 1 ", " 1 1 ", " 111 "] u = [" ", " ", "1 1", "1 1", "1 1", "1 1", " 111 "] v = [" ", " ", "1 1", "1 1", "1 1", " 1 1 ", " 1 "] w = [" ", " ", "1 1", "1 1", "1 1 1", "11 11", "1 1"] x = [" ", " ", "1 1", " 1 1 ", " 1 ", " 1 1 ", "1 1"] y = [" ", " ", "1 1 ", "1 1 ", " 111 ", " 1 ", "111 "] z = [" ", " ", "11111", " 1 ", " 1 ", " 1 ", "11111"] ONE_1 = [" 1 ", " 11 ", "1 1 ", " 1 ", " 1 ", " 1 ", "11111"] TWO_2 = ["11111", " 1", " 1", "11111", "1 ", "1 ", "11111"] THREE_3 = [" 111 ", "1 1", " 1", " 111 ", " 1", "1 1", " 111 "] FOUR_4 = ["1 1", "1 1", "1 1", "11111", " 1", " 1", " 1"] FIVE_5 = ["11111", "1 ", "1 ", "11111", " 1", " 1", "11111"] SIX_6 = ["1111 ", "1 ", "1 ", "11111", "1 1", "1 1", "11111"] SEVEN_7 = ["11111", " 1", " 1 ", " 1 ", " 1 ", " 1 ", " 1 "] EIGHT_8 = ["11111", "1 1", "1 1", "11111", "1 1", "1 1", "11111"] NINE_9 = [" 111 ", "1 1", "1 1", " 111 ", " 1 ", " 1 ", " 1 "] ZERO_0 = [" 111 ", "1 1", "1 11", "1 1 1", "11 1", "1 1", " 111 "] BRACKET_OPEN = [" 1 ", " 1 ", "1 ", "1 ", "1 ", " 1 ", " 1 "] BRACKET_CLOSE = [" 1 ", " 1 ", " 1", " 1", " 1", " 1 ", " 1 "] FORWARD_SLASH = [" ", " 1", " 1 ", " 1 ", " 1 ", "1 ", " "] DIVIDE = [" ", " 1 ", " ", "11111", " ", " 1 ", " "] DOT = [" ", " ", " 111 ", " 111 ", " 111 ", " ", " "] PLUS = [" 1 ", " 1 ", " 1 ", "11111", " 1 ", " 1 ", " 1 "] MINUS = [" ", " ", " ", "11111", " ", " ", " "] DOLLAR_US = [" 
1 ", " 1111", "1 1 ", " 111 ", " 1 1", "1111 ", " 1 "] POUND_STERLING = [" 1 ", " 1 1 ", " 1 ", "111 ", " 1 ", " 1 ", "1111 "] CARET = [" 1 ", " 1 1 ", "1 1", " ", " ", " ", " "] ASTERIX = [" 1 ", "1 1 1", " 111 ", "11111", " 111 ", "1 1 1", " 1 "] AMPERSAND = [" 1 ", "1 1 ", "1 1 ", " 1 ", "1 1 1", "1 1 ", " 11 1"] EXCLAMATION_MARK = [" 1 ", " 1 ", " 1 ", " 1 ", " 1 ", " ", " 1 "] QUESTION_MARK = [" 111 ", "1 1", " 1 ", " 1 ", " 1 ", " ", " 1 "] DOUBLE_QUOTE = [" 1 1 ", " 1 1 ", " ", " ", " ", " ", " "] SINGLE_QUOTE = [" 1 ", " 1 ", " ", " ", " ", " ", " "] APOSTROPHE = [" 1 ", " 1 ", " ", " ", " ", " ", " "] COMMA = [" ", " ", " ", " 11 ", " 11 ", " 1 ", " 1 "] FULL_STOP = [" ", " ", " ", " ", " ", " 11 ", " 11 "] AT_SYMBOL = [" 111 ", "1 1", "1 111", "1 1 1", "1 1 1", "1 1 1", "1 111"] HASH = [" 1 1 ", " 1 1 ", "11111", " 1 1 ", "11111", " 1 1 ", " 1 1 "] TILDE = [" ", " ", " ", " 1 1 ", "1 1 ", " ", " "] COLON = [" ", " 1 ", " 1 ", " ", " ", " 1 ", " 1 "] SEMI_COLON = [" ", " 1 ", " 1 ", " ", " 1 ", " 1 ", " 11 "] MORE_THAN = ["1 ", " 1 ", " 1 ", " 1 ", " 1 ", "1 ", " "] LESS_THAN = [" 1", " 1 ", " 1 ", " 1 ", " 1 ", " 1 ", " 1"] EQUALS_SIGN = [" ", " ", "11111", " ", "11111", " ", " "] UNDERSCORE = [" ", " ", " ", " ", " ", " ", "11111"] PERCENT = ["11 ", "11 1", " 1 ", " 1 ", " 1 ", "1 11", " 11"] SPACE = [" ", " ", " ", " ", " ", " ", " "] MAP_OF_ALPHANUM_TO_GLYPH = {'A':A, 'B':B, 'C':C, 'D':D, 'E':E, 'F':F, 'G':G, 'H':H, 'I':I, 'J':J, 'K':K, 'L':L, 'M':M, 'N':N, 'O':O, 'P':P, 'Q':Q, 'R':R, 'Q':Q, 'S':S, 'T':T, 'U':U, 'V':V, 'X':X, 'Y':Y, 'Z':Z, 'a':a, 'b':b, 'c':c, 'd':d, 'e':e, 'f':f, 'g':g, 'h':h, 'i':i, 'j':j, 'k':k, 'l':l, 'm':m, 'n':n, 'o':o, 'p':p, 'q':q, 'r':r, 's':s, 't':t, 'u':u, 'v':v, 'x':x, 'y':y, 'z':z, '1':ONE_1, '2':TWO_2, '3':THREE_3, '4':FOUR_4, '5':FIVE_5, '6':SIX_6, '7':SEVEN_7, '8':EIGHT_8, '9':NINE_9, '0':ZERO_0, '(':BRACKET_OPEN, ')':BRACKET_CLOSE, '/':FORWARD_SLASH, '_':UNDERSCORE, '=':EQUALS_SIGN, '<':LESS_THAN, '>':MORE_THAN, 
'~':TILDE, ':':COLON, ';':SEMI_COLON, '@':AT_SYMBOL, '#':HASH, '\'':SINGLE_QUOTE, '\"':DOUBLE_QUOTE, ',':COMMA, '.':FULL_STOP, '\'':APOSTROPHE, '?':QUESTION_MARK, '!':EXCLAMATION_MARK, '&':AMPERSAND, '*':ASTERIX, '^':CARET, '+':PLUS, '\u00A3':POUND_STERLING, '$':DOLLAR_US, '-':MINUS, '%':PERCENT, ' ':SPACE } def convert_character_to_glyph(character): print ('char', character) return MAP_OF_ALPHANUM_TO_GLYPH[character] def create_character_at_coords_with_block_on_x_axis(character, xCoord, yCoord, zCoord, blockToUse): glyph = convert_character_to_glyph(character) print ('glyph', glyph) for y, row in enumerate(glyph): print ('y=', y, 'row', row) for x, column in enumerate(row): #check if glyph block should have a block, air or inverse print "col is", column print "x = ", x print column==1 print column=='1' if(column == '1'): print "creating block" mc.setBlock(xCoord + (len(row)-x), yCoord + (len(glyph)-y) , zCoord, blockToUse) else: mc.setBlock(xCoord + (len(row)-x), yCoord + (len(glyph)-y) , zCoord, mc_block.AIR) def print_string_to_world(string, lowerLeftX, lowerLeftY, lowerLeftZ, blockToUse): #iterate through the string per character writing into the world. x = lowerLeftX y = lowerLeftY z = lowerLeftZ for letter in string: create_character_at_coords_with_block_on_x_axis(letter, x, y, z, blockToUse) x -= 6 if __name__ == "__main__": mc = minecraft.Minecraft.create() pos = mc.player.getTilePos() numOfArgs = len(sys.argv) if numOfArgs == 2: print_string_to_world(sys.argv[1], pos.x+19, pos.y+1, pos.z+19, mc_block.WOOL.withData(2)) elif numOfArgs == 3: blockIdAndData = sys.argv[2].split(',') blockId = int(blockIdAndData[0]) blockData = int(blockIdAndData[1]) blockToUse = mc_block.Block(blockId,blockData) print_string_to_world(sys.argv[1], pos.x+19, pos.y+1, pos.z+19, blockToUse) else: print("incorrect number of arguments") sys.exit()
18.577703
99
0.190944
1,281
16,497
2.379391
0.13427
0.288058
0.312008
0.300525
0.488845
0.422572
0.372375
0.278215
0.249344
0.2021
0
0.193093
0.643693
16,497
887
100
18.598647
0.325451
0.008486
0
0.715573
0
0.005148
0.207533
0
0
0
0
0
0
0
null
null
0
0.003861
null
null
0.015444
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
7
c3be4dd895838e8740c4e5d3239b6da4b14c81ea
68,334
py
Python
sdk/python/pulumi_gcp/compute/vpn_tunnel.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
121
2018-06-18T19:16:42.000Z
2022-03-31T06:06:48.000Z
sdk/python/pulumi_gcp/compute/vpn_tunnel.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
492
2018-06-22T19:41:03.000Z
2022-03-31T15:33:53.000Z
sdk/python/pulumi_gcp/compute/vpn_tunnel.py
sisisin/pulumi-gcp
af6681d70ea457843409110c1324817fe55f68ad
[ "ECL-2.0", "Apache-2.0" ]
43
2018-06-19T01:43:13.000Z
2022-03-23T22:43:37.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['VPNTunnelArgs', 'VPNTunnel'] @pulumi.input_type class VPNTunnelArgs: def __init__(__self__, *, shared_secret: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, ike_version: Optional[pulumi.Input[int]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, local_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, peer_external_gateway: Optional[pulumi.Input[str]] = None, peer_external_gateway_interface: Optional[pulumi.Input[int]] = None, peer_gcp_gateway: Optional[pulumi.Input[str]] = None, peer_ip: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, remote_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, router: Optional[pulumi.Input[str]] = None, target_vpn_gateway: Optional[pulumi.Input[str]] = None, vpn_gateway: Optional[pulumi.Input[str]] = None, vpn_gateway_interface: Optional[pulumi.Input[int]] = None): """ The set of arguments for constructing a VPNTunnel resource. :param pulumi.Input[str] shared_secret: Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway. **Note**: This property is sensitive and will not be displayed in the plan. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[int] ike_version: IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2. 
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this VpnTunnel. :param pulumi.Input[Sequence[pulumi.Input[str]]] local_traffic_selectors: Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. :param pulumi.Input[str] name: Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] peer_external_gateway: URL of the peer side external VPN gateway to which this VPN tunnel is connected. :param pulumi.Input[int] peer_external_gateway_interface: The interface ID of the external VPN gateway to which this VPN tunnel is connected. :param pulumi.Input[str] peer_gcp_gateway: URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. If provided, the VPN tunnel will automatically use the same vpn_gateway_interface ID in the peer GCP VPN gateway. This field must reference a `compute.HaVpnGateway` resource. :param pulumi.Input[str] peer_ip: IP address of the peer VPN gateway. Only IPv4 is supported. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: The region where the tunnel is located. If unset, is set to the region of `target_vpn_gateway`. :param pulumi.Input[Sequence[pulumi.Input[str]]] remote_traffic_selectors: Remote traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. 
Only IPv4 is supported. :param pulumi.Input[str] router: URL of router resource to be used for dynamic routing. :param pulumi.Input[str] target_vpn_gateway: URL of the Target VPN gateway with which this VPN tunnel is associated. :param pulumi.Input[str] vpn_gateway: URL of the VPN gateway with which this VPN tunnel is associated. This must be used if a High Availability VPN gateway resource is created. This field must reference a `compute.HaVpnGateway` resource. :param pulumi.Input[int] vpn_gateway_interface: The interface ID of the VPN gateway with which this VPN tunnel is associated. """ pulumi.set(__self__, "shared_secret", shared_secret) if description is not None: pulumi.set(__self__, "description", description) if ike_version is not None: pulumi.set(__self__, "ike_version", ike_version) if labels is not None: pulumi.set(__self__, "labels", labels) if local_traffic_selectors is not None: pulumi.set(__self__, "local_traffic_selectors", local_traffic_selectors) if name is not None: pulumi.set(__self__, "name", name) if peer_external_gateway is not None: pulumi.set(__self__, "peer_external_gateway", peer_external_gateway) if peer_external_gateway_interface is not None: pulumi.set(__self__, "peer_external_gateway_interface", peer_external_gateway_interface) if peer_gcp_gateway is not None: pulumi.set(__self__, "peer_gcp_gateway", peer_gcp_gateway) if peer_ip is not None: pulumi.set(__self__, "peer_ip", peer_ip) if project is not None: pulumi.set(__self__, "project", project) if region is not None: pulumi.set(__self__, "region", region) if remote_traffic_selectors is not None: pulumi.set(__self__, "remote_traffic_selectors", remote_traffic_selectors) if router is not None: pulumi.set(__self__, "router", router) if target_vpn_gateway is not None: pulumi.set(__self__, "target_vpn_gateway", target_vpn_gateway) if vpn_gateway is not None: pulumi.set(__self__, "vpn_gateway", vpn_gateway) if vpn_gateway_interface is not None: pulumi.set(__self__, "vpn_gateway_interface", 
vpn_gateway_interface)

    # NOTE(review): generated Pulumi input boilerplate. Every field below is a
    # @property pair that proxies pulumi.get / pulumi.set over the resource's
    # wrapped input bag; the docstrings mirror the GCP provider schema.

    @property
    @pulumi.getter(name="sharedSecret")
    def shared_secret(self) -> pulumi.Input[str]:
        """
        Shared secret used to set the secure session between the Cloud VPN
        gateway and the peer VPN gateway.
        **Note**: This property is sensitive and will not be displayed in the plan.
        """
        return pulumi.get(self, "shared_secret")

    @shared_secret.setter
    def shared_secret(self, value: pulumi.Input[str]):
        pulumi.set(self, "shared_secret", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        An optional description of this resource.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="ikeVersion")
    def ike_version(self) -> Optional[pulumi.Input[int]]:
        """
        IKE protocol version to use when establishing the VPN tunnel with
        peer VPN gateway.
        Acceptable IKE versions are 1 or 2. Default version is 2.
        """
        return pulumi.get(self, "ike_version")

    @ike_version.setter
    def ike_version(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "ike_version", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Labels to apply to this VpnTunnel.
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter(name="localTrafficSelectors")
    def local_traffic_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Local traffic selector to use when establishing the VPN tunnel with
        peer VPN gateway. The value should be a CIDR formatted string,
        for example `192.168.0.0/16`. The ranges should be disjoint.
        Only IPv4 is supported.
        """
        return pulumi.get(self, "local_traffic_selectors")

    @local_traffic_selectors.setter
    def local_traffic_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "local_traffic_selectors", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the resource. The name must be 1-63 characters long, and
        comply with RFC1035. Specifically, the name must be 1-63
        characters long and match the regular expression `a-z?` which means
        the first character must be a lowercase letter, and all following
        characters must be a dash, lowercase letter, or digit,
        except the last character, which cannot be a dash.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="peerExternalGateway")
    def peer_external_gateway(self) -> Optional[pulumi.Input[str]]:
        """
        URL of the peer side external VPN gateway to which this VPN tunnel
        is connected.
        """
        return pulumi.get(self, "peer_external_gateway")

    @peer_external_gateway.setter
    def peer_external_gateway(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_external_gateway", value)

    @property
    @pulumi.getter(name="peerExternalGatewayInterface")
    def peer_external_gateway_interface(self) -> Optional[pulumi.Input[int]]:
        """
        The interface ID of the external VPN gateway to which this VPN
        tunnel is connected.
        """
        return pulumi.get(self, "peer_external_gateway_interface")

    @peer_external_gateway_interface.setter
    def peer_external_gateway_interface(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "peer_external_gateway_interface", value)

    @property
    @pulumi.getter(name="peerGcpGateway")
    def peer_gcp_gateway(self) -> Optional[pulumi.Input[str]]:
        """
        URL of the peer side HA GCP VPN gateway to which this VPN tunnel is
        connected. If provided, the VPN tunnel will automatically use the
        same vpn_gateway_interface ID in the peer GCP VPN gateway.
        This field must reference a `compute.HaVpnGateway` resource.
        """
        return pulumi.get(self, "peer_gcp_gateway")

    @peer_gcp_gateway.setter
    def peer_gcp_gateway(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_gcp_gateway", value)

    @property
    @pulumi.getter(name="peerIp")
    def peer_ip(self) -> Optional[pulumi.Input[str]]:
        """
        IP address of the peer VPN gateway. Only IPv4 is supported.
        """
        return pulumi.get(self, "peer_ip")

    @peer_ip.setter
    def peer_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_ip", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def region(self) -> Optional[pulumi.Input[str]]:
        """
        The region where the tunnel is located. If unset, is set to the
        region of `target_vpn_gateway`.
        """
        return pulumi.get(self, "region")

    @region.setter
    def region(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "region", value)

    @property
    @pulumi.getter(name="remoteTrafficSelectors")
    def remote_traffic_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Remote traffic selector to use when establishing the VPN tunnel with
        peer VPN gateway. The value should be a CIDR formatted string,
        for example `192.168.0.0/16`. The ranges should be disjoint.
        Only IPv4 is supported.
        """
        return pulumi.get(self, "remote_traffic_selectors")

    @remote_traffic_selectors.setter
    def remote_traffic_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "remote_traffic_selectors", value)

    @property
    @pulumi.getter
    def router(self) -> Optional[pulumi.Input[str]]:
        """
        URL of router resource to be used for dynamic routing.
        """
        return pulumi.get(self, "router")

    @router.setter
    def router(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "router", value)

    @property
    @pulumi.getter(name="targetVpnGateway")
    def target_vpn_gateway(self) -> Optional[pulumi.Input[str]]:
        """
        URL of the Target VPN gateway with which this VPN tunnel is
        associated.
        """
        return pulumi.get(self, "target_vpn_gateway")

    @target_vpn_gateway.setter
    def target_vpn_gateway(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_vpn_gateway", value)

    @property
    @pulumi.getter(name="vpnGateway")
    def vpn_gateway(self) -> Optional[pulumi.Input[str]]:
        """
        URL of the VPN gateway with which this VPN tunnel is associated.
        This must be used if a High Availability VPN gateway resource is
        created. This field must reference a `compute.HaVpnGateway` resource.
        """
        return pulumi.get(self, "vpn_gateway")

    @vpn_gateway.setter
    def vpn_gateway(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vpn_gateway", value)

    @property
    @pulumi.getter(name="vpnGatewayInterface")
    def vpn_gateway_interface(self) -> Optional[pulumi.Input[int]]:
        """
        The interface ID of the VPN gateway with which this VPN tunnel is associated. 
""" return pulumi.get(self, "vpn_gateway_interface") @vpn_gateway_interface.setter def vpn_gateway_interface(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "vpn_gateway_interface", value) @pulumi.input_type class _VPNTunnelState: def __init__(__self__, *, creation_timestamp: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, detailed_status: Optional[pulumi.Input[str]] = None, ike_version: Optional[pulumi.Input[int]] = None, label_fingerprint: Optional[pulumi.Input[str]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, local_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, peer_external_gateway: Optional[pulumi.Input[str]] = None, peer_external_gateway_interface: Optional[pulumi.Input[int]] = None, peer_gcp_gateway: Optional[pulumi.Input[str]] = None, peer_ip: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, remote_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, router: Optional[pulumi.Input[str]] = None, self_link: Optional[pulumi.Input[str]] = None, shared_secret: Optional[pulumi.Input[str]] = None, shared_secret_hash: Optional[pulumi.Input[str]] = None, target_vpn_gateway: Optional[pulumi.Input[str]] = None, tunnel_id: Optional[pulumi.Input[str]] = None, vpn_gateway: Optional[pulumi.Input[str]] = None, vpn_gateway_interface: Optional[pulumi.Input[int]] = None): """ Input properties used for looking up and filtering VPNTunnel resources. :param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] detailed_status: Detailed status message for the VPN tunnel. :param pulumi.Input[int] ike_version: IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. 
Acceptable IKE versions are 1 or 2. Default version is 2. :param pulumi.Input[str] label_fingerprint: The fingerprint used for optimistic locking of this resource. Used internally during updates. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this VpnTunnel. :param pulumi.Input[Sequence[pulumi.Input[str]]] local_traffic_selectors: Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. :param pulumi.Input[str] name: Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] peer_external_gateway: URL of the peer side external VPN gateway to which this VPN tunnel is connected. :param pulumi.Input[int] peer_external_gateway_interface: The interface ID of the external VPN gateway to which this VPN tunnel is connected. :param pulumi.Input[str] peer_gcp_gateway: URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. If provided, the VPN tunnel will automatically use the same vpn_gateway_interface ID in the peer GCP VPN gateway. This field must reference a `compute.HaVpnGateway` resource. :param pulumi.Input[str] peer_ip: IP address of the peer VPN gateway. Only IPv4 is supported. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: The region where the tunnel is located. If unset, is set to the region of `target_vpn_gateway`. 
:param pulumi.Input[Sequence[pulumi.Input[str]]] remote_traffic_selectors: Remote traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. :param pulumi.Input[str] router: URL of router resource to be used for dynamic routing. :param pulumi.Input[str] self_link: The URI of the created resource. :param pulumi.Input[str] shared_secret: Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway. **Note**: This property is sensitive and will not be displayed in the plan. :param pulumi.Input[str] shared_secret_hash: Hash of the shared secret. :param pulumi.Input[str] target_vpn_gateway: URL of the Target VPN gateway with which this VPN tunnel is associated. :param pulumi.Input[str] tunnel_id: The unique identifier for the resource. This identifier is defined by the server. :param pulumi.Input[str] vpn_gateway: URL of the VPN gateway with which this VPN tunnel is associated. This must be used if a High Availability VPN gateway resource is created. This field must reference a `compute.HaVpnGateway` resource. :param pulumi.Input[int] vpn_gateway_interface: The interface ID of the VPN gateway with which this VPN tunnel is associated. 
""" if creation_timestamp is not None: pulumi.set(__self__, "creation_timestamp", creation_timestamp) if description is not None: pulumi.set(__self__, "description", description) if detailed_status is not None: pulumi.set(__self__, "detailed_status", detailed_status) if ike_version is not None: pulumi.set(__self__, "ike_version", ike_version) if label_fingerprint is not None: pulumi.set(__self__, "label_fingerprint", label_fingerprint) if labels is not None: pulumi.set(__self__, "labels", labels) if local_traffic_selectors is not None: pulumi.set(__self__, "local_traffic_selectors", local_traffic_selectors) if name is not None: pulumi.set(__self__, "name", name) if peer_external_gateway is not None: pulumi.set(__self__, "peer_external_gateway", peer_external_gateway) if peer_external_gateway_interface is not None: pulumi.set(__self__, "peer_external_gateway_interface", peer_external_gateway_interface) if peer_gcp_gateway is not None: pulumi.set(__self__, "peer_gcp_gateway", peer_gcp_gateway) if peer_ip is not None: pulumi.set(__self__, "peer_ip", peer_ip) if project is not None: pulumi.set(__self__, "project", project) if region is not None: pulumi.set(__self__, "region", region) if remote_traffic_selectors is not None: pulumi.set(__self__, "remote_traffic_selectors", remote_traffic_selectors) if router is not None: pulumi.set(__self__, "router", router) if self_link is not None: pulumi.set(__self__, "self_link", self_link) if shared_secret is not None: pulumi.set(__self__, "shared_secret", shared_secret) if shared_secret_hash is not None: pulumi.set(__self__, "shared_secret_hash", shared_secret_hash) if target_vpn_gateway is not None: pulumi.set(__self__, "target_vpn_gateway", target_vpn_gateway) if tunnel_id is not None: pulumi.set(__self__, "tunnel_id", tunnel_id) if vpn_gateway is not None: pulumi.set(__self__, "vpn_gateway", vpn_gateway) if vpn_gateway_interface is not None: pulumi.set(__self__, "vpn_gateway_interface", vpn_gateway_interface) @property 
@pulumi.getter(name="creationTimestamp") def creation_timestamp(self) -> Optional[pulumi.Input[str]]: """ Creation timestamp in RFC3339 text format. """ return pulumi.get(self, "creation_timestamp") @creation_timestamp.setter def creation_timestamp(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "creation_timestamp", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional description of this resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="detailedStatus") def detailed_status(self) -> Optional[pulumi.Input[str]]: """ Detailed status message for the VPN tunnel. """ return pulumi.get(self, "detailed_status") @detailed_status.setter def detailed_status(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "detailed_status", value) @property @pulumi.getter(name="ikeVersion") def ike_version(self) -> Optional[pulumi.Input[int]]: """ IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2. """ return pulumi.get(self, "ike_version") @ike_version.setter def ike_version(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "ike_version", value) @property @pulumi.getter(name="labelFingerprint") def label_fingerprint(self) -> Optional[pulumi.Input[str]]: """ The fingerprint used for optimistic locking of this resource. Used internally during updates. """ return pulumi.get(self, "label_fingerprint") @label_fingerprint.setter def label_fingerprint(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "label_fingerprint", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Labels to apply to this VpnTunnel. 
""" return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter(name="localTrafficSelectors") def local_traffic_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. """ return pulumi.get(self, "local_traffic_selectors") @local_traffic_selectors.setter def local_traffic_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "local_traffic_selectors", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="peerExternalGateway") def peer_external_gateway(self) -> Optional[pulumi.Input[str]]: """ URL of the peer side external VPN gateway to which this VPN tunnel is connected. """ return pulumi.get(self, "peer_external_gateway") @peer_external_gateway.setter def peer_external_gateway(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "peer_external_gateway", value) @property @pulumi.getter(name="peerExternalGatewayInterface") def peer_external_gateway_interface(self) -> Optional[pulumi.Input[int]]: """ The interface ID of the external VPN gateway to which this VPN tunnel is connected. 
""" return pulumi.get(self, "peer_external_gateway_interface") @peer_external_gateway_interface.setter def peer_external_gateway_interface(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "peer_external_gateway_interface", value) @property @pulumi.getter(name="peerGcpGateway") def peer_gcp_gateway(self) -> Optional[pulumi.Input[str]]: """ URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. If provided, the VPN tunnel will automatically use the same vpn_gateway_interface ID in the peer GCP VPN gateway. This field must reference a `compute.HaVpnGateway` resource. """ return pulumi.get(self, "peer_gcp_gateway") @peer_gcp_gateway.setter def peer_gcp_gateway(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "peer_gcp_gateway", value) @property @pulumi.getter(name="peerIp") def peer_ip(self) -> Optional[pulumi.Input[str]]: """ IP address of the peer VPN gateway. Only IPv4 is supported. """ return pulumi.get(self, "peer_ip") @peer_ip.setter def peer_ip(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "peer_ip", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: """ The region where the tunnel is located. If unset, is set to the region of `target_vpn_gateway`. """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) @property @pulumi.getter(name="remoteTrafficSelectors") def remote_traffic_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ Remote traffic selector to use when establishing the VPN tunnel with peer VPN gateway. 
The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. """ return pulumi.get(self, "remote_traffic_selectors") @remote_traffic_selectors.setter def remote_traffic_selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "remote_traffic_selectors", value) @property @pulumi.getter def router(self) -> Optional[pulumi.Input[str]]: """ URL of router resource to be used for dynamic routing. """ return pulumi.get(self, "router") @router.setter def router(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "router", value) @property @pulumi.getter(name="selfLink") def self_link(self) -> Optional[pulumi.Input[str]]: """ The URI of the created resource. """ return pulumi.get(self, "self_link") @self_link.setter def self_link(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "self_link", value) @property @pulumi.getter(name="sharedSecret") def shared_secret(self) -> Optional[pulumi.Input[str]]: """ Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway. **Note**: This property is sensitive and will not be displayed in the plan. """ return pulumi.get(self, "shared_secret") @shared_secret.setter def shared_secret(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "shared_secret", value) @property @pulumi.getter(name="sharedSecretHash") def shared_secret_hash(self) -> Optional[pulumi.Input[str]]: """ Hash of the shared secret. """ return pulumi.get(self, "shared_secret_hash") @shared_secret_hash.setter def shared_secret_hash(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "shared_secret_hash", value) @property @pulumi.getter(name="targetVpnGateway") def target_vpn_gateway(self) -> Optional[pulumi.Input[str]]: """ URL of the Target VPN gateway with which this VPN tunnel is associated. 
""" return pulumi.get(self, "target_vpn_gateway") @target_vpn_gateway.setter def target_vpn_gateway(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "target_vpn_gateway", value) @property @pulumi.getter(name="tunnelId") def tunnel_id(self) -> Optional[pulumi.Input[str]]: """ The unique identifier for the resource. This identifier is defined by the server. """ return pulumi.get(self, "tunnel_id") @tunnel_id.setter def tunnel_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "tunnel_id", value) @property @pulumi.getter(name="vpnGateway") def vpn_gateway(self) -> Optional[pulumi.Input[str]]: """ URL of the VPN gateway with which this VPN tunnel is associated. This must be used if a High Availability VPN gateway resource is created. This field must reference a `compute.HaVpnGateway` resource. """ return pulumi.get(self, "vpn_gateway") @vpn_gateway.setter def vpn_gateway(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "vpn_gateway", value) @property @pulumi.getter(name="vpnGatewayInterface") def vpn_gateway_interface(self) -> Optional[pulumi.Input[int]]: """ The interface ID of the VPN gateway with which this VPN tunnel is associated. 
""" return pulumi.get(self, "vpn_gateway_interface") @vpn_gateway_interface.setter def vpn_gateway_interface(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "vpn_gateway_interface", value) class VPNTunnel(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, ike_version: Optional[pulumi.Input[int]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, local_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, peer_external_gateway: Optional[pulumi.Input[str]] = None, peer_external_gateway_interface: Optional[pulumi.Input[int]] = None, peer_gcp_gateway: Optional[pulumi.Input[str]] = None, peer_ip: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, remote_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, router: Optional[pulumi.Input[str]] = None, shared_secret: Optional[pulumi.Input[str]] = None, target_vpn_gateway: Optional[pulumi.Input[str]] = None, vpn_gateway: Optional[pulumi.Input[str]] = None, vpn_gateway_interface: Optional[pulumi.Input[int]] = None, __props__=None): """ VPN tunnel resource. To get more information about VpnTunnel, see: * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels) * How-to Guides * [Cloud VPN Overview](https://cloud.google.com/vpn/docs/concepts/overview) * [Networks and Tunnel Routing](https://cloud.google.com/vpn/docs/concepts/choosing-networks-routing) > **Warning:** All arguments including `shared_secret` will be stored in the raw state as plain-text. 
## Example Usage ### Vpn Tunnel Basic ```python import pulumi import pulumi_gcp as gcp network1 = gcp.compute.Network("network1") target_gateway = gcp.compute.VPNGateway("targetGateway", network=network1.id) vpn_static_ip = gcp.compute.Address("vpnStaticIp") fr_esp = gcp.compute.ForwardingRule("frEsp", ip_protocol="ESP", ip_address=vpn_static_ip.address, target=target_gateway.id) fr_udp500 = gcp.compute.ForwardingRule("frUdp500", ip_protocol="UDP", port_range="500", ip_address=vpn_static_ip.address, target=target_gateway.id) fr_udp4500 = gcp.compute.ForwardingRule("frUdp4500", ip_protocol="UDP", port_range="4500", ip_address=vpn_static_ip.address, target=target_gateway.id) tunnel1 = gcp.compute.VPNTunnel("tunnel1", peer_ip="15.0.0.120", shared_secret="a secret message", target_vpn_gateway=target_gateway.id, opts=pulumi.ResourceOptions(depends_on=[ fr_esp, fr_udp500, fr_udp4500, ])) route1 = gcp.compute.Route("route1", network=network1.name, dest_range="15.0.0.0/24", priority=1000, next_hop_vpn_tunnel=tunnel1.id) ``` ### Vpn Tunnel Beta ```python import pulumi import pulumi_gcp as gcp network1 = gcp.compute.Network("network1", opts=pulumi.ResourceOptions(provider=google_beta)) target_gateway = gcp.compute.VPNGateway("targetGateway", network=network1.id, opts=pulumi.ResourceOptions(provider=google_beta)) vpn_static_ip = gcp.compute.Address("vpnStaticIp", opts=pulumi.ResourceOptions(provider=google_beta)) fr_esp = gcp.compute.ForwardingRule("frEsp", ip_protocol="ESP", ip_address=vpn_static_ip.address, target=target_gateway.id, opts=pulumi.ResourceOptions(provider=google_beta)) fr_udp500 = gcp.compute.ForwardingRule("frUdp500", ip_protocol="UDP", port_range="500", ip_address=vpn_static_ip.address, target=target_gateway.id, opts=pulumi.ResourceOptions(provider=google_beta)) fr_udp4500 = gcp.compute.ForwardingRule("frUdp4500", ip_protocol="UDP", port_range="4500", ip_address=vpn_static_ip.address, target=target_gateway.id, 
opts=pulumi.ResourceOptions(provider=google_beta)) tunnel1 = gcp.compute.VPNTunnel("tunnel1", peer_ip="15.0.0.120", shared_secret="a secret message", target_vpn_gateway=target_gateway.id, labels={ "foo": "bar", }, opts=pulumi.ResourceOptions(provider=google_beta, depends_on=[ fr_esp, fr_udp500, fr_udp4500, ])) route1 = gcp.compute.Route("route1", network=network1.name, dest_range="15.0.0.0/24", priority=1000, next_hop_vpn_tunnel=tunnel1.id, opts=pulumi.ResourceOptions(provider=google_beta)) ``` ## Import VpnTunnel can be imported using any of these accepted formats ```sh $ pulumi import gcp:compute/vPNTunnel:VPNTunnel default projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}} ``` ```sh $ pulumi import gcp:compute/vPNTunnel:VPNTunnel default {{project}}/{{region}}/{{name}} ``` ```sh $ pulumi import gcp:compute/vPNTunnel:VPNTunnel default {{region}}/{{name}} ``` ```sh $ pulumi import gcp:compute/vPNTunnel:VPNTunnel default {{name}} ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[int] ike_version: IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this VpnTunnel. :param pulumi.Input[Sequence[pulumi.Input[str]]] local_traffic_selectors: Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. :param pulumi.Input[str] name: Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] peer_external_gateway: URL of the peer side external VPN gateway to which this VPN tunnel is connected. :param pulumi.Input[int] peer_external_gateway_interface: The interface ID of the external VPN gateway to which this VPN tunnel is connected. :param pulumi.Input[str] peer_gcp_gateway: URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. If provided, the VPN tunnel will automatically use the same vpn_gateway_interface ID in the peer GCP VPN gateway. This field must reference a `compute.HaVpnGateway` resource. :param pulumi.Input[str] peer_ip: IP address of the peer VPN gateway. Only IPv4 is supported. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: The region where the tunnel is located. If unset, it is set to the region of `target_vpn_gateway`. :param pulumi.Input[Sequence[pulumi.Input[str]]] remote_traffic_selectors: Remote traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. :param pulumi.Input[str] router: URL of router resource to be used for dynamic routing. :param pulumi.Input[str] shared_secret: Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway. **Note**: This property is sensitive and will not be displayed in the plan. :param pulumi.Input[str] target_vpn_gateway: URL of the Target VPN gateway with which this VPN tunnel is associated. 
:param pulumi.Input[str] vpn_gateway: URL of the VPN gateway with which this VPN tunnel is associated. This must be used if a High Availability VPN gateway resource is created. This field must reference a `compute.HaVpnGateway` resource. :param pulumi.Input[int] vpn_gateway_interface: The interface ID of the VPN gateway with which this VPN tunnel is associated. """ ... @overload def __init__(__self__, resource_name: str, args: VPNTunnelArgs, opts: Optional[pulumi.ResourceOptions] = None): """ VPN tunnel resource. To get more information about VpnTunnel, see: * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels) * How-to Guides * [Cloud VPN Overview](https://cloud.google.com/vpn/docs/concepts/overview) * [Networks and Tunnel Routing](https://cloud.google.com/vpn/docs/concepts/choosing-networks-routing) > **Warning:** All arguments including `shared_secret` will be stored in the raw state as plain-text. ## Example Usage ### Vpn Tunnel Basic ```python import pulumi import pulumi_gcp as gcp network1 = gcp.compute.Network("network1") target_gateway = gcp.compute.VPNGateway("targetGateway", network=network1.id) vpn_static_ip = gcp.compute.Address("vpnStaticIp") fr_esp = gcp.compute.ForwardingRule("frEsp", ip_protocol="ESP", ip_address=vpn_static_ip.address, target=target_gateway.id) fr_udp500 = gcp.compute.ForwardingRule("frUdp500", ip_protocol="UDP", port_range="500", ip_address=vpn_static_ip.address, target=target_gateway.id) fr_udp4500 = gcp.compute.ForwardingRule("frUdp4500", ip_protocol="UDP", port_range="4500", ip_address=vpn_static_ip.address, target=target_gateway.id) tunnel1 = gcp.compute.VPNTunnel("tunnel1", peer_ip="15.0.0.120", shared_secret="a secret message", target_vpn_gateway=target_gateway.id, opts=pulumi.ResourceOptions(depends_on=[ fr_esp, fr_udp500, fr_udp4500, ])) route1 = gcp.compute.Route("route1", network=network1.name, dest_range="15.0.0.0/24", priority=1000, next_hop_vpn_tunnel=tunnel1.id) ``` ### Vpn Tunnel 
Beta ```python import pulumi import pulumi_gcp as gcp network1 = gcp.compute.Network("network1", opts=pulumi.ResourceOptions(provider=google_beta)) target_gateway = gcp.compute.VPNGateway("targetGateway", network=network1.id, opts=pulumi.ResourceOptions(provider=google_beta)) vpn_static_ip = gcp.compute.Address("vpnStaticIp", opts=pulumi.ResourceOptions(provider=google_beta)) fr_esp = gcp.compute.ForwardingRule("frEsp", ip_protocol="ESP", ip_address=vpn_static_ip.address, target=target_gateway.id, opts=pulumi.ResourceOptions(provider=google_beta)) fr_udp500 = gcp.compute.ForwardingRule("frUdp500", ip_protocol="UDP", port_range="500", ip_address=vpn_static_ip.address, target=target_gateway.id, opts=pulumi.ResourceOptions(provider=google_beta)) fr_udp4500 = gcp.compute.ForwardingRule("frUdp4500", ip_protocol="UDP", port_range="4500", ip_address=vpn_static_ip.address, target=target_gateway.id, opts=pulumi.ResourceOptions(provider=google_beta)) tunnel1 = gcp.compute.VPNTunnel("tunnel1", peer_ip="15.0.0.120", shared_secret="a secret message", target_vpn_gateway=target_gateway.id, labels={ "foo": "bar", }, opts=pulumi.ResourceOptions(provider=google_beta, depends_on=[ fr_esp, fr_udp500, fr_udp4500, ])) route1 = gcp.compute.Route("route1", network=network1.name, dest_range="15.0.0.0/24", priority=1000, next_hop_vpn_tunnel=tunnel1.id, opts=pulumi.ResourceOptions(provider=google_beta)) ``` ## Import VpnTunnel can be imported using any of these accepted formats ```sh $ pulumi import gcp:compute/vPNTunnel:VPNTunnel default projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}} ``` ```sh $ pulumi import gcp:compute/vPNTunnel:VPNTunnel default {{project}}/{{region}}/{{name}} ``` ```sh $ pulumi import gcp:compute/vPNTunnel:VPNTunnel default {{region}}/{{name}} ``` ```sh $ pulumi import gcp:compute/vPNTunnel:VPNTunnel default {{name}} ``` :param str resource_name: The name of the resource. 
:param VPNTunnelArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(VPNTunnelArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, ike_version: Optional[pulumi.Input[int]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, local_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, peer_external_gateway: Optional[pulumi.Input[str]] = None, peer_external_gateway_interface: Optional[pulumi.Input[int]] = None, peer_gcp_gateway: Optional[pulumi.Input[str]] = None, peer_ip: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, remote_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, router: Optional[pulumi.Input[str]] = None, shared_secret: Optional[pulumi.Input[str]] = None, target_vpn_gateway: Optional[pulumi.Input[str]] = None, vpn_gateway: Optional[pulumi.Input[str]] = None, vpn_gateway_interface: Optional[pulumi.Input[int]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = 
VPNTunnelArgs.__new__(VPNTunnelArgs) __props__.__dict__["description"] = description __props__.__dict__["ike_version"] = ike_version __props__.__dict__["labels"] = labels __props__.__dict__["local_traffic_selectors"] = local_traffic_selectors __props__.__dict__["name"] = name __props__.__dict__["peer_external_gateway"] = peer_external_gateway __props__.__dict__["peer_external_gateway_interface"] = peer_external_gateway_interface __props__.__dict__["peer_gcp_gateway"] = peer_gcp_gateway __props__.__dict__["peer_ip"] = peer_ip __props__.__dict__["project"] = project __props__.__dict__["region"] = region __props__.__dict__["remote_traffic_selectors"] = remote_traffic_selectors __props__.__dict__["router"] = router if shared_secret is None and not opts.urn: raise TypeError("Missing required property 'shared_secret'") __props__.__dict__["shared_secret"] = shared_secret __props__.__dict__["target_vpn_gateway"] = target_vpn_gateway __props__.__dict__["vpn_gateway"] = vpn_gateway __props__.__dict__["vpn_gateway_interface"] = vpn_gateway_interface __props__.__dict__["creation_timestamp"] = None __props__.__dict__["detailed_status"] = None __props__.__dict__["label_fingerprint"] = None __props__.__dict__["self_link"] = None __props__.__dict__["shared_secret_hash"] = None __props__.__dict__["tunnel_id"] = None super(VPNTunnel, __self__).__init__( 'gcp:compute/vPNTunnel:VPNTunnel', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, creation_timestamp: Optional[pulumi.Input[str]] = None, description: Optional[pulumi.Input[str]] = None, detailed_status: Optional[pulumi.Input[str]] = None, ike_version: Optional[pulumi.Input[int]] = None, label_fingerprint: Optional[pulumi.Input[str]] = None, labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, local_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, 
peer_external_gateway: Optional[pulumi.Input[str]] = None, peer_external_gateway_interface: Optional[pulumi.Input[int]] = None, peer_gcp_gateway: Optional[pulumi.Input[str]] = None, peer_ip: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, remote_traffic_selectors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, router: Optional[pulumi.Input[str]] = None, self_link: Optional[pulumi.Input[str]] = None, shared_secret: Optional[pulumi.Input[str]] = None, shared_secret_hash: Optional[pulumi.Input[str]] = None, target_vpn_gateway: Optional[pulumi.Input[str]] = None, tunnel_id: Optional[pulumi.Input[str]] = None, vpn_gateway: Optional[pulumi.Input[str]] = None, vpn_gateway_interface: Optional[pulumi.Input[int]] = None) -> 'VPNTunnel': """ Get an existing VPNTunnel resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format. :param pulumi.Input[str] description: An optional description of this resource. :param pulumi.Input[str] detailed_status: Detailed status message for the VPN tunnel. :param pulumi.Input[int] ike_version: IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2. :param pulumi.Input[str] label_fingerprint: The fingerprint used for optimistic locking of this resource. Used internally during updates. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this VpnTunnel. :param pulumi.Input[Sequence[pulumi.Input[str]]] local_traffic_selectors: Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. 
The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. :param pulumi.Input[str] name: Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. :param pulumi.Input[str] peer_external_gateway: URL of the peer side external VPN gateway to which this VPN tunnel is connected. :param pulumi.Input[int] peer_external_gateway_interface: The interface ID of the external VPN gateway to which this VPN tunnel is connected. :param pulumi.Input[str] peer_gcp_gateway: URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. If provided, the VPN tunnel will automatically use the same vpn_gateway_interface ID in the peer GCP VPN gateway. This field must reference a `compute.HaVpnGateway` resource. :param pulumi.Input[str] peer_ip: IP address of the peer VPN gateway. Only IPv4 is supported. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: The region where the tunnel is located. If unset, it is set to the region of `target_vpn_gateway`. :param pulumi.Input[Sequence[pulumi.Input[str]]] remote_traffic_selectors: Remote traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. :param pulumi.Input[str] router: URL of router resource to be used for dynamic routing. :param pulumi.Input[str] self_link: The URI of the created resource. 
:param pulumi.Input[str] shared_secret: Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway. **Note**: This property is sensitive and will not be displayed in the plan. :param pulumi.Input[str] shared_secret_hash: Hash of the shared secret. :param pulumi.Input[str] target_vpn_gateway: URL of the Target VPN gateway with which this VPN tunnel is associated. :param pulumi.Input[str] tunnel_id: The unique identifier for the resource. This identifier is defined by the server. :param pulumi.Input[str] vpn_gateway: URL of the VPN gateway with which this VPN tunnel is associated. This must be used if a High Availability VPN gateway resource is created. This field must reference a `compute.HaVpnGateway` resource. :param pulumi.Input[int] vpn_gateway_interface: The interface ID of the VPN gateway with which this VPN tunnel is associated. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _VPNTunnelState.__new__(_VPNTunnelState) __props__.__dict__["creation_timestamp"] = creation_timestamp __props__.__dict__["description"] = description __props__.__dict__["detailed_status"] = detailed_status __props__.__dict__["ike_version"] = ike_version __props__.__dict__["label_fingerprint"] = label_fingerprint __props__.__dict__["labels"] = labels __props__.__dict__["local_traffic_selectors"] = local_traffic_selectors __props__.__dict__["name"] = name __props__.__dict__["peer_external_gateway"] = peer_external_gateway __props__.__dict__["peer_external_gateway_interface"] = peer_external_gateway_interface __props__.__dict__["peer_gcp_gateway"] = peer_gcp_gateway __props__.__dict__["peer_ip"] = peer_ip __props__.__dict__["project"] = project __props__.__dict__["region"] = region __props__.__dict__["remote_traffic_selectors"] = remote_traffic_selectors __props__.__dict__["router"] = router __props__.__dict__["self_link"] = self_link __props__.__dict__["shared_secret"] = shared_secret 
__props__.__dict__["shared_secret_hash"] = shared_secret_hash __props__.__dict__["target_vpn_gateway"] = target_vpn_gateway __props__.__dict__["tunnel_id"] = tunnel_id __props__.__dict__["vpn_gateway"] = vpn_gateway __props__.__dict__["vpn_gateway_interface"] = vpn_gateway_interface return VPNTunnel(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="creationTimestamp") def creation_timestamp(self) -> pulumi.Output[str]: """ Creation timestamp in RFC3339 text format. """ return pulumi.get(self, "creation_timestamp") @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ An optional description of this resource. """ return pulumi.get(self, "description") @property @pulumi.getter(name="detailedStatus") def detailed_status(self) -> pulumi.Output[str]: """ Detailed status message for the VPN tunnel. """ return pulumi.get(self, "detailed_status") @property @pulumi.getter(name="ikeVersion") def ike_version(self) -> pulumi.Output[Optional[int]]: """ IKE protocol version to use when establishing the VPN tunnel with peer VPN gateway. Acceptable IKE versions are 1 or 2. Default version is 2. """ return pulumi.get(self, "ike_version") @property @pulumi.getter(name="labelFingerprint") def label_fingerprint(self) -> pulumi.Output[str]: """ The fingerprint used for optimistic locking of this resource. Used internally during updates. """ return pulumi.get(self, "label_fingerprint") @property @pulumi.getter def labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Labels to apply to this VpnTunnel. """ return pulumi.get(self, "labels") @property @pulumi.getter(name="localTrafficSelectors") def local_traffic_selectors(self) -> pulumi.Output[Sequence[str]]: """ Local traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. 
""" return pulumi.get(self, "local_traffic_selectors") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Name of the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `a-z?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. """ return pulumi.get(self, "name") @property @pulumi.getter(name="peerExternalGateway") def peer_external_gateway(self) -> pulumi.Output[Optional[str]]: """ URL of the peer side external VPN gateway to which this VPN tunnel is connected. """ return pulumi.get(self, "peer_external_gateway") @property @pulumi.getter(name="peerExternalGatewayInterface") def peer_external_gateway_interface(self) -> pulumi.Output[Optional[int]]: """ The interface ID of the external VPN gateway to which this VPN tunnel is connected. """ return pulumi.get(self, "peer_external_gateway_interface") @property @pulumi.getter(name="peerGcpGateway") def peer_gcp_gateway(self) -> pulumi.Output[Optional[str]]: """ URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. If provided, the VPN tunnel will automatically use the same vpn_gateway_interface ID in the peer GCP VPN gateway. This field must reference a `compute.HaVpnGateway` resource. """ return pulumi.get(self, "peer_gcp_gateway") @property @pulumi.getter(name="peerIp") def peer_ip(self) -> pulumi.Output[str]: """ IP address of the peer VPN gateway. Only IPv4 is supported. """ return pulumi.get(self, "peer_ip") @property @pulumi.getter def project(self) -> pulumi.Output[str]: """ The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @property @pulumi.getter def region(self) -> pulumi.Output[str]: """ The region where the tunnel is located. 
If unset, is set to the region of `target_vpn_gateway`. """ return pulumi.get(self, "region") @property @pulumi.getter(name="remoteTrafficSelectors") def remote_traffic_selectors(self) -> pulumi.Output[Sequence[str]]: """ Remote traffic selector to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example `192.168.0.0/16`. The ranges should be disjoint. Only IPv4 is supported. """ return pulumi.get(self, "remote_traffic_selectors") @property @pulumi.getter def router(self) -> pulumi.Output[Optional[str]]: """ URL of router resource to be used for dynamic routing. """ return pulumi.get(self, "router") @property @pulumi.getter(name="selfLink") def self_link(self) -> pulumi.Output[str]: """ The URI of the created resource. """ return pulumi.get(self, "self_link") @property @pulumi.getter(name="sharedSecret") def shared_secret(self) -> pulumi.Output[str]: """ Shared secret used to set the secure session between the Cloud VPN gateway and the peer VPN gateway. **Note**: This property is sensitive and will not be displayed in the plan. """ return pulumi.get(self, "shared_secret") @property @pulumi.getter(name="sharedSecretHash") def shared_secret_hash(self) -> pulumi.Output[str]: """ Hash of the shared secret. """ return pulumi.get(self, "shared_secret_hash") @property @pulumi.getter(name="targetVpnGateway") def target_vpn_gateway(self) -> pulumi.Output[Optional[str]]: """ URL of the Target VPN gateway with which this VPN tunnel is associated. """ return pulumi.get(self, "target_vpn_gateway") @property @pulumi.getter(name="tunnelId") def tunnel_id(self) -> pulumi.Output[str]: """ The unique identifier for the resource. This identifier is defined by the server. """ return pulumi.get(self, "tunnel_id") @property @pulumi.getter(name="vpnGateway") def vpn_gateway(self) -> pulumi.Output[Optional[str]]: """ URL of the VPN gateway with which this VPN tunnel is associated. 
This must be used if a High Availability VPN gateway resource is created. This field must reference a `compute.HaVpnGateway` resource. """ return pulumi.get(self, "vpn_gateway") @property @pulumi.getter(name="vpnGatewayInterface") def vpn_gateway_interface(self) -> pulumi.Output[Optional[int]]: """ The interface ID of the VPN gateway with which this VPN tunnel is associated. """ return pulumi.get(self, "vpn_gateway_interface")
46.67623
151
0.648696
8,376
68,334
5.104107
0.039398
0.077189
0.072043
0.061752
0.948611
0.934459
0.922202
0.918016
0.910718
0.890789
0
0.00968
0.257749
68,334
1,463
152
46.708134
0.833205
0.43542
0
0.817764
1
0
0.107571
0.039683
0
0
0
0
0
1
0.168453
false
0.001531
0.007657
0
0.278714
0.022971
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
c3d74e3c422cbcbbad8b90d64cc37de24b2ff5f4
129
py
Python
pandaf/013/data-2017.py
cpausmit/Kraken
54a5b69d274f928a5e53475b9c281815fadfc139
[ "MIT" ]
null
null
null
pandaf/013/data-2017.py
cpausmit/Kraken
54a5b69d274f928a5e53475b9c281815fadfc139
[ "MIT" ]
null
null
null
pandaf/013/data-2017.py
cpausmit/Kraken
54a5b69d274f928a5e53475b9c281815fadfc139
[ "MIT" ]
2
2017-03-22T17:33:38.000Z
2017-09-29T02:38:24.000Z
# CMSSW configuration driver: build the PandaProd producer process for 2017 data.
#
# Importing PandaProd.Producer.opts loads the VarParsing-style options object
# as a side effect; it must be imported first so the config flag below exists.
import PandaProd.Producer.opts

# Select the '31Mar2018' re-reco configuration for this dataset.
# NOTE(review): set before importing .prod — the original statement order
# suggests prod reads options.config at import time; confirm against
# PandaProd.Producer.prod before reordering.
PandaProd.Producer.opts.options.config = '31Mar2018'

# Expose `process` at module level, as cmsRun expects from a config file.
from PandaProd.Producer.prod import process
25.8
52
0.837209
16
129
6.75
0.625
0.472222
0.388889
0
0
0
0
0
0
0
0
0.05042
0.077519
129
4
53
32.25
0.857143
0
0
0
0
0
0.069767
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
7f0e5620faa95f61dacfdae61105f9c8dfaf0c8c
29,681
py
Python
recovery_rl/model.py
hlhang9527/recovery-rl
c916518a323ff5524bc26b9a87fc68ef19368d94
[ "MIT" ]
19
2021-05-09T23:11:21.000Z
2022-03-08T11:41:50.000Z
recovery_rl/model.py
hlhang9527/recovery-rl
c916518a323ff5524bc26b9a87fc68ef19368d94
[ "MIT" ]
null
null
null
recovery_rl/model.py
hlhang9527/recovery-rl
c916518a323ff5524bc26b9a87fc68ef19368d94
[ "MIT" ]
4
2021-05-24T19:12:39.000Z
2021-09-17T01:16:43.000Z
'''
Latent dynamics models are built on latent dynamics model used in Goal-Aware
Prediction: Learning to Model What Matters (ICML 2020). All other networks
are built on SAC implementation from
https://github.com/pranz24/pytorch-soft-actor-critic
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np

LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
epsilon = 1e-6

# ---------------------------------------------------------------------------
# Global utilities
# ---------------------------------------------------------------------------


def weights_init_(m):
    """Xavier-initialize the weights of every nn.Linear; zero its bias."""
    if isinstance(m, nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight, gain=1)
        torch.nn.init.constant_(m.bias, 0)


def soft_update(target, source, tau):
    """Polyak-average source params into target: t <- (1 - tau)*t + tau*s."""
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(target_param.data * (1.0 - tau) +
                                param.data * tau)


def hard_update(target, source):
    """Copy source parameters into target verbatim."""
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(param.data)


# ---------------------------------------------------------------------------
# Shared building blocks for the image-based networks.  Attribute names match
# the original per-class definitions exactly, so existing checkpoints load.
# ---------------------------------------------------------------------------


def _final_linear_size(env_name):
    """Flattened conv-trunk feature size for a supported environment.

    Bug fix: the original fallthrough used ``assert (False, env_name)`` /
    ``assert (False)``; asserting a non-empty tuple never fails, so an
    unsupported env_name slipped through and crashed later with an opaque
    AttributeError.  Raise an explicit error instead.
    """
    if 'shelf' in env_name:
        return 768
    if 'maze' in env_name:
        return 1024
    if 'reach' in env_name:
        return 640
    raise ValueError("Unsupported env_name: {!r}".format(env_name))


def _add_conv_trunk(module, in_channels):
    """Attach the 3-layer conv trunk (+ BatchNorms) shared by all CNN nets.

    demo_bn* are parallel BatchNorm stacks for demonstration data; they are
    constructed here (as in the original classes) but forward() always uses
    bn1-bn3 — presumably a caller swaps them in; verify before removing.
    """
    module.conv1 = nn.Conv2d(in_channels, 128, kernel_size=3, stride=2,
                             padding=1, bias=True)
    module.conv2 = nn.Conv2d(128, 64, kernel_size=3, stride=2, padding=1,
                             bias=True)
    module.conv3 = nn.Conv2d(64, 16, kernel_size=3, stride=2, padding=1,
                             bias=True)
    module.bn1 = nn.BatchNorm2d(128)
    module.bn2 = nn.BatchNorm2d(64)
    module.bn3 = nn.BatchNorm2d(16)
    module.demo_bn1 = nn.BatchNorm2d(128)
    module.demo_bn2 = nn.BatchNorm2d(64)
    module.demo_bn3 = nn.BatchNorm2d(16)


def _conv_features(module, state, flat_dim):
    """Run the shared conv trunk on `state` and flatten to (batch, flat_dim)."""
    hidden = F.relu(module.bn1(module.conv1(state)))
    hidden = F.relu(module.bn2(module.conv2(hidden)))
    hidden = F.relu(module.bn3(module.conv3(hidden)))
    return hidden.view(-1, flat_dim)


def _init_action_scaling(module, action_space):
    """Set action_scale/action_bias mapping tanh output to the action range.

    Bug fix: DeterministicPolicy/StochasticPolicy originally stored bare
    Python floats when action_space is None, which crashed their to(device)
    methods (float has no .to).  Using 0-dim tensors is numerically
    identical in `tanh(x) * scale + bias`.
    """
    if action_space is None:
        module.action_scale = torch.tensor(1.)
        module.action_bias = torch.tensor(0.)
    else:
        module.action_scale = torch.FloatTensor(
            (action_space.high - action_space.low) / 2.)
        module.action_bias = torch.FloatTensor(
            (action_space.high + action_space.low) / 2.)


def _squashed_gaussian_sample(module, state):
    """Sample from a tanh-squashed Gaussian policy (SAC).

    Returns (action, log_prob, deterministic_mean).  log_prob includes the
    tanh change-of-variables correction; epsilon guards log(0).
    """
    mean, log_std = module.forward(state)
    std = log_std.exp()
    normal = Normal(mean, std)
    x_t = normal.rsample()  # reparameterization trick (mean + std * N(0,1))
    y_t = torch.tanh(x_t)
    action = y_t * module.action_scale + module.action_bias
    log_prob = normal.log_prob(x_t)
    # Enforcing action bound: correct the density for the tanh squash.
    log_prob -= torch.log(module.action_scale * (1 - y_t.pow(2)) + epsilon)
    log_prob = log_prob.sum(1, keepdim=True)
    mean = torch.tanh(mean) * module.action_scale + module.action_bias
    return action, log_prob, mean


# ---------------------------------------------------------------------------
# Architectures for critic functions and policies for SAC, and
# model-free recovery policies.
# ---------------------------------------------------------------------------


class QNetwork(nn.Module):
    """Twin Q-network (SAC style): two independent Q heads over (state, action)."""

    def __init__(self, num_inputs, num_actions, hidden_dim):
        super(QNetwork, self).__init__()
        # Q1 architecture
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        # Q2 architecture
        self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear5 = nn.Linear(hidden_dim, hidden_dim)
        self.linear6 = nn.Linear(hidden_dim, 1)
        self.apply(weights_init_)

    def forward(self, state, action):
        """Return (q1, q2), each of shape (batch, 1)."""
        xu = torch.cat([state, action], 1)
        x1 = F.relu(self.linear1(xu))
        x1 = F.relu(self.linear2(x1))
        x1 = self.linear3(x1)
        x2 = F.relu(self.linear4(xu))
        x2 = F.relu(self.linear5(x2))
        x2 = self.linear6(x2)
        return x1, x2


class QNetworkCNN(nn.Module):
    """Twin Q-network for image observations.

    The state goes through the shared conv trunk, the action through a small
    MLP; the two embeddings are concatenated and fed to two Q heads.
    """

    def __init__(self, observation_space, num_actions, hidden_dim, env_name):
        super(QNetworkCNN, self).__init__()
        # Process the state
        _add_conv_trunk(self, observation_space[-1])
        self.final_linear_size = _final_linear_size(env_name)
        self.final_linear = nn.Linear(self.final_linear_size, hidden_dim)
        # Process the action
        self.linear_act1 = nn.Linear(num_actions, hidden_dim)
        self.linear_act2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear_act3 = nn.Linear(hidden_dim, hidden_dim)
        # Q1 head (post state-action merge)
        self.linear1_1 = nn.Linear(2 * hidden_dim, hidden_dim)
        self.linear2_1 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_1 = nn.Linear(hidden_dim, 1)
        # Q2 head (post state-action merge)
        self.linear1_2 = nn.Linear(2 * hidden_dim, hidden_dim)
        self.linear2_2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_2 = nn.Linear(hidden_dim, 1)
        self.apply(weights_init_)

    def forward(self, state, action):
        """Return (q1, q2), each of shape (batch, 1)."""
        final_conv = F.relu(self.final_linear(
            _conv_features(self, state, self.final_linear_size)))
        x0 = F.relu(self.linear_act1(action))
        x0 = F.relu(self.linear_act2(x0))
        x0 = self.linear_act3(x0)
        xu = torch.cat([final_conv, x0], 1)
        x1 = F.relu(self.linear1_1(xu))
        x1 = F.relu(self.linear2_1(x1))
        x1 = self.linear3_1(x1)
        x2 = F.relu(self.linear1_2(xu))
        x2 = F.relu(self.linear2_2(x2))
        x2 = self.linear3_2(x2)
        return x1, x2


class QNetworkConstraint(nn.Module):
    """Twin safety critic Q_risk: like QNetwork, but outputs are squashed to
    (0, 1) with a sigmoid (interpreted as constraint-violation estimates)."""

    def __init__(self, num_inputs, num_actions, hidden_dim):
        super(QNetworkConstraint, self).__init__()
        # NOTE(review): bn1 is constructed but never used by forward();
        # kept so existing checkpoints still load.
        self.bn1 = nn.BatchNorm1d(num_inputs + num_actions)
        # Q1 architecture
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, 1)
        # Q2 architecture
        self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim)
        self.linear5 = nn.Linear(hidden_dim, hidden_dim)
        self.linear6 = nn.Linear(hidden_dim, 1)
        self.apply(weights_init_)

    def forward(self, state, action):
        """Return (q1, q2) in (0, 1), each of shape (batch, 1)."""
        xu = torch.cat([state, action], 1)
        x1 = F.relu(self.linear1(xu))
        x1 = F.relu(self.linear2(x1))
        # torch.sigmoid: nn.functional.sigmoid is deprecated.
        x1 = torch.sigmoid(self.linear3(x1))
        x2 = F.relu(self.linear4(xu))
        x2 = F.relu(self.linear5(x2))
        x2 = torch.sigmoid(self.linear6(x2))
        return x1, x2


class QNetworkConstraintCNN(nn.Module):
    """Twin safety critic Q_risk for image observations; sigmoid outputs."""

    def __init__(self, observation_space, num_actions, hidden_dim, env_name):
        super(QNetworkConstraintCNN, self).__init__()
        # Process the state
        _add_conv_trunk(self, observation_space[-1])
        self.final_linear_size = _final_linear_size(env_name)
        self.final_linear = nn.Linear(self.final_linear_size, hidden_dim)
        # Process the action
        self.linear_act1 = nn.Linear(num_actions, hidden_dim)
        self.linear_act2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear_act3 = nn.Linear(hidden_dim, hidden_dim)
        # Q1 head (post state-action merge)
        self.linear1_1 = nn.Linear(2 * hidden_dim, hidden_dim)
        self.linear2_1 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_1 = nn.Linear(hidden_dim, 1)
        # Q2 head (post state-action merge)
        self.linear1_2 = nn.Linear(2 * hidden_dim, hidden_dim)
        self.linear2_2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3_2 = nn.Linear(hidden_dim, 1)
        self.apply(weights_init_)

    def forward(self, state, action):
        """Return (q1, q2) in (0, 1), each of shape (batch, 1)."""
        final_conv = F.relu(self.final_linear(
            _conv_features(self, state, self.final_linear_size)))
        x0 = F.relu(self.linear_act1(action))
        x0 = F.relu(self.linear_act2(x0))
        x0 = self.linear_act3(x0)
        xu = torch.cat([final_conv, x0], 1)
        x1 = F.relu(self.linear1_1(xu))
        x1 = F.relu(self.linear2_1(x1))
        x1 = torch.sigmoid(self.linear3_1(x1))
        x2 = F.relu(self.linear1_2(xu))
        x2 = F.relu(self.linear2_2(x2))
        x2 = torch.sigmoid(self.linear3_2(x2))
        return x1, x2


class GaussianPolicy(nn.Module):
    """Tanh-squashed Gaussian policy for SAC (state-dependent mean/log-std)."""

    def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
        super(GaussianPolicy, self).__init__()
        self.linear1 = nn.Linear(num_inputs, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.mean_linear = nn.Linear(hidden_dim, num_actions)
        self.log_std_linear = nn.Linear(hidden_dim, num_actions)
        self.apply(weights_init_)
        # action rescaling
        _init_action_scaling(self, action_space)

    def forward(self, state):
        """Return (mean, log_std); log_std clamped for numerical stability."""
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
        return mean, log_std

    def sample(self, state):
        """Return (action, log_prob, deterministic mean); see helper."""
        return _squashed_gaussian_sample(self, state)

    def to(self, device):
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        return super(GaussianPolicy, self).to(device)


class GaussianPolicyCNN(nn.Module):
    """Tanh-squashed Gaussian SAC policy for image observations."""

    def __init__(self, observation_space, num_actions, hidden_dim, env_name,
                 action_space=None):
        super(GaussianPolicyCNN, self).__init__()
        # Process via a CNN and then collapse to linear
        _add_conv_trunk(self, observation_space[-1])
        self.linear_dim = _final_linear_size(env_name)
        self.linear1 = nn.Linear(self.linear_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.mean_linear = nn.Linear(hidden_dim, num_actions)
        self.log_std_linear = nn.Linear(hidden_dim, num_actions)
        self.apply(weights_init_)
        # action rescaling
        _init_action_scaling(self, action_space)

    def forward(self, state):
        """Return (mean, log_std); log_std clamped for numerical stability."""
        x = F.relu(self.linear1(_conv_features(self, state, self.linear_dim)))
        x = F.relu(self.linear2(x))
        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x)
        log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
        return mean, log_std

    def sample(self, state):
        """Return (action, log_prob, deterministic mean); see helper."""
        return _squashed_gaussian_sample(self, state)

    def to(self, device):
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        return super(GaussianPolicyCNN, self).to(device)


class DeterministicPolicy(nn.Module):
    """Deterministic policy with clipped Gaussian exploration noise
    (model-free recovery policy)."""

    def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
        super(DeterministicPolicy, self).__init__()
        self.linear1 = nn.Linear(num_inputs, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.mean = nn.Linear(hidden_dim, num_actions)
        self.noise = torch.Tensor(num_actions)
        self.apply(weights_init_)
        # action rescaling
        _init_action_scaling(self, action_space)

    def forward(self, state):
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
        return mean

    def sample(self, state):
        """Return (mean + clipped N(0, 0.1) noise, log_prob=0, mean)."""
        mean = self.forward(state)
        noise = self.noise.normal_(0., std=0.1)
        noise = noise.clamp(-0.25, 0.25)
        action = mean + noise
        # log_prob is 0: the policy itself is deterministic.
        return action, torch.tensor(0.), mean

    def to(self, device):
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        self.noise = self.noise.to(device)
        return super(DeterministicPolicy, self).to(device)


class StochasticPolicy(nn.Module):
    """Gaussian policy with a state-independent learned log-std
    (model-free recovery policy)."""

    def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
        super(StochasticPolicy, self).__init__()
        self.linear1 = nn.Linear(num_inputs, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.mean = nn.Linear(hidden_dim, num_actions)
        # Learned, state-independent log-std initialized to log(0.1).
        self.log_std = torch.nn.Parameter(
            torch.as_tensor([np.log(0.1)] * num_actions))
        self.min_log_std = np.log(1e-6)
        self.apply(weights_init_)
        # action rescaling
        _init_action_scaling(self, action_space)

    def forward(self, state):
        """Return a Normal distribution over actions."""
        x = F.relu(self.linear1(state))
        x = F.relu(self.linear2(x))
        mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
        log_std = torch.clamp(self.log_std, min=self.min_log_std)
        log_std = log_std.unsqueeze(0).repeat([len(mean), 1])
        std = torch.exp(log_std)
        return Normal(mean, std)

    def sample(self, state):
        """Return (rsample, summed log_prob, distribution mean)."""
        dist = self.forward(state)
        action = dist.rsample()
        return action, dist.log_prob(action).sum(-1), dist.mean

    def to(self, device):
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        return super(StochasticPolicy, self).to(device)


class DeterministicPolicyCNN(nn.Module):
    """Deterministic recovery policy for image observations."""

    def __init__(self, observation_space, num_actions, hidden_dim, env_name,
                 action_space=None):
        super(DeterministicPolicyCNN, self).__init__()
        # Process via a CNN and then collapse to linear
        _add_conv_trunk(self, observation_space[-1])
        self.linear_dim = _final_linear_size(env_name)
        self.linear1 = nn.Linear(self.linear_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.mean = nn.Linear(hidden_dim, num_actions)
        self.noise = torch.Tensor(num_actions)
        self.apply(weights_init_)
        # action rescaling
        _init_action_scaling(self, action_space)

    def forward(self, state):
        x = F.relu(self.linear1(_conv_features(self, state, self.linear_dim)))
        x = F.relu(self.linear2(x))
        mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
        return mean

    def sample(self, state):
        """Return (mean + clipped N(0, 0.1) noise, log_prob=0, mean)."""
        mean = self.forward(state)
        noise = self.noise.normal_(0., std=0.1)
        noise = noise.clamp(-0.25, 0.25)
        action = mean + noise
        return action, torch.tensor(0.), mean

    def to(self, device):
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        self.noise = self.noise.to(device)
        return super(DeterministicPolicyCNN, self).to(device)


class StochasticPolicyCNN(nn.Module):
    """Stochastic recovery policy for image observations."""

    def __init__(self, observation_space, num_actions, hidden_dim, env_name,
                 action_space=None):
        super(StochasticPolicyCNN, self).__init__()
        # Process via a CNN and then collapse to linear
        _add_conv_trunk(self, observation_space[-1])
        self.linear_dim = _final_linear_size(env_name)
        self.linear1 = nn.Linear(self.linear_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.mean = nn.Linear(hidden_dim, num_actions)
        # Learned log-std initialized to 0 (unlike StochasticPolicy's log(0.1)).
        self.log_std = torch.nn.Parameter(
            torch.as_tensor([0.0] * num_actions))
        self.min_log_std = np.log(1e-6)
        self.apply(weights_init_)
        # action rescaling
        _init_action_scaling(self, action_space)

    def forward(self, state):
        """Return a Normal distribution over actions."""
        x = F.relu(self.linear1(_conv_features(self, state, self.linear_dim)))
        x = F.relu(self.linear2(x))
        mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
        log_std = torch.clamp(self.log_std, min=self.min_log_std)
        log_std = log_std.unsqueeze(0).repeat([len(mean), 1])
        std = torch.exp(log_std)
        return Normal(mean, std)

    def sample(self, state):
        """Return (rsample, summed log_prob, distribution mean)."""
        dist = self.forward(state)
        action = dist.rsample()
        return action, dist.log_prob(action).sum(-1), dist.mean

    def to(self, device):
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        return super(StochasticPolicyCNN, self).to(device)


# ---------------------------------------------------------------------------
# Architectures for latent dynamics model for model-based recovery policy
# ---------------------------------------------------------------------------


class TransitionModel(nn.Module):
    """f_dyn: latent-space dynamics model mapping (h_t, a_t) -> h_{t+1}."""
    __constants__ = ['min_std_dev']

    def __init__(self, hidden_size, action_size, activation_function='relu'):
        super().__init__()
        self.act_fn = getattr(F, activation_function)
        self.fc1 = nn.Linear(hidden_size + action_size, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 128)
        self.fc4 = nn.Linear(128, hidden_size)

    def forward(self, prev_hidden, action):
        """Inputs are (traj_len, batch, feat); output (traj_len, batch, hidden)."""
        hidden = torch.cat([prev_hidden, action], dim=-1)
        trajlen, batchsize = hidden.size(0), hidden.size(1)
        # Bug fix: the original computed hidden.view(-1, hidden.size(2)) and
        # discarded the result.  Assigning it is numerically identical
        # (nn.Linear acts on the last dim either way) and matches the
        # reshape back below.
        hidden = hidden.view(-1, hidden.size(2))
        hidden = self.act_fn(self.fc1(hidden))
        hidden = self.act_fn(self.fc2(hidden))
        hidden = self.act_fn(self.fc3(hidden))
        hidden = self.fc4(hidden)
        hidden = hidden.view(trajlen, batchsize, -1)
        return hidden


class VisualEncoderAttn(nn.Module):
    """Image encoder: 8 conv layers -> 2*hidden_size features per frame,
    plus an (all-zero here) attention map of shape (T, B, 1, W, 64)."""
    __constants__ = ['embedding_size']

    def __init__(self, env_name, hidden_size, activation_function='relu',
                 ch=6):
        super().__init__()
        self.act_fn = getattr(F, activation_function)
        self.softmax = nn.Softmax(dim=2)
        self.sigmoid = nn.Sigmoid()
        self.ch = ch
        self.conv1 = nn.Conv2d(self.ch, 32, 4, stride=2)
        self.conv1_1 = nn.Conv2d(32, 32, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv2_1 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
        self.conv3_1 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
        self.conv4_1 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
        if 'maze' in env_name:
            self.fc1 = nn.Linear(1024, 512)
        elif 'shelf' in env_name:
            self.fc1 = nn.Linear(512, 512)
        else:
            raise NotImplementedError("Needs to be maze or shelf")
        self.fc2 = nn.Linear(512, 2 * hidden_size)

    def forward(self, observation):
        trajlen, batchsize = observation.size(0), observation.size(1)
        self.width = observation.size(3)
        # NOTE(review): reshapes with a hard-coded 3 channels even though
        # ch is configurable (default 6) — confirm callers always feed
        # 3-channel frames here before changing.
        observation = observation.view(trajlen * batchsize, 3, self.width, 64)
        atn = torch.zeros_like(observation[:, :1])
        hidden = self.act_fn(self.conv1(observation))
        hidden = self.act_fn(self.conv1_1(hidden))
        hidden = self.act_fn(self.conv2(hidden))
        hidden = self.act_fn(self.conv2_1(hidden))
        hidden = self.act_fn(self.conv3(hidden))
        hidden = self.act_fn(self.conv3_1(hidden))
        hidden = self.act_fn(self.conv4(hidden))
        hidden = self.act_fn(self.conv4_1(hidden))
        hidden = hidden.view(trajlen * batchsize, -1)
        hidden = self.act_fn(self.fc1(hidden))
        hidden = self.fc2(hidden)
        hidden = hidden.view(trajlen, batchsize, -1)
        atn = atn.view(trajlen, batchsize, 1, self.width, 64)
        return hidden, atn


class VisualReconModel(nn.Module):
    """Latent -> image decoder; outputs a residual image scaled to [0, 255]."""
    __constants__ = ['embedding_size']

    def __init__(self, env_name, hidden_size, activation_function='relu',
                 action_len=5):
        super().__init__()
        self.act_fn = getattr(F, activation_function)
        self.fc1 = nn.Linear(hidden_size * 1, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 128)
        self.sigmoid = nn.Sigmoid()
        # Per-env deconv kernel shapes chosen to hit the env's frame size.
        if 'maze' in env_name:
            self.conv1 = nn.ConvTranspose2d(128, 128, 5, stride=2)
            self.conv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
            self.conv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
            self.conv4 = nn.ConvTranspose2d(32, 3, 6, stride=2)
        elif 'shelf' in env_name:
            self.conv1 = nn.ConvTranspose2d(128, 128, (4, 5), stride=2)
            self.conv2 = nn.ConvTranspose2d(128, 64, (4, 5), stride=2)
            self.conv3 = nn.ConvTranspose2d(64, 32, (5, 6), stride=2)
            self.conv4 = nn.ConvTranspose2d(32, 3, (4, 6), stride=2)
        else:
            raise NotImplementedError("Needs to be maze or shelf")

    def forward(self, hidden):
        trajlen, batchsize = hidden.size(0), hidden.size(1)
        hidden = hidden.view(trajlen * batchsize, -1)
        hidden = self.act_fn(self.fc1(hidden))
        hidden = self.act_fn(self.fc2(hidden))
        hidden = self.fc3(hidden)
        hidden = hidden.view(-1, 128, 1, 1)
        hidden = self.act_fn(self.conv1(hidden))
        hidden = self.act_fn(self.conv2(hidden))
        hidden = self.act_fn(self.conv3(hidden))
        # Sigmoid -> [0, 1], scaled to pixel range [0, 255].
        residual = self.sigmoid(self.conv4(hidden)) * 255.0
        residual = residual.view(trajlen, batchsize, residual.size(1),
                                 residual.size(2), residual.size(3))
        return residual
35.461171
79
0.575924
3,772
29,681
4.352863
0.07211
0.046044
0.030696
0.035203
0.855168
0.848834
0.831902
0.81089
0.803642
0.758816
0
0.047462
0.316398
29,681
836
80
35.503589
0.761755
0.061083
0
0.803543
0
0
0.006846
0
0
0
0
0
0.008052
1
0.066023
false
0
0.008052
0
0.140097
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
617ac35aeab614905af64421fa43fc947dd5d460
5,776
py
Python
biserici_inlemnite/app/migrations/0048_pozeelementesculptate_pozeicoanevechi_pozemobiliere_pozeobiectedecult_pozeobiecteinstrainate_pozepro.py
ck-tm/biserici-inlemnite
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
[ "MIT" ]
null
null
null
biserici_inlemnite/app/migrations/0048_pozeelementesculptate_pozeicoanevechi_pozemobiliere_pozeobiectedecult_pozeobiecteinstrainate_pozepro.py
ck-tm/biserici-inlemnite
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
[ "MIT" ]
null
null
null
biserici_inlemnite/app/migrations/0048_pozeelementesculptate_pozeicoanevechi_pozemobiliere_pozeobiectedecult_pozeobiecteinstrainate_pozepro.py
ck-tm/biserici-inlemnite
c9d12127b92f25d3ab2fcc7b4c386419fe308a4e
[ "MIT" ]
null
null
null
# Generated by Django 3.1.13 on 2021-09-27 11:44

from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields


def _poze_create_model(name, related_name):
    """Build one CreateModel operation for an orderable photo-gallery model.

    All six models in this migration share the same field set: an id, a
    sort_order, a rich-text 'observatii' note, a parental link to the
    componenta-artistica page, and an optional image FK.  Only the model
    name and the ParentalKey related_name differ.
    """
    return migrations.CreateModel(
        name=name,
        fields=[
            ('id', models.AutoField(auto_created=True, primary_key=True,
                                    serialize=False, verbose_name='ID')),
            ('sort_order', models.IntegerField(blank=True, editable=False,
                                               null=True)),
            ('observatii', wagtail.core.fields.RichTextField(
                blank=True, null=True, verbose_name='Observații')),
            ('page', modelcluster.fields.ParentalKey(
                on_delete=django.db.models.deletion.CASCADE,
                related_name=related_name,
                to='app.componentaartisticapage')),
            ('poza', models.ForeignKey(
                blank=True, null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name='+', to='wagtailimages.image')),
        ],
        options={
            'ordering': ['sort_order'],
            'abstract': False,
        },
    )


class Migration(migrations.Migration):

    dependencies = [
        ('wagtailimages', '0023_add_choose_permissions'),
        ('app', '0047_pozefundatie_pozestructuracatei_pozestructuracheotoare_pozestructuramixt_pozetiranti'),
    ]

    # Same operations, in the same order, as the generated original —
    # only the construction is factored through the helper above.
    operations = [
        _poze_create_model('PozeProscomidie', 'poze_proscomidie'),
        _poze_create_model('PozeObiecteInstrainate', 'poze_obiecte_instrainate'),
        _poze_create_model('PozeObiecteDeCult', 'poze_obiecte_de_cult'),
        _poze_create_model('PozeMobiliere', 'poze_mobiliere'),
        _poze_create_model('PozeIcoaneVechi', 'poze_icoane_vechi'),
        _poze_create_model('PozeElementeSculptate', 'poze_elemente_sculptate'),
    ]
56.627451
178
0.615305
563
5,776
6.161634
0.159858
0.046699
0.052465
0.082445
0.851254
0.851254
0.851254
0.851254
0.851254
0.851254
0
0.005487
0.242729
5,776
101
179
57.188119
0.787609
0.007964
0
0.694737
1
0
0.18139
0.064246
0
0
0
0
0
1
0
false
0
0.042105
0
0.073684
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
618a9bf7e2b2917637c6964d4c1a933241f9717f
369
py
Python
util.py
nimble0/advanced-steno-dictionary
e2c7e4b7d6317ced15ba14ca24df36e56ca4c393
[ "Apache-2.0" ]
1
2017-12-03T21:51:20.000Z
2017-12-03T21:51:20.000Z
util.py
nimble0/advanced-steno-dictionary
e2c7e4b7d6317ced15ba14ca24df36e56ca4c393
[ "Apache-2.0" ]
null
null
null
util.py
nimble0/advanced-steno-dictionary
e2c7e4b7d6317ced15ba14ca24df36e56ca4c393
[ "Apache-2.0" ]
null
null
null
import re def single_quote_str(string): return "'" + re.sub( r"(?P<match_char>\'|\\)", "\\\\\\g<match_char>", string) + "'" def double_quote_str(string): return "\"" + re.sub( r"(?P<match_char>\"|\\)", "\\\\\\g<match_char>", string) + "\"" def unquote_str(string): return string[1:-1].replace("\\'", "'")
20.5
43
0.485095
43
369
3.953488
0.418605
0.211765
0.264706
0.235294
0.647059
0.647059
0.647059
0.647059
0.647059
0.647059
0
0.007246
0.252033
369
17
44
21.705882
0.608696
0
0
0.153846
0
0
0.224932
0.056911
0
0
0
0
0
1
0.230769
false
0
0.076923
0.230769
0.538462
0
0
0
0
null
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
7
618e7ee9c0746e63210857221ef350cdd3ed831d
122
py
Python
kalite_gtk/exceptions.py
benjaoming/ka-lite-gtk
cb1a50de14036dee5a8376200d5030ccc8adc85a
[ "BSD-3-Clause" ]
null
null
null
kalite_gtk/exceptions.py
benjaoming/ka-lite-gtk
cb1a50de14036dee5a8376200d5030ccc8adc85a
[ "BSD-3-Clause" ]
null
null
null
kalite_gtk/exceptions.py
benjaoming/ka-lite-gtk
cb1a50de14036dee5a8376200d5030ccc8adc85a
[ "BSD-3-Clause" ]
null
null
null
from __future__ import print_function from __future__ import unicode_literals class ValidationError(Exception): pass
20.333333
39
0.844262
14
122
6.642857
0.785714
0.215054
0.344086
0
0
0
0
0
0
0
0
0
0.131148
122
5
40
24.4
0.877358
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0.25
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
8
9c9de471a4198efaeba940d83562639d4711d881
7,191
py
Python
selforgmap/selforgmap.py
saifuddin778/selforgmap
3f99363018b27c71fbe77482e51077fd2f67a26f
[ "MIT" ]
1
2016-06-30T22:09:37.000Z
2016-06-30T22:09:37.000Z
selforgmap/selforgmap.py
saifuddin778/selforgmap
3f99363018b27c71fbe77482e51077fd2f67a26f
[ "MIT" ]
null
null
null
selforgmap/selforgmap.py
saifuddin778/selforgmap
3f99363018b27c71fbe77482e51077fd2f67a26f
[ "MIT" ]
1
2018-12-10T20:37:46.000Z
2018-12-10T20:37:46.000Z
import math from random import random as rnd from nodes import Nodes from decay import Decay from distances import Distances from neighborhoods import square_ class SOMSupervised(object): """the standard supervised version of som operating on voting principles""" def __init__(self, n, m, lr=False): self.n = n self.m = m self.lr = 0.5 if not lr else lr self.nodes, self.pad = Nodes(n, m).create_nodes() self.decay = Decay(self.n, self.lr) self.metric = Distances().euclidian self.smp_count = 1 self.bmus = {} def pick_(self, pcount): """picks a random sample from dataset""" return [int(rnd() * pcount) for _ in xrange(self.smp_count)] def bmu(self, item): """gets the best bmu (based on metric)""" min_d = float('infinity') candidate = None for node in self.nodes: if self.metric(item, node.w) < min_d: min_d = self.metric(item, node.w) candidate = node return candidate def get_neighbors(self, bmu, n_nghbs): """returns immediate neighbors of bmu under n_nhgbs distance""" n_items = range(1, n_nghbs + 1) p_obj = { 'p_i': bmu.i, 'p_x': bmu.x, 'p_y': bmu.y, 'min_x': bmu.x, 'min_y': bmu.y, 'max_x': bmu.x, 'max_y': bmu.y } for i in n_items: k = i nxmin = math.floor(p_obj['p_x'] - (self.pad * k)) nxmax = math.ceil(p_obj['p_x'] + (self.pad * k)) nymin = math.floor(p_obj['p_y'] - (self.pad * k)) nymax = math.ceil(p_obj['p_y'] + (self.pad * k)) if nxmin < p_obj['p_x']: p_obj['min_x'] = nxmin if nxmax > p_obj['max_x']: p_obj['max_x'] = nxmax if nymin < p_obj['p_y']: p_obj['min_y'] = nymin if nymax > p_obj['p_y']: p_obj['max_y'] = nymax neighbors = square_(p_obj, self.nodes) return neighbors def update_nodes(self, neighbors, item, label, bmu): """updates neighbors of bmu""" for node in neighbors: distance = float(abs(bmu.x - node.x) + abs(bmu.y - node.y)) / 2 force = 1 / max([distance, 1]) for i, j in enumerate(node.w): node.w[i] = node.w[i] + force * (item[i] - node.w[i]) node.fcount[label] = force return def predict(self, x): """predicts based on the voting method""" f = {} bmu = 
self.bmu(x) neighbors = self.get_neighbors(bmu, 2) for each in neighbors: winner = max(each.fcount, key=lambda n: each.fcount[n]) f[winner] = f.get(winner, 0) + 1 return max(f, key=lambda n: f[n]) def get_nodes(self): return map(lambda n: {'weight': n.w, 'x': n.x, 'y': n.y, 'hcount': n.__dict__.get('bmu_count', 0)}, self.nodes) def get_bmus(self): bmus = filter(lambda n: n.i in self.bmus, self.nodes) return map(lambda n: {'weight': n.w, 'x': n.x, 'y': n.y, 'hcount': n.__dict__.get('bmu_count', 0)}, bmus) def train(self, data, labels): """main training method to build clusters in som grid""" n_nghbs = self.n pcount = len(data) t = 0 while n_nghbs > 1: samples = self.pick_(pcount) for sample in samples: item = data[sample] label = labels[sample] bmu = self.bmu(item) bmu.bmu_count += 1 self.bmus[bmu.i] = 1 n_nghbs = int(self.decay.exp(t) / 2) neighbors = self.get_neighbors(bmu, n_nghbs) self.update_nodes(neighbors, item, label, bmu) self.bmus[bmu.i] = True t += 1 return class SOM(object): """the standard version of som""" def __init__(self, n, m, lr=False): self.n = n self.m = m self.lr = 0.5 if not lr else lr self.nodes, self.pad = Nodes(n, m).create_nodes() self.decay = Decay(self.n, self.lr) self.metric = Distances().euclidian self.smp_count = 1 self.bmus = {} def pick_(self, pcount): """picks a random sample from dataset""" return [int(rnd() * pcount) for _ in xrange(self.smp_count)] def bmu(self, item): """gets the best bmu (based on metric)""" min_d = float('infinity') candidate = None for node in self.nodes: if self.metric(item, node.w) < min_d: min_d = self.metric(item, node.w) candidate = node return candidate def get_neighbors(self, bmu, n_nghbs): """returns immediate neighbors of bmu under n_nhgbs distance""" n_items = range(1, n_nghbs + 1) p_obj = { 'p_i': bmu.i, 'p_x': bmu.x, 'p_y': bmu.y, 'min_x': bmu.x, 'min_y': bmu.y, 'max_x': bmu.x, 'max_y': bmu.y } for i in n_items: k = i nxmin = math.floor(p_obj['p_x'] - (self.pad * k)) nxmax = 
math.ceil(p_obj['p_x'] + (self.pad * k)) nymin = math.floor(p_obj['p_y'] - (self.pad * k)) nymax = math.ceil(p_obj['p_y'] + (self.pad * k)) if nxmin < p_obj['p_x']: p_obj['min_x'] = nxmin if nxmax > p_obj['max_x']: p_obj['max_x'] = nxmax if nymin < p_obj['p_y']: p_obj['min_y'] = nymin if nymax > p_obj['p_y']: p_obj['max_y'] = nymax neighbors = square_(p_obj, self.nodes) return neighbors def update_nodes(self, neighbors, item, bmu): """updates neighbors of bmu""" for node in neighbors: distance = float(abs(bmu.x - node.x) + abs(bmu.y - node.y)) / 2 force = 1 / max([distance, 1]) for i, j in enumerate(node.w): node.w[i] = node.w[i] + force * (item[i] - node.w[i]) return def get_nodes(self): return map(lambda n: {'weight': n.w, 'x': n.x, 'y': n.y, 'hcount': n.__dict__.get('bmu_count', 0)}, self.nodes) def get_bmus(self): bmus = filter(lambda n: n.i in self.bmus, self.nodes) return map(lambda n: {'weight': n.w, 'x': n.x, 'y': n.y, 'hcount': n.__dict__.get('bmu_count', 0)}, bmus) def train(self, data): """main training method to build clusters in som grid""" n_nghbs = self.n pcount = len(data) t = 0 while n_nghbs > 1: samples = self.pick_(pcount) for sample in samples: item = data[sample] bmu = self.bmu(item) bmu.bmu_count += 1 self.bmus[bmu.i] = 1 n_nghbs = int(self.decay.exp(t) / 2) neighbors = self.get_neighbors(bmu, n_nghbs) self.update_nodes(neighbors, item, bmu) self.bmus[bmu.i] = True t += 1 return
33.291667
119
0.507996
1,015
7,191
3.453202
0.126108
0.031954
0.022825
0.013695
0.865621
0.857632
0.857632
0.857632
0.857632
0.842225
0
0.007564
0.356557
7,191
216
120
33.291667
0.749946
0.075511
0
0.845238
0
0
0.039192
0
0
0
0
0
0
1
0.10119
false
0
0.035714
0.011905
0.238095
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
147345761c73466860d6afd61f01dd3e79ccb707
14,980
py
Python
src/genie/libs/parser/iosxr/tests/ShowRouteIpv6/cli/equal/golden_outpu_3_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
204
2018-06-27T00:55:27.000Z
2022-03-06T21:12:18.000Z
src/genie/libs/parser/iosxr/tests/ShowRouteIpv6/cli/equal/golden_outpu_3_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
468
2018-06-19T00:33:18.000Z
2022-03-31T23:23:35.000Z
src/genie/libs/parser/iosxr/tests/ShowRouteIpv6/cli/equal/golden_outpu_3_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
309
2019-01-16T20:21:07.000Z
2022-03-30T12:56:41.000Z
expected_output = { 'vrf': { 'default': { 'address_family': { 'ipv6': { 'routes': { '2001:db8:1234::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:1234::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:1579::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:1579::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:1981::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:1981::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:2222::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:2222::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:3456::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:3456::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:4021::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 
'route': '2001:db8:4021::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:5354::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:5354::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:5555::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:5555::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:6666::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:6666::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:7654::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:7654::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:7777::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:03:00' } } }, 'route': '2001:db8:7777::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:9843::8/128': { 'active': True, 'metric': 1, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::5054:ff:fef2:a625', 'outgoing_interface': 'GigabitEthernet0/0/0/1', 
'updated': '00:03:00' } } }, 'route': '2001:db8:9843::8/128', 'route_preference': 110, 'source_protocol': 'ospf', 'source_protocol_codes': 'O' }, '2001:db8:abcd::/64': { 'active': True, 'next_hop': { 'outgoing_interface': { 'GigabitEthernet0/0/0/1': { 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:07:43' } } }, 'route': '2001:db8:abcd::/64', 'source_protocol': 'connected', 'source_protocol_codes': 'C' }, '2001:db8:abcd::1/128': { 'active': True, 'next_hop': { 'outgoing_interface': { 'GigabitEthernet0/0/0/1': { 'outgoing_interface': 'GigabitEthernet0/0/0/1', 'updated': '00:07:43' } } }, 'route': '2001:db8:abcd::1/128', 'source_protocol': 'local', 'source_protocol_codes': 'L' }, '2001:db8:50e0:7b33:5054:ff:fe43:e2ee/128': { 'active': True, 'next_hop': { 'outgoing_interface': { 'MgmtEth0/RP0/CPU0/0': { 'outgoing_interface': 'MgmtEth0/RP0/CPU0/0', 'updated': '00:08:31' } } }, 'route': '2001:db8:50e0:7b33:5054:ff:fe43:e2ee/128', 'source_protocol': 'local', 'source_protocol_codes': 'L' }, '2001:db8:50e0:7b33::/64': { 'active': True, 'next_hop': { 'outgoing_interface': { 'MgmtEth0/RP0/CPU0/0': { 'outgoing_interface': 'MgmtEth0/RP0/CPU0/0', 'updated': '00:08:31' } } }, 'route': '2001:db8:50e0:7b33::/64', 'source_protocol': 'connected', 'source_protocol_codes': 'C' }, '::/0': { 'active': True, 'metric': 0, 'next_hop': { 'next_hop_list': { 1: { 'index': 1, 'next_hop': 'fe80::10ff:fe04:209e', 'outgoing_interface': 'MgmtEth0/RP0/CPU0/0', 'updated': '00:08:31' } } }, 'route': '::/0', 'route_preference': 2, 'source_protocol': 'application route', 'source_protocol_codes': 'a*' } } }, }, 'last_resort': { 'gateway': 'fe80::10ff:fe04:209e', 'to_network': '::' }, }, } }
48.478964
87
0.242724
817
14,980
4.27295
0.097919
0.086222
0.05729
0.155829
0.945574
0.912346
0.912346
0.910341
0.88456
0.87568
0
0.158222
0.656142
14,980
308
88
48.636364
0.519511
0
0
0.598039
0
0
0.255041
0.074175
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
215308e883130c3205af9eda80637819d2108fc3
172
py
Python
zametki/termcolor_test.py
anokata/pythonPetProjects
245c3ff11ae560b17830970061d8d60013948fd7
[ "MIT" ]
3
2017-04-30T17:44:53.000Z
2018-02-03T06:02:11.000Z
zametki/termcolor_test.py
anokata/pythonPetProjects
245c3ff11ae560b17830970061d8d60013948fd7
[ "MIT" ]
10
2021-03-18T20:17:19.000Z
2022-03-11T23:14:19.000Z
zametki/termcolor_test.py
anokata/pythonPetProjects
245c3ff11ae560b17830970061d8d60013948fd7
[ "MIT" ]
null
null
null
from termcolor import cprint cprint('Hello, World!', 'green', attrs=['dark']) cprint('Hello, World!', 'green', attrs=[]) cprint('Hello, World!', 'green', attrs=['bold'])
24.571429
48
0.645349
21
172
5.285714
0.47619
0.297297
0.432432
0.567568
0.702703
0
0
0
0
0
0
0
0.104651
172
6
49
28.666667
0.720779
0
0
0
0
0
0.362573
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
1
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
7
21533dfa9878b6f54236c22331c952f779f64003
85
py
Python
modules/decoders/__init__.py
valdersoul/bn-vae
b248193708e28f7314ba8f774d2112f0b7c69ab2
[ "MIT" ]
22
2020-06-01T12:51:47.000Z
2022-01-21T10:46:37.000Z
modules/decoders/__init__.py
valdersoul/bn-vae
b248193708e28f7314ba8f774d2112f0b7c69ab2
[ "MIT" ]
6
2020-08-14T06:56:58.000Z
2021-08-17T02:43:04.000Z
modules/decoders/__init__.py
valdersoul/bn-vae
b248193708e28f7314ba8f774d2112f0b7c69ab2
[ "MIT" ]
3
2020-11-24T01:06:56.000Z
2021-11-15T10:11:56.000Z
from .dec_lstm import * #from .dec_pixelcnn import * #from .dec_pixelcnn_v2 import *
21.25
31
0.764706
13
85
4.692308
0.461538
0.344262
0.42623
0.688525
0
0
0
0
0
0
0
0.013699
0.141176
85
3
32
28.333333
0.821918
0.670588
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
dcc311c29358140b14d200a98258f9d1c74acbe6
683
py
Python
docs/generate.py
ydcjeff/api-extractor
0c8b22d75f21d08c3e7601e1bf15a37963742516
[ "MIT" ]
1
2021-07-03T18:42:38.000Z
2021-07-03T18:42:38.000Z
docs/generate.py
ydcjeff/api-extractor
0c8b22d75f21d08c3e7601e1bf15a37963742516
[ "MIT" ]
1
2022-02-04T14:42:25.000Z
2022-02-06T02:21:44.000Z
docs/generate.py
ydcjeff/api-extractor
0c8b22d75f21d08c3e7601e1bf15a37963742516
[ "MIT" ]
null
null
null
from api_extractor import ( code_fence, format_base_cls, format_docstring, format_heading, format_name_and_signature, generate, get_base_cls, get_public_members, get_signature, is_property, render, transform_docstring, typeof, write_to_file, ) pages = { 'api.md': { 'title': 'API Reference', 'content': [ code_fence, format_base_cls, format_docstring, format_heading, format_name_and_signature, generate, get_base_cls, get_public_members, get_signature, is_property, render, transform_docstring, typeof, write_to_file, ], }, } generate(pages, './docs/')
17.075
32
0.653001
76
683
5.407895
0.421053
0.068127
0.072993
0.092457
0.822384
0.822384
0.822384
0.822384
0.822384
0.822384
0
0
0.259151
683
39
33
17.512821
0.812253
0
0
0.736842
1
0
0.055637
0
0
0
0
0
0
1
0
false
0
0.026316
0
0.026316
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
dce8553b603830e27b0eb4462072dc618301f7a5
43
py
Python
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/Python_Booleans_2.txt.py
webdevhub42/Lambda
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
[ "MIT" ]
null
null
null
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/Python_Booleans_2.txt.py
webdevhub42/Lambda
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
[ "MIT" ]
null
null
null
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/Python_Booleans_2.txt.py
webdevhub42/Lambda
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
[ "MIT" ]
null
null
null
print(10 > 9) print(10 == 9) print(10 < 9)
10.75
14
0.55814
9
43
2.666667
0.333333
0.875
1
1.083333
1
1
0
0
0
0
0
0.264706
0.209302
43
3
15
14.333333
0.441176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
1
1
1
1
1
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
12
b49e9c5f3d2732bb1c905bfbce8e9e780e974a01
9,423
py
Python
accelbyte_py_sdk/api/platform/wrappers/_currency.py
AccelByte/accelbyte-python-sdk
dcd311fad111c59da828278975340fb92e0f26f7
[ "MIT" ]
null
null
null
accelbyte_py_sdk/api/platform/wrappers/_currency.py
AccelByte/accelbyte-python-sdk
dcd311fad111c59da828278975340fb92e0f26f7
[ "MIT" ]
1
2021-10-13T03:46:58.000Z
2021-10-13T03:46:58.000Z
accelbyte_py_sdk/api/platform/wrappers/_currency.py
AccelByte/accelbyte-python-sdk
dcd311fad111c59da828278975340fb92e0f26f7
[ "MIT" ]
null
null
null
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved. # This is licensed software from AccelByte Inc, for limitations # and restrictions contact your company contract manager. # # Code generated. DO NOT EDIT! # template file: justice_py_sdk_codegen/__main__.py # pylint: disable=duplicate-code # pylint: disable=line-too-long # pylint: disable=missing-function-docstring # pylint: disable=missing-function-docstring # pylint: disable=missing-module-docstring # pylint: disable=too-many-arguments # pylint: disable=too-many-branches # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-lines # pylint: disable=too-many-locals # pylint: disable=too-many-public-methods # pylint: disable=too-many-return-statements # pylint: disable=too-many-statements # pylint: disable=unused-import from typing import Any, Dict, List, Optional, Tuple, Union from ....core import HeaderStr from ....core import get_namespace as get_services_namespace from ....core import run_request from ....core import run_request_async from ....core import same_doc_as from ..models import CurrencyConfig from ..models import CurrencyCreate from ..models import CurrencyInfo from ..models import CurrencySummary from ..models import CurrencyUpdate from ..models import ErrorEntity from ..models import ValidationErrorEntity from ..operations.currency import CreateCurrency from ..operations.currency import DeleteCurrency from ..operations.currency import GetCurrencyConfig from ..operations.currency import GetCurrencySummary from ..operations.currency import ListCurrencies from ..operations.currency import ListCurrenciesCurrencyTypeEnum from ..operations.currency import PublicListCurrencies from ..operations.currency import PublicListCurrenciesCurrencyTypeEnum from ..operations.currency import UpdateCurrency from ..models import CurrencyCreateCurrencyTypeEnum from ..models import CurrencyInfoCurrencyTypeEnum from ..models import CurrencySummaryCurrencyTypeEnum @same_doc_as(CreateCurrency) def 
create_currency(body: Optional[CurrencyCreate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = CreateCurrency.create( body=body, namespace=namespace, ) return run_request(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(CreateCurrency) async def create_currency_async(body: Optional[CurrencyCreate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = CreateCurrency.create( body=body, namespace=namespace, ) return await run_request_async(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(DeleteCurrency) def delete_currency(currency_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = DeleteCurrency.create( currency_code=currency_code, namespace=namespace, ) return run_request(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(DeleteCurrency) async def delete_currency_async(currency_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = DeleteCurrency.create( currency_code=currency_code, namespace=namespace, ) return await run_request_async(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(GetCurrencyConfig) def get_currency_config(currency_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error 
request = GetCurrencyConfig.create( currency_code=currency_code, namespace=namespace, ) return run_request(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(GetCurrencyConfig) async def get_currency_config_async(currency_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = GetCurrencyConfig.create( currency_code=currency_code, namespace=namespace, ) return await run_request_async(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(GetCurrencySummary) def get_currency_summary(currency_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = GetCurrencySummary.create( currency_code=currency_code, namespace=namespace, ) return run_request(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(GetCurrencySummary) async def get_currency_summary_async(currency_code: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = GetCurrencySummary.create( currency_code=currency_code, namespace=namespace, ) return await run_request_async(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(ListCurrencies) def list_currencies(currency_type: Optional[Union[str, ListCurrenciesCurrencyTypeEnum]] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = ListCurrencies.create( currency_type=currency_type, namespace=namespace, ) return run_request(request, 
additional_headers=x_additional_headers, **kwargs) @same_doc_as(ListCurrencies) async def list_currencies_async(currency_type: Optional[Union[str, ListCurrenciesCurrencyTypeEnum]] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = ListCurrencies.create( currency_type=currency_type, namespace=namespace, ) return await run_request_async(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(PublicListCurrencies) def public_list_currencies(currency_type: Optional[Union[str, PublicListCurrenciesCurrencyTypeEnum]] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = PublicListCurrencies.create( currency_type=currency_type, namespace=namespace, ) return run_request(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(PublicListCurrencies) async def public_list_currencies_async(currency_type: Optional[Union[str, PublicListCurrenciesCurrencyTypeEnum]] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = PublicListCurrencies.create( currency_type=currency_type, namespace=namespace, ) return await run_request_async(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(UpdateCurrency) def update_currency(currency_code: str, body: Optional[CurrencyUpdate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = UpdateCurrency.create( currency_code=currency_code, body=body, namespace=namespace, ) 
return run_request(request, additional_headers=x_additional_headers, **kwargs) @same_doc_as(UpdateCurrency) async def update_currency_async(currency_code: str, body: Optional[CurrencyUpdate] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs): if namespace is None: namespace, error = get_services_namespace() if error: return None, error request = UpdateCurrency.create( currency_code=currency_code, body=body, namespace=namespace, ) return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
39.927966
219
0.737875
1,076
9,423
6.252788
0.106877
0.106124
0.074911
0.049941
0.743014
0.732759
0.732759
0.728597
0.708977
0.708977
0
0.000514
0.173618
9,423
235
220
40.097872
0.86349
0.08129
0
0.707182
1
0
0
0
0
0
0
0
0
1
0.038674
false
0
0.138122
0
0.331492
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
b4a95734a04c469829ca22143a04925d20169f13
6,016
py
Python
darknet.py
tccfree/yolov1
cbe5fc7c479cdc2cc94f0ed21ad3f39fe25bb26c
[ "MIT" ]
87
2019-03-22T03:43:23.000Z
2022-03-07T05:10:00.000Z
darknet.py
beelze-b/yolo_v1_pytorch
e17c1cae1c48333970c69a2cfbdd67ae83c118ff
[ "MIT" ]
12
2020-04-27T05:04:06.000Z
2022-02-10T00:15:43.000Z
darknet.py
beelze-b/yolo_v1_pytorch
e17c1cae1c48333970c69a2cfbdd67ae83c118ff
[ "MIT" ]
40
2020-04-23T04:49:20.000Z
2022-03-06T22:50:40.000Z
import torch import torch.nn as nn import torch.nn.functional as F from util_layers import Squeeze class DarkNet(nn.Module): def __init__(self, conv_only=False, bn=True, init_weight=True): super(DarkNet, self).__init__() # Make layers self.features = self._make_conv_bn_layers() if bn else self._make_conv_layers() if not conv_only: self.fc = self._make_fc_layers() # Initialize weights if init_weight: self._initialize_weights() self.conv_only = conv_only def forward(self, x): x = self.features(x) if not self.conv_only: x = self.fc(x) return x def _make_conv_bn_layers(self): conv = nn.Sequential( nn.Conv2d(3, 64, 7, stride=2, padding=3), nn.BatchNorm2d(64), nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2), nn.Conv2d(64, 192, 3, padding=1), nn.BatchNorm2d(192), nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2), nn.Conv2d(192, 128, 1), nn.BatchNorm2d(128), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(128, 256, 3, padding=1), nn.BatchNorm2d(256), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 256, 1), nn.BatchNorm2d(256), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.BatchNorm2d(512), nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2), nn.Conv2d(512, 256, 1), nn.BatchNorm2d(256), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.BatchNorm2d(512), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 256, 1), nn.BatchNorm2d(256), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.BatchNorm2d(512), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 256, 1), nn.BatchNorm2d(256), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.BatchNorm2d(512), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 256, 1), nn.BatchNorm2d(256), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.BatchNorm2d(512), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 512, 1), nn.BatchNorm2d(512), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 1024, 3, padding=1), nn.BatchNorm2d(1024), nn.LeakyReLU(0.1, 
inplace=True), nn.MaxPool2d(2), nn.Conv2d(1024, 512, 1), nn.BatchNorm2d(512), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 1024, 3, padding=1), nn.BatchNorm2d(1024), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(1024, 512, 1), nn.BatchNorm2d(512), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 1024, 3, padding=1), nn.BatchNorm2d(1024), nn.LeakyReLU(0.1, inplace=True) ) return conv def _make_conv_layers(self): conv = nn.Sequential( nn.Conv2d(3, 64, 7, stride=2, padding=3), nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2), nn.Conv2d(64, 192, 3, padding=1), nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2), nn.Conv2d(192, 128, 1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(128, 256, 3, padding=1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 256, 1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2), nn.Conv2d(512, 256, 1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 256, 1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 256, 1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 256, 1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(256, 512, 3, padding=1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 512, 1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 1024, 3, padding=1), nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2), nn.Conv2d(1024, 512, 1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 1024, 3, padding=1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(1024, 512, 1), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(512, 1024, 3, padding=1), nn.LeakyReLU(0.1, inplace=True) ) return conv def _make_fc_layers(self): fc = nn.Sequential( nn.AvgPool2d(7), Squeeze(), nn.Linear(1024, 1000) ) return fc def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): 
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0)
34.377143
91
0.511968
780
6,016
3.885897
0.094872
0.108215
0.158364
0.171561
0.765424
0.761795
0.755196
0.755196
0.755196
0.755196
0
0.138882
0.348903
6,016
174
92
34.574713
0.634925
0.004987
0
0.732026
0
0
0.002674
0
0
0
0
0
0
1
0.039216
false
0
0.026144
0
0.098039
0
0
0
0
null
0
0
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
37138ae157ee742315a6536aec9c46684a4235b6
4,445
py
Python
test/unit/test_bpgcoind_data_shims.py
bpg-project/Sentinel
73de2fcbd4df7d46e042199ee530e0cc242ab5a3
[ "MIT" ]
null
null
null
test/unit/test_bpgcoind_data_shims.py
bpg-project/Sentinel
73de2fcbd4df7d46e042199ee530e0cc242ab5a3
[ "MIT" ]
null
null
null
test/unit/test_bpgcoind_data_shims.py
bpg-project/Sentinel
73de2fcbd4df7d46e042199ee530e0cc242ab5a3
[ "MIT" ]
1
2018-10-18T21:31:02.000Z
2018-10-18T21:31:02.000Z
import pytest import sys import os os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf')) sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib'))) import bpgcoinlib @pytest.fixture def sentinel_proposal_hex(): return '5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d5d' @pytest.fixture def sentinel_superblock_hex(): return '5b227375706572626c6f636b222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33227d5d' @pytest.fixture def bpgcoind_proposal_hex(): return '5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313336383430302c20226e616d65223a2022626565722d7265696d62757273656d656e742d39222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2034392e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f7777772e6461736863656e7472616c2e6f72672f702f626565722d7265696d62757273656d656e742d39227d5d5d' @pytest.fixture def bpgcoind_superblock_hex(): return 
'5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d' # ======================================================================== def test_SHIM_deserialise_from_bpgcoind(bpgcoind_proposal_hex, bpgcoind_superblock_hex): assert bpgcoinlib.SHIM_deserialise_from_bpgcoind(bpgcoind_proposal_hex) == '5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313336383430302c20226e616d65223a2022626565722d7265696d62757273656d656e742d39222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2034392e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202275726c223a202268747470733a2f2f7777772e6461736863656e7472616c2e6f72672f702f626565722d7265696d62757273656d656e742d39227d5d' assert bpgcoinlib.SHIM_deserialise_from_bpgcoind(bpgcoind_superblock_hex) == '5b227375706572626c6f636b222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33227d5d' def test_SHIM_serialise_for_bpgcoind(sentinel_proposal_hex, sentinel_superblock_hex): assert bpgcoinlib.SHIM_serialise_for_bpgcoind(sentinel_proposal_hex) == 
'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20313439313032323830302c20226e616d65223a2022626565722d7265696d62757273656d656e742d37222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a20372e30303030303030302c202273746172745f65706f6368223a20313438333235303430302c202274797065223a20312c202275726c223a202268747470733a2f2f6461736863656e7472616c2e636f6d2f626565722d7265696d62757273656d656e742d37227d5d5d' assert bpgcoinlib.SHIM_serialise_for_bpgcoind(sentinel_superblock_hex) == '5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2036323530302c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e7473223a2022357c33222c202274797065223a20327d5d5d'
113.974359
584
0.934758
135
4,445
30.377778
0.266667
0.008778
0.015606
0.019751
0.101683
0.095343
0.095343
0.019995
0.019995
0.019995
0
0.683556
0.023172
4,445
38
585
116.973684
0.26094
0.016198
0
0.166667
0
0
0.778998
0.773507
0
1
0
0
0.166667
1
0.25
false
0
0.166667
0.166667
0.583333
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
1
0
0
0
0
0
1
1
null
1
0
0
0
0
1
0
0
0
1
0
0
0
9
2e923ecb7c4c1365d69bbadf064ab68ec68bbccb
3,672
py
Python
keyboards/inline/__init__.py
itcosplay/cryptobot
6890cfde64a631bf0e4db55f6873a2217212d801
[ "MIT" ]
null
null
null
keyboards/inline/__init__.py
itcosplay/cryptobot
6890cfde64a631bf0e4db55f6873a2217212d801
[ "MIT" ]
null
null
null
keyboards/inline/__init__.py
itcosplay/cryptobot
6890cfde64a631bf0e4db55f6873a2217212d801
[ "MIT" ]
null
null
null
from .request_kb import create_kp_operation_type from .request_kb import create_kb_choose_currency from .request_kb import create_kb_choose_card from .request_kb import create_kb_send_request from .request_kb import create_kb_plus_minus from .request_kb import create_kb_smart_choose_curr from .request_kb import create_kb_send_request_for_change from .request_kb import create_kb_send_request_atm from .request_kb import create_kb_choose_date from .in_processing import create_kb_current_requests from .in_processing import create_kb_chosen_request from .in_processing import create_kb_what_sum from .in_processing import create_kb_choose_currency_processing from .in_processing import create_kb_confirm_close from .in_processing import create_kb_what_sum_correct from .in_processing import create_kb_what_blue from .in_processing import create_kb_confirm_blue from .in_processing import create_kb_corrected_sum from .in_processing import create_kb_confirm_reserve from .in_processing import create_kb_sum_correct_chunk from .in_processing import create_kb_message_keyboard from .in_processing import create_kb_confirm_close_request from .in_processing import create_kb_which_sum_close from .in_processing import create_kb_change_request from .in_processing import create_kb_change_request from .in_processing import create_kb_change_date from .in_processing import create_kb_new_request_type from .in_processing import create_kb_another_currecy_add from .in_processing import create_kb_choose_give_recive_change from .in_processing import create_kb_confirm_cancel_request from .in_processing import create_kb_plus_or_minus_sum from .in_processing import cb_current_requests from .in_processing import cb_chosen_requests from .in_processing import cb_what_sum from .in_processing import cb_choose_currency from .in_processing import cb_confirm_close from .in_processing import cb_what_sum_correct from .in_processing import cb_what_bluе from .in_processing import cb_confirm_blue from 
.in_processing import cb_corrected_sum from .in_processing import cb_confirm_reserve from .in_processing import cb_sum_correct_chunk from .in_processing import cb_message_keyboard from .in_processing import cb_confirm_close_request from .in_processing import cb_which_sum_close from .in_processing import cb_change_request from .in_processing import cb_anoter_currency_add from .permits import create_kb_all_permits from .permits import create_kb_set_status_permit from .permits import create_kb_confirm_single_permit from .permits import cb_all_permits from .permits import cb_set_status_prmt from .smsinfo import create_kb_who_waste from .smsinfo import create_kb_yes_no_note from .smsinfo import create_kb_for_what_waste from .smsinfo import cb_who_waste from .smsinfo import cb_yes_no_note from .smsinfo import cb_for_what_waste from .balance_keyboards import create_kb_what_balance_to_show from .back_button_keyboard import create_kb_back_button from .report_keyboards import create_kb_reports_menu from .report_keyboards import create_kb_box_office from .report_keyboards import create_kb_confirm_box_office from .report_keyboards import create_kb_what_date_report from .report_keyboards import create_kb_daily_report from .report_keyboards import create_kb_finished_requests from .report_keyboards import cb_finished_requests from .report_keyboards import create_kb_change_fin_request from .report_keyboards import create_kb_another_currecy_add_fin from .report_keyboards import cb_anoter_currency_add_fin from .report_keyboards import create_kb_change_sum_finished_req from .report_keyboards import cb_change_finished_req from .log_keyboards import create_kb_under_log
44.240964
63
0.898148
590
3,672
5.084746
0.132203
0.196
0.224
0.278667
0.864333
0.741333
0.487
0.159333
0.040667
0.040667
0
0
0.081972
3,672
83
64
44.240964
0.889944
0
0
0.027397
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
2ea920e8e435d2ac3afb22fdde1eefa3a84c4925
47
py
Python
tests/test_import_package.py
jmann277/oura_cdm
de51c780d49744234757ddce2718a59abd8d8a03
[ "MIT" ]
null
null
null
tests/test_import_package.py
jmann277/oura_cdm
de51c780d49744234757ddce2718a59abd8d8a03
[ "MIT" ]
null
null
null
tests/test_import_package.py
jmann277/oura_cdm
de51c780d49744234757ddce2718a59abd8d8a03
[ "MIT" ]
null
null
null
def test_import_package(): import oura_cdm
15.666667
26
0.765957
7
47
4.714286
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.170213
47
2
27
23.5
0.846154
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0
1
0
1.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
7
2c03ed4f4c7183eee03bdc4d8c595b50e8f56967
5,508
py
Python
suppy/test/integration/verification/test_verification.py
bmaris98/suppy
8450c6d25ffa492cdedfbbb4c111d22e7f2788a7
[ "BSD-3-Clause" ]
null
null
null
suppy/test/integration/verification/test_verification.py
bmaris98/suppy
8450c6d25ffa492cdedfbbb4c111d22e7f2788a7
[ "BSD-3-Clause" ]
null
null
null
suppy/test/integration/verification/test_verification.py
bmaris98/suppy
8450c6d25ffa492cdedfbbb4c111d22e7f2788a7
[ "BSD-3-Clause" ]
null
null
null
import pytest from suppy.utils.stats_constants import ERRORS, ERRORS_FOUND, ERRORS_MISSED, RESOURCE_COUNT, TOTAL_CALIBRATION_COST, WITHOUT_ERROR_COUNT, WITH_ERROR_COUNT from suppy.simulator.atomic_network import AtomicNetwork from suppy.simulator.resource_stream import ResourceStream from suppy.simulator.atomics.end_atomic import EndAtomic from suppy.simulator.atomics.start_atomic import StartAtomic from suppy.simulator.atomics.random_error_atomic import RandomErrorAtomic from suppy.simulator.atomics.verification_atomic import VerificationAtomic from suppy.simulator.event_handler import EventHandler def test_atomic_50(): rate = 0.5 duration = 10 cost = 200 calibration_cost = 500 calibration_duration = 40 calibration_steps = 50 error_type = 'ERR' start_uid = 'start' resource_count = 500 resource_type = 'type' seh = EventHandler() start_atomic = StartAtomic(start_uid, seh, 'start', resource_type, resource_count) error_atomic = RandomErrorAtomic('rnd', seh, 'random_error', error_type, rate) test_atomic = VerificationAtomic('test', seh, 'test', duration, cost, calibration_duration, calibration_steps, calibration_cost, error_type, 1) end_error = EndAtomic('end_error', seh, 'end0') end_ok = EndAtomic('end_ok', seh, 'end1') start_error_stream = ResourceStream(start_atomic, error_atomic) error_test_stream = ResourceStream(error_atomic, test_atomic) test_end_error_stream = ResourceStream(test_atomic, end_error) test_end_ok_stream = ResourceStream(test_atomic, end_ok) start_atomic.register_output_stream(start_error_stream) error_atomic.register_input_stream(start_error_stream) error_atomic.register_output_stream(error_test_stream) test_atomic.register_input_stream(error_test_stream) test_atomic.register_output_stream(test_end_error_stream) test_atomic.register_output_stream(test_end_ok_stream) end_error.register_input_stream(test_end_error_stream) end_ok.register_input_stream(test_end_ok_stream) network = AtomicNetwork() network.add_atomic(start_atomic) 
network.add_atomic(error_atomic) network.add_atomic(test_atomic) network.add_atomic(end_error) network.add_atomic(end_ok) network.mark_as_start(start_atomic.uid) seh.run_on_network(network) test_stats = test_atomic.get_stats() end_error_stats = end_error.get_stats() end_ok_stats = end_ok.get_stats() assert not end_error_stats[RESOURCE_COUNT] == 0 assert not end_ok_stats[RESOURCE_COUNT] == 0 assert end_error_stats[RESOURCE_COUNT] == test_stats[WITH_ERROR_COUNT] assert end_ok_stats[RESOURCE_COUNT] == test_stats[WITHOUT_ERROR_COUNT] assert end_ok_stats[RESOURCE_COUNT] + end_error_stats[RESOURCE_COUNT] == resource_count assert test_stats[TOTAL_CALIBRATION_COST] == calibration_cost * 10 assert test_stats[ERRORS_MISSED] == 0 assert test_stats[ERRORS_FOUND] == end_error_stats[RESOURCE_COUNT] def test_atomic_100_2(): rate = 0.5 duration = 10 cost = 200 calibration_cost = 500 calibration_duration = 40 calibration_steps = 50 error_type = 'ERR' start_uid = 'start' resource_count = 1000 resource_type = 'type' seh = EventHandler() start_atomic = StartAtomic(start_uid, seh, 'start', resource_type, resource_count) error_atomic = RandomErrorAtomic('rnd', seh, 'random_error', error_type, rate) test_atomic = VerificationAtomic('test', seh, 'test', duration, cost, calibration_duration, calibration_steps, calibration_cost, error_type, 2) end_error = EndAtomic('end_error', seh, 'end0') end_ok = EndAtomic('end_ok', seh, 'end1') start_error_stream = ResourceStream(start_atomic, error_atomic) error_test_stream = ResourceStream(error_atomic, test_atomic) test_end_error_stream = ResourceStream(test_atomic, end_error) test_end_ok_stream = ResourceStream(test_atomic, end_ok) start_atomic.register_output_stream(start_error_stream) error_atomic.register_input_stream(start_error_stream) error_atomic.register_output_stream(error_test_stream) test_atomic.register_input_stream(error_test_stream) test_atomic.register_output_stream(test_end_error_stream) 
test_atomic.register_output_stream(test_end_ok_stream) end_error.register_input_stream(test_end_error_stream) end_ok.register_input_stream(test_end_ok_stream) network = AtomicNetwork() network.add_atomic(start_atomic) network.add_atomic(error_atomic) network.add_atomic(test_atomic) network.add_atomic(end_error) network.add_atomic(end_ok) network.mark_as_start(start_atomic.uid) seh.run_on_network(network) test_stats = test_atomic.get_stats() end_error_stats = end_error.get_stats() end_ok_stats = end_ok.get_stats() assert not end_error_stats[RESOURCE_COUNT] == 0 assert not end_ok_stats[RESOURCE_COUNT] == 0 assert end_error_stats[RESOURCE_COUNT] == test_stats[ERRORS_FOUND] assert end_ok_stats[RESOURCE_COUNT] == resource_count - test_stats[ERRORS_FOUND] assert end_ok_stats[RESOURCE_COUNT] + end_error_stats[RESOURCE_COUNT] == resource_count assert test_stats[TOTAL_CALIBRATION_COST] == calibration_cost * 20 / 2 assert test_stats[ERRORS_MISSED] == test_stats[WITH_ERROR_COUNT] - test_stats[ERRORS_FOUND] assert test_stats[ERRORS_FOUND] / resource_count == pytest.approx(1/4, 0.1) assert end_ok_stats[ERRORS][error_type] / resource_count == pytest.approx(1/4, 0.1)
41.413534
154
0.780501
752
5,508
5.287234
0.098404
0.054326
0.058853
0.052314
0.847082
0.807596
0.799799
0.799799
0.772887
0.772887
0
0.013707
0.13907
5,508
133
155
41.413534
0.824757
0
0
0.754717
0
0
0.022872
0
0
0
0
0
0.160377
1
0.018868
false
0
0.084906
0
0.103774
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
d3231898ba50d84030286cf811754648983d141f
101
py
Python
thinkpython_allen_downey/exercise_8_8.py
alirkaya/programming-textbook-solutions
7362dce474b8a881d654f95604e09d1d0e76aec2
[ "MIT" ]
null
null
null
thinkpython_allen_downey/exercise_8_8.py
alirkaya/programming-textbook-solutions
7362dce474b8a881d654f95604e09d1d0e76aec2
[ "MIT" ]
null
null
null
thinkpython_allen_downey/exercise_8_8.py
alirkaya/programming-textbook-solutions
7362dce474b8a881d654f95604e09d1d0e76aec2
[ "MIT" ]
null
null
null
print('\nthis is a new line'.strip('\n')) print('\nthis is a new line'.replace(' is ', ' will be '))
33.666667
58
0.60396
18
101
3.388889
0.611111
0.327869
0.393443
0.42623
0.655738
0.655738
0
0
0
0
0
0
0.158416
101
2
59
50.5
0.717647
0
0
0
0
0
0.544554
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
7
d36415d8ed2325f8d2503930de782cc80de11138
1,974
py
Python
Ch_7_Mangle Data Like a Pro/demo_byte_bytearray.py
brianchiang-tw/Introducing_Python
557fcddb6329741a177d6ee1d24122b36e106235
[ "MIT" ]
1
2020-07-21T08:34:08.000Z
2020-07-21T08:34:08.000Z
Ch_7_Mangle Data Like a Pro/demo_byte_bytearray.py
brianchiang-tw/Introducing_Python
557fcddb6329741a177d6ee1d24122b36e106235
[ "MIT" ]
null
null
null
Ch_7_Mangle Data Like a Pro/demo_byte_bytearray.py
brianchiang-tw/Introducing_Python
557fcddb6329741a177d6ee1d24122b36e106235
[ "MIT" ]
null
null
null
blist = [ 1, 2, 3, 255 ] # byte is immutable the_bytes = bytes( blist ) # b'\x01\x02\x03\xff' print( the_bytes ) the_byte_array = bytearray( blist ) # bytearray(b'\x01\x02\x03\xff') print( the_byte_array ) # bytearray is mutable the_byte_array = bytearray( blist ) print( the_byte_array ) the_byte_array[0] = 127 print( the_byte_array ) the_bytes = bytes( range(0, 256) ) the_byte_array = bytearray( range(0, 256) ) ''' b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff' ''' print( the_bytes ) ''' bytearray(b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff') ''' print( the_byte_array )
58.058824
749
0.720365
407
1,974
3.439803
0.447174
0.04
0.068571
0.06
0.891429
0.812857
0.812857
0.782857
0.782857
0.782857
0
0.197034
0.043566
1,974
34
750
58.058824
0.544492
0.045086
0
0.615385
0
0
0
0
0
1
0
0
0
1
0
false
0
0
0
0
0.461538
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
1
0
8
6cb64a67a7c29dcf9b2f060a17a17f7fd44d8009
183
py
Python
covid_particle_filter/particle/SEIQHR/__init__.py
MAPMG/EpiCoMP
5e977b46b660391fdb9cdc66f0dd67ee388b7d9a
[ "BSD-3-Clause" ]
1
2021-06-09T18:33:57.000Z
2021-06-09T18:33:57.000Z
covid_particle_filter/particle/SEIQHR/__init__.py
MAPMG/EpiCoMP
5e977b46b660391fdb9cdc66f0dd67ee388b7d9a
[ "BSD-3-Clause" ]
1
2021-07-19T19:30:51.000Z
2021-07-19T19:33:16.000Z
covid_particle_filter/particle/SEIQHR/__init__.py
MAPMG/EpiCoMP
5e977b46b660391fdb9cdc66f0dd67ee388b7d9a
[ "BSD-3-Clause" ]
null
null
null
from covid_particle_filter.particle.SEIQHR.SEIQHR import * from covid_particle_filter.particle.SEIQHR.CombinedSEQIHR import * from covid_particle_filter.particle.HCompartment import *
61
66
0.879781
23
183
6.73913
0.347826
0.174194
0.329032
0.445161
0.754839
0.754839
0
0
0
0
0
0
0.060109
183
3
67
61
0.901163
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
1
0
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
6cbbfe84839a0bfce88d55d1071928706bf62d2c
178
py
Python
sportsdataverse/nhl/__init__.py
saiemgilani/sportsdataverse-py
77ae3accbb071b5308335b931e4e55a65e1500cd
[ "MIT" ]
12
2021-10-15T01:24:18.000Z
2022-03-15T17:00:22.000Z
sportsdataverse/nhl/__init__.py
saiemgilani/sportsdataverse-py
77ae3accbb071b5308335b931e4e55a65e1500cd
[ "MIT" ]
19
2021-11-02T05:53:41.000Z
2022-03-16T14:16:51.000Z
sportsdataverse/nhl/__init__.py
saiemgilani/sportsdataverse-py
77ae3accbb071b5308335b931e4e55a65e1500cd
[ "MIT" ]
1
2021-12-21T14:49:25.000Z
2021-12-21T14:49:25.000Z
from sportsdataverse.nhl.nhl_loaders import * from sportsdataverse.nhl.nhl_pbp import * from sportsdataverse.nhl.nhl_schedule import * from sportsdataverse.nhl.nhl_teams import *
44.5
46
0.848315
24
178
6.125
0.333333
0.517007
0.598639
0.680272
0.632653
0
0
0
0
0
0
0
0.08427
178
4
47
44.5
0.90184
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
9f2e82606ec19b8c240da35faddf3e99eda570e1
198
py
Python
src/phorum/models/querysets.py
sairon/score-phorum
7fdad0427b7f22935f0cf1bcee8d1ff4a9495196
[ "BSD-3-Clause" ]
1
2015-09-20T08:30:24.000Z
2015-09-20T08:30:24.000Z
src/phorum/models/querysets.py
sairon/score-phorum
7fdad0427b7f22935f0cf1bcee8d1ff4a9495196
[ "BSD-3-Clause" ]
4
2016-03-30T18:21:25.000Z
2021-06-10T17:42:48.000Z
src/phorum/models/querysets.py
sairon/score-phorum
7fdad0427b7f22935f0cf1bcee8d1ff4a9495196
[ "BSD-3-Clause" ]
1
2016-01-07T00:45:09.000Z
2016-01-07T00:45:09.000Z
from django.db import models class RoomQueryset(models.QuerySet): def pinned(self): return self.filter(pinned=True) def not_pinned(self): return self.filter(pinned=False)
19.8
40
0.69697
26
198
5.269231
0.615385
0.145985
0.233577
0.291971
0.467153
0.467153
0
0
0
0
0
0
0.207071
198
9
41
22
0.872611
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.166667
0.333333
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
7
9f580fda3c1a28dd0ac5ae98787a9e112c8bbaee
12,214
py
Python
modules/unet.py
ykivva/Consistency_LS
11ad36c8ddad7dec2bbc3e49850186dba8e7985c
[ "MIT" ]
null
null
null
modules/unet.py
ykivva/Consistency_LS
11ad36c8ddad7dec2bbc3e49850186dba8e7985c
[ "MIT" ]
null
null
null
modules/unet.py
ykivva/Consistency_LS
11ad36c8ddad7dec2bbc3e49850186dba8e7985c
[ "MIT" ]
null
null
null
import os, sys, math, random, itertools import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torchvision import datasets, transforms, models from torch.optim.lr_scheduler import MultiStepLR from torch.utils.checkpoint import checkpoint from models import TrainableModel from utils import * import pdb class UNet_up_block(nn.Module): def __init__(self, prev_channels, input_channels, output_channels, up_sample=True): super().__init__() self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) self.conv1 = nn.Conv2d(prev_channels + input_channels, output_channels, 3, padding=1) self.bn1 = nn.GroupNorm(8, output_channels) self.conv2 = nn.Conv2d(output_channels, output_channels, 3, padding=1) self.bn2 = nn.GroupNorm(8, output_channels) self.conv3 = nn.Conv2d(output_channels, output_channels, 3, padding=1) self.bn3 = nn.GroupNorm(8, output_channels) self.relu = torch.nn.ReLU() self.up_sample = up_sample def forward(self, prev_feature_map, x): if self.up_sample: x = self.up_sampling(x) x = torch.cat((x, prev_feature_map), dim=1) x = self.relu(self.bn1(self.conv1(x))) x = self.relu(self.bn2(self.conv2(x))) x = self.relu(self.bn3(self.conv3(x))) return x class UNet_down_block(nn.Module): def __init__(self, input_channels, output_channels, down_size=True): super().__init__() self.conv1 = nn.Conv2d(input_channels, output_channels, 3, padding=1) self.bn1 = nn.GroupNorm(8, output_channels) self.conv2 = nn.Conv2d(output_channels, output_channels, 3, padding=1) self.bn2 = nn.GroupNorm(8, output_channels) self.conv3 = nn.Conv2d(output_channels, output_channels, 3, padding=1) self.bn3 = nn.GroupNorm(8, output_channels) self.max_pool = nn.MaxPool2d(2, 2) self.relu = nn.ReLU() self.down_size = down_size def forward(self, x): x = self.relu(self.bn1(self.conv1(x))) x = self.relu(self.bn2(self.conv2(x))) x = self.relu(self.bn3(self.conv3(x))) if self.down_size: x = self.max_pool(x) return x class UNet(TrainableModel): def 
__init__(self, downsample=6, in_channels=3, out_channels=3): super().__init__() self.in_channels, self.out_channels, self.downsample = in_channels, out_channels, downsample self.down1 = UNet_down_block(in_channels, 16, False) self.down_blocks = nn.ModuleList( [UNet_down_block(2**(4+i), 2**(5+i), True) for i in range(0, downsample)] ) bottleneck = 2**(4 + downsample) self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn1 = nn.GroupNorm(8, bottleneck) self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn2 = nn.GroupNorm(8, bottleneck) self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn3 = nn.GroupNorm(8, bottleneck) self.up_blocks = nn.ModuleList( [UNet_up_block(2**(4+i), 2**(5+i), 2**(4+i)) for i in range(0, downsample)] ) self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1) self.last_bn = nn.GroupNorm(8, 16) self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0) self.relu = nn.ReLU() def forward(self, x): x = self.down1(x) xvals = [x] for i in range(0, self.downsample): x = self.down_blocks[i](x) xvals.append(x) x = self.relu(self.bn1(self.mid_conv1(x))) x = self.relu(self.bn2(self.mid_conv2(x))) x = self.relu(self.bn3(self.mid_conv3(x))) for i in range(0, self.downsample)[::-1]: x = self.up_blocks[i](xvals[i], x) x = self.relu(self.last_bn(self.last_conv1(x))) x = self.relu(self.last_conv2(x)) return x def loss(self, pred, target): loss = torch.tensor(0.0, device=pred.device) return loss, (loss.detach(),) class UNetReshade(TrainableModel): def __init__(self, downsample=6, in_channels=3, out_channels=3): super().__init__() self.in_channels, self.out_channels, self.downsample = in_channels, out_channels, downsample self.down1 = UNet_down_block(in_channels, 16, False) self.down_blocks = nn.ModuleList( [UNet_down_block(2**(4+i), 2**(5+i), True) for i in range(0, downsample)] ) bottleneck = 2**(4 + downsample) self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn1 = nn.GroupNorm(8, 
bottleneck) self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn2 = nn.GroupNorm(8, bottleneck) self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1) self.bn3 = nn.GroupNorm(8, bottleneck) self.up_blocks = nn.ModuleList( [UNet_up_block(2**(4+i), 2**(5+i), 2**(4+i)) for i in range(0, downsample)] ) self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1) self.last_bn = nn.GroupNorm(8, 16) self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0) self.relu = nn.ReLU() def forward(self, x): x = self.down1(x) xvals = [x] for i in range(0, self.downsample): x = self.down_blocks[i](x) xvals.append(x) x = self.relu(self.bn1(self.mid_conv1(x))) x = self.relu(self.bn2(self.mid_conv2(x))) x = self.relu(self.bn3(self.mid_conv3(x))) for i in range(0, self.downsample)[::-1]: x = self.up_blocks[i](xvals[i], x) x = self.relu(self.last_bn(self.last_conv1(x))) x = self.relu(self.last_conv2(x)) x = x.clamp(max=1, min=0).mean(dim=1, keepdim=True) x = x.expand(-1, 3, -1, -1) return x def loss(self, pred, target): loss = torch.tensor(0.0, device=pred.device) return loss, (loss.detach(),) class UNetOld(TrainableModel): def __init__(self, in_channels=3, out_channels=3): super().__init__() self.in_channels, self.out_channels = in_channels, out_channels self.down_block1 = UNet_down_block(in_channels, 16, False) # 256 self.down_block2 = UNet_down_block(16, 32, True) # 128 self.down_block3 = UNet_down_block(32, 64, True) # 64 self.down_block4 = UNet_down_block(64, 128, True) # 32 self.down_block5 = UNet_down_block(128, 256, True) # 16 self.down_block6 = UNet_down_block(256, 512, True) # 8 self.down_block7 = UNet_down_block(512, 1024, True)# 4 self.mid_conv1 = nn.Conv2d(1024, 1024, 3, padding=1) self.bn1 = nn.GroupNorm(8, 1024) self.mid_conv2 = nn.Conv2d(1024, 1024, 3, padding=1) self.bn2 = nn.GroupNorm(8, 1024) self.mid_conv3 = torch.nn.Conv2d(1024, 1024, 3, padding=1) self.bn3 = nn.GroupNorm(8, 1024) self.up_block1 = UNet_up_block(512, 1024, 512) 
self.up_block2 = UNet_up_block(256, 512, 256) self.up_block3 = UNet_up_block(128, 256, 128) self.up_block4 = UNet_up_block(64, 128, 64) self.up_block5 = UNet_up_block(32, 64, 32) self.up_block6 = UNet_up_block(16, 32, 16) self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1) self.last_bn = nn.GroupNorm(8, 16) self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0) self.relu = nn.ReLU() def forward(self, x): self.x1 = self.down_block1(x) self.x2 = self.down_block2(self.x1) self.x3 = self.down_block3(self.x2) self.x4 = self.down_block4(self.x3) self.x5 = self.down_block5(self.x4) self.x6 = self.down_block6(self.x5) self.x7 = self.down_block7(self.x6) self.x7 = self.relu(self.bn1(self.mid_conv1(self.x7))) self.x7 = self.relu(self.bn2(self.mid_conv2(self.x7))) self.x7 = self.relu(self.bn3(self.mid_conv3(self.x7))) x = self.up_block1(self.x6, self.x7) x = self.up_block2(self.x5, x) x = self.up_block3(self.x4, x) x = self.up_block4(self.x3, x) x = self.up_block5(self.x2, x) x = self.up_block6(self.x1, x) x = self.relu(self.last_bn(self.last_conv1(x))) x = self.relu(self.last_conv2(x)) return x def loss(self, pred, target): loss = torch.tensor(0.0, device=pred.device) return loss, (loss.detach(),) class ConvBlock(nn.Module): def __init__(self, f1, f2, kernel_size=3, padding=1, use_groupnorm=True, groups=8, dilation=1, transpose=False): super().__init__() self.transpose = transpose self.conv = nn.Conv2d(f1, f2, (kernel_size, kernel_size), dilation=dilation, padding=padding*dilation) if self.transpose: self.convt = nn.ConvTranspose2d( f1, f1, (3, 3), dilation=dilation, stride=2, padding=dilation, output_padding=1 ) if use_groupnorm: self.bn = nn.GroupNorm(groups, f1) else: self.bn = nn.BatchNorm2d(f1) def forward(self, x): # x = F.dropout(x, 0.04, self.training) x = self.bn(x) if self.transpose: # x = F.upsample(x, scale_factor=2, mode='bilinear') x = F.relu(self.convt(x)) # x = x[:, :, :-1, :-1] x = F.relu(self.conv(x)) return x class UNetOld2(TrainableModel): def 
__init__(self, in_channels=3, out_channels=3): super().__init__() self.in_channels, self.out_channels = in_channels, out_channels self.initial = nn.Sequential( ConvBlock(in_channels, 16, groups=3, kernel_size=1, padding=0), ConvBlock(16, 16, groups=4, kernel_size=1, padding=0) ) self.down_block1 = UNet_down_block(16, 16, False) self.down_block2 = UNet_down_block(16, 32, True) # 128 self.down_block3 = UNet_down_block(32, 64, True) # 64 self.down_block4 = UNet_down_block(64, 128, True) # 32 self.down_block5 = UNet_down_block(128, 256, True) # 16 self.down_block6 = UNet_down_block(256, 512, True) # 8 self.down_block7 = UNet_down_block(512, 1024, True)# 4 self.mid_conv1 = nn.Conv2d(1024, 1024, 3, padding=1) self.bn1 = nn.GroupNorm(8, 1024) self.mid_conv2 = nn.Conv2d(1024, 1024, 3, padding=1) self.bn2 = nn.GroupNorm(8, 1024) self.mid_conv3 = torch.nn.Conv2d(1024, 1024, 3, padding=1) self.bn3 = nn.GroupNorm(8, 1024) self.up_block1 = UNet_up_block(512, 1024, 512) self.up_block2 = UNet_up_block(256, 512, 256) self.up_block3 = UNet_up_block(128, 256, 128) self.up_block4 = UNet_up_block(64, 128, 64) self.up_block5 = UNet_up_block(32, 64, 32) self.up_block6 = UNet_up_block(16, 32, 16) self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1) self.last_bn = nn.GroupNorm(8, 16) self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0) self.relu = nn.ReLU() def forward(self, x): x = self.initial(x) self.x1 = self.down_block1(x) self.x2 = self.down_block2(self.x1) self.x3 = self.down_block3(self.x2) self.x4 = self.down_block4(self.x3) self.x5 = self.down_block5(self.x4) self.x6 = self.down_block6(self.x5) self.x7 = self.down_block7(self.x6) self.x7 = self.relu(self.bn1(self.mid_conv1(self.x7))) self.x7 = self.relu(self.bn2(self.mid_conv2(self.x7))) self.x7 = self.relu(self.bn3(self.mid_conv3(self.x7))) x = self.up_block1(self.x6, self.x7) x = self.up_block2(self.x5, x) x = self.up_block3(self.x4, x) x = self.up_block4(self.x3, x) x = self.up_block5(self.x2, x) x = 
self.up_block6(self.x1, x) x = self.relu(self.last_bn(self.last_conv1(x))) x = self.relu(self.last_conv2(x)) return x def loss(self, pred, target): loss = torch.tensor(0.0, device=pred.device) return loss, (loss.detach(),)
39.273312
116
0.611921
1,830
12,214
3.911475
0.081967
0.032132
0.025147
0.039955
0.824811
0.803297
0.782341
0.778011
0.778011
0.778011
0
0.076039
0.251678
12,214
311
117
39.273312
0.707112
0.013018
0
0.767717
0
0
0.000665
0
0
0
0
0
0
1
0.070866
false
0
0.043307
0
0.185039
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
9f7c9326c8ea0931a2195adf65a54ceee289ca33
8,287
py
Python
py/HW3/option_models/sabr.py
LantianXue/ASP
3b7b3a60079e9f4d91b6f2f0d8b62e34b16b432e
[ "MIT" ]
null
null
null
py/HW3/option_models/sabr.py
LantianXue/ASP
3b7b3a60079e9f4d91b6f2f0d8b62e34b16b432e
[ "MIT" ]
null
null
null
py/HW3/option_models/sabr.py
LantianXue/ASP
3b7b3a60079e9f4d91b6f2f0d8b62e34b16b432e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Tue Oct 10 @author: jaehyuk """ import numpy as np import scipy.stats as ss import scipy.optimize as sopt from . import normal from . import bsm import pyfeng as pf ''' MC model class for Beta=1 ''' class ModelBsmMC: beta = 1.0 # fixed (not used) vov, rho = 0.0, 0.0 sigma, intr, divr = None, None, None bsm_model = None ''' You may define more members for MC: time step, etc ''' def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0): self.sigma = sigma self.vov = vov self.rho = rho self.intr = intr self.divr = divr self.bsm_model = pf.Bsm(sigma, intr=intr, divr=divr) def bsm_vol(self, strike, spot, texp=None, sigma=None): '''' From the price from self.price() compute the implied vol this is the opposite of bsm_vol in ModelHagan class use bsm_model ''' return 0 def price(self, strike, spot, texp=None, sigma=None, cp=1,random_seed = 12345): ''' Your MC routine goes here Generate paths for vol and price first. Then get prices (vector) for all strikes You may fix the random number seed ''' n_interval = 100 n_iter =10000 delta_t = texp/n_interval prices = [] np.random.seed(random_seed) # get the whole price path and sigma path every iteration for i in range(n_iter): z1 = np.random.randn(n_interval) z2 = np.random.randn(n_interval) w1 = self.rho*z1 + np.sqrt(1-np.power(self.rho,2))*z2 sis = np.exp(self.vov*np.sqrt(delta_t)*z1-0.5*np.power(self.vov,2)*delta_t) sis[1:]=sis[:-1] sis[0] = self.sigma sis = np.cumprod(sis) deltap = np.exp(sis*np.sqrt(delta_t)*w1-0.5*np.power(sis,2)*delta_t) deltap[0]*=spot pts = np.cumprod(deltap) prices.append(pts[-1]) strikes = np.array([strike]*n_iter).T callp = -strikes+prices callp = np.where(callp>0,callp,0) # record the call price among all our MC self.cprice_paths = callp finalp = callp.mean(axis = 1) return finalp ''' MC model class for Beta=0 ''' class ModelNormalMC: beta = 0.0 # fixed (not used) vov, rho = 0.0, 0.0 sigma, intr, divr = None, None, None normal_model = None def 
__init__(self, sigma, vov=0, rho=0.0, beta=0.0, intr=0, divr=0): self.sigma = sigma self.vov = vov self.rho = rho self.intr = intr self.divr = divr self.normal_model = pf.Norm(sigma, intr=intr, divr=divr) def norm_vol(self, strike, spot, texp=None, sigma=None): '''' From the price from self.price() compute the implied vol this is the opposite of normal_vol in ModelNormalHagan class use normal_model ''' return 0 def price(self, strike, spot, texp=None, sigma=None, cp=1, random_seed = 12345): ''' Your MC routine goes here Generate paths for vol and price first. Then get prices (vector) for all strikes You may fix the random number seed ''' n_interval = 100 n_iter =10000 delta_t = texp/n_interval prices = [] np.random.seed(random_seed) # get the whole price path and sigma path every iteration for i in range(n_iter): z1 = np.random.randn(n_interval) z2 = np.random.randn(n_interval) w1 = self.rho*z1 + np.sqrt(1-np.power(self.rho,2))*z2 sis = np.exp(self.vov*np.sqrt(delta_t)*z1-0.5*np.power(self.vov,2)*delta_t) sis[1:]=sis[:-1] sis[0] = self.sigma sis = np.cumprod(sis) deltap = sis*np.sqrt(delta_t)*w1 deltap[0]+=spot pts = np.cumsum(deltap) prices.append(pts[-1]) strikes = np.array([strike]*n_iter).T callp = -strikes+prices callp = np.where(callp>0,callp,0) # record the call price among all our MC self.cprice_paths = callp finalp = callp.mean(axis = 1) return finalp ''' Conditional MC model class for Beta=1 ''' class ModelBsmCondMC: beta = 1.0 # fixed (not used) vov, rho = 0.0, 0.0 sigma, intr, divr = None, None, None bsm_model = None ''' You may define more members for MC: time step, etc ''' def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0): self.sigma = sigma self.vov = vov self.rho = rho self.intr = intr self.divr = divr self.bsm_model = pf.Bsm(sigma, intr=intr, divr=divr) def bsm_vol(self, strike, spot, texp=None): '''' From the price from self.price() compute the implied vol this is the opposite of bsm_vol in ModelHagan class use bsm_model should 
be same as bsm_vol method in ModelBsmMC (just copy & paste) ''' return 0 def price(self, strike, spot, texp=None, cp=1,random_seed = 12345): ''' Your MC routine goes here Generate paths for vol only. Then compute integrated variance and BSM price. Then get prices (vector) for all strikes You may fix the random number seed ''' n_interval = 100 n_iter =10000 delta_t = texp/n_interval prices = [] np.random.seed(random_seed) # get a whole sigma path every iteration for i in range(n_iter): z1 = np.random.randn(n_interval) sis = np.exp(self.vov*np.sqrt(delta_t)*z1-0.5*np.power(self.vov,2)*delta_t) sis[1:]=sis[:-1] sis[0] = self.sigma sis = np.cumprod(sis) var = np.power(sis,2)/sis[0]**2 it = var.mean() s0 = spot*np.exp(self.rho/self.vov*(sis[-1]-sis[0])-0.5*np.power(self.rho*sis[0],2)*texp*it) sigma_bs = sis[0]*np.sqrt((1-self.rho**2)*it) prices.append(bsm.price(strike,s0,texp,sigma_bs)) prices = np.array(prices) # record the call price among our CMC self.cprice_paths = prices finalp = prices.mean(axis = 0) return finalp ''' Conditional MC model class for Beta=0 ''' class ModelNormalCondMC: beta = 0.0 # fixed (not used) vov, rho = 0.0, 0.0 sigma, intr, divr = None, None, None normal_model = None def __init__(self, sigma, vov=0, rho=0.0, beta=0.0, intr=0, divr=0): self.sigma = sigma self.vov = vov self.rho = rho self.intr = intr self.divr = divr self.normal_model = pf.Norm(sigma, intr=intr, divr=divr) def norm_vol(self, strike, spot, texp=None): '''' From the price from self.price() compute the implied vol this is the opposite of normal_vol in ModelNormalHagan class use normal_model should be same as norm_vol method in ModelNormalMC (just copy & paste) ''' return 0 def price(self, strike, spot, texp=None, cp=1,random_seed =12345): ''' Your MC routine goes here Generate paths for vol only. Then compute integrated variance and normal price. 
You may fix the random number seed ''' n_interval = 100 n_iter =10000 delta_t = texp/n_interval prices = [] np.random.seed(random_seed) # get a whole sigma path every iteration for i in range(n_iter): z1 = np.random.randn(n_interval) sis = np.exp(self.vov*np.sqrt(delta_t)*z1-0.5*np.power(self.vov,2)*delta_t) sis[1:]=sis[:-1] sis[0] = self.sigma sis = np.cumprod(sis) var = np.power(sis,2)/sis[0]**2 it = var.mean() s0 = spot+self.rho/self.vov*(sis[-1]-sis[0]) sigma_nm = sis[0]*np.sqrt((1-self.rho**2)*it) prices.append(normal.price(strike,s0,texp,sigma_nm)) prices = np.array(prices) # record all the prices among our CMC self.cprice_paths = prices finalp = prices.mean(axis = 0) return finalp
33.963115
104
0.568481
1,224
8,287
3.772876
0.122549
0.009095
0.015158
0.031182
0.920961
0.8822
0.874838
0.861412
0.838891
0.838891
0
0.036113
0.31833
8,287
244
105
33.963115
0.781377
0.209605
0
0.828947
0
0
0
0
0
0
0
0
0
1
0.078947
false
0
0.039474
0
0.25
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
4cc7eec44472f1dbd8722e5ef6ba809c5642dd55
837
py
Python
tests/test_utils.py
d21d3q/tornado-restplus
828c942271af0fb5db8c39da488e486cda65ba48
[ "MIT" ]
1
2019-05-11T09:21:50.000Z
2019-05-11T09:21:50.000Z
tests/test_utils.py
d21d3q/tornado-restplus
828c942271af0fb5db8c39da488e486cda65ba48
[ "MIT" ]
null
null
null
tests/test_utils.py
d21d3q/tornado-restplus
828c942271af0fb5db8c39da488e486cda65ba48
[ "MIT" ]
null
null
null
from unittest import TestCase from tornado_restplus.utils import make_path_chunk class UtilsTest(TestCase): def test_make_path_chunk(self): valid = '/chunk' assert make_path_chunk('/chunk') == valid assert make_path_chunk('chunk') == valid assert make_path_chunk('//chunk') == valid assert make_path_chunk('///chunk') == valid assert make_path_chunk('/chunk//') == valid assert make_path_chunk('/chunk//') == valid assert make_path_chunk('chunk/') == valid assert make_path_chunk('chunk//') == valid assert make_path_chunk('chunk///') == valid valid = '/double/chunk' assert make_path_chunk('/double/chunk') == valid assert make_path_chunk('//double/chunk') == valid assert make_path_chunk('/double/chunk//') == valid
38.045455
58
0.641577
101
837
5.019802
0.178218
0.220907
0.358974
0.449704
0.741617
0.721893
0.721893
0.721893
0.721893
0.721893
0
0
0.221027
837
21
59
39.857143
0.777607
0
0
0.111111
0
0
0.148148
0
0
0
0
0
0.666667
1
0.055556
false
0
0.111111
0
0.222222
0
0
0
0
null
1
1
1
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
9
4cdc5829a02dc8ccbeb2ac30e55f9532dafe1aad
12,339
py
Python
cohydra/test_profile.py
dseomn/cohydra
2b7788c35cd2aa2f2d6bba2c774aeb80d7d69a5a
[ "Apache-2.0" ]
1
2017-01-04T23:43:45.000Z
2017-01-04T23:43:45.000Z
cohydra/test_profile.py
dseomn/cohydra
2b7788c35cd2aa2f2d6bba2c774aeb80d7d69a5a
[ "Apache-2.0" ]
4
2016-11-09T03:24:19.000Z
2017-08-13T23:55:34.000Z
cohydra/test_profile.py
dseomn/cohydra
2b7788c35cd2aa2f2d6bba2c774aeb80d7d69a5a
[ "Apache-2.0" ]
1
2017-01-04T23:43:47.000Z
2017-01-04T23:43:47.000Z
import os import tempfile import unittest import unittest.mock from . import profile from . import test_helper @unittest.mock.patch.object( profile.Profile, 'generate', autospec=True, ) @unittest.mock.patch.object( profile.Profile, '__abstractmethods__', new=set(), ) class TestProfile(unittest.TestCase): def setUp(self): self.dir = tempfile.TemporaryDirectory() def tearDown(self): self.dir.cleanup() def test_generate_all(self, mock_generate): p = profile.Profile(self.dir.name, None) p0 = profile.Profile(self.dir.name, p) p00 = profile.Profile(self.dir.name, p0) p1 = profile.Profile(self.dir.name, p) p.generate_all() self.assertEqual( mock_generate.mock_calls, [unittest.mock.call(x) for x in (p, p0, p00, p1)]) class TestFilterProfile( unittest.TestCase, test_helper.SrcDstDirMixin, ): def setUp(self): test_helper.SrcDstDirMixin.setUp(self) def tearDown(self): test_helper.SrcDstDirMixin.tearDown(self) def test_clean_ok(self): os.mkdir(os.path.join(self.dst_path(), 'dir')) os.mkdir(os.path.join(self.dst_path(), 'dir', 'dir')) os.symlink( '/dev/null', os.path.join(self.dst_path(), 'dir', 'dir', 'file')) os.symlink( '/dev/null', os.path.join(self.dst_path(), 'file')) self.assertNotEqual(os.listdir(self.dst_path()), []) root = profile.RootProfile(top_dir=self.src_path()) p = profile.FilterProfile( top_dir=self.dst_path(), parent=root, select_cb= lambda profile, src_relpath, dst_relpath, contents: contents, ) p.generate() self.assertEqual(os.listdir(self.dst_path()), []) def test_clean_error_file(self): open(os.path.join(self.dst_path(), 'file'), 'w').close() root = profile.RootProfile(top_dir=self.src_path()) p = profile.FilterProfile( top_dir=self.dst_path(), parent=root, select_cb= lambda profile, src_relpath, dst_relpath, contents: contents, ) self.assertRaisesRegex( RuntimeError, '^Cannot clean ', p.generate, ) self.assertTrue(os.path.isfile( os.path.join(self.dst_path(), 'file'))) def test_empty_noop(self): root = profile.RootProfile(top_dir=self.src_path()) 
select_cb = unittest.mock.Mock(return_value=[]) p = profile.FilterProfile( top_dir=self.dst_path(), parent=root, select_cb=select_cb, ) p.generate() select_cb.assert_called_once_with(p, '', '', []) self.assertEqual(os.listdir(self.dst_path()), []) def test_select_none(self): os.mkdir(os.path.join(self.src_path(), 'dir')) os.mkdir(os.path.join(self.src_path(), 'dir', 'dir')) open(os.path.join(self.src_path(), 'dir', 'dir', 'file'), 'w').close() open(os.path.join(self.src_path(), 'file'), 'w').close() root = profile.RootProfile(top_dir=self.src_path()) select_cb = unittest.mock.Mock(return_value=[]) p = profile.FilterProfile( top_dir=self.dst_path(), parent=root, select_cb=select_cb, ) p.generate() select_cb.assert_called_once_with(p, '', '', unittest.mock.ANY) self.assertEqual(os.listdir(self.dst_path()), []) def test_select_all(self): os.mkdir(os.path.join(self.src_path(), 'dir')) os.mkdir(os.path.join(self.src_path(), 'dir', 'dir')) open(os.path.join(self.src_path(), 'dir', 'dir', 'file'), 'w').close() open(os.path.join(self.src_path(), 'file'), 'w').close() os.utime(os.path.join(self.src_path(), 'dir'), (0, 0)) os.utime(os.path.join(self.src_path(), 'dir', 'dir'), (0, 0)) root = profile.RootProfile(top_dir=self.src_path()) select_cb = unittest.mock.Mock( wraps= lambda profile, src_relpath, dst_relpath, contents: contents) p = profile.FilterProfile( top_dir=self.dst_path(), parent=root, select_cb=select_cb, ) p.generate() self.assertEqual( select_cb.mock_calls, [ unittest.mock.call(p, '', '', unittest.mock.ANY), unittest.mock.call(p, 'dir', 'dir', unittest.mock.ANY), unittest.mock.call(p, 'dir/dir', 'dir/dir', unittest.mock.ANY), ], ) self.assertEqual( frozenset(os.listdir(self.dst_path())), {'dir', 'file'}) self.assertEqual( frozenset(os.listdir(os.path.join(self.src_path(), 'dir'))), {'dir'}) self.assertEqual( frozenset(os.listdir(os.path.join(self.src_path(), 'dir', 'dir'))), {'file'}) self.assertEqual( test_helper.get_preserved_attrs( 
os.path.join(self.src_path(), 'dir')), test_helper.get_preserved_attrs( os.path.join(self.dst_path(), 'dir')), ) self.assertEqual( test_helper.get_preserved_attrs( os.path.join(self.src_path(), 'dir', 'dir')), test_helper.get_preserved_attrs( os.path.join(self.dst_path(), 'dir', 'dir')), ) self.assertEqual( test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), 'file')), os.path.abspath( os.path.join(self.src_path(), 'file')), ) self.assertEqual( test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), 'dir', 'dir', 'file')), os.path.abspath( os.path.join(self.src_path(), 'dir', 'dir', 'file')), ) def test_select_dir_but_not_its_contents(self): os.mkdir(os.path.join(self.src_path(), 'dir')) open(os.path.join(self.src_path(), 'dir', 'file'), 'w').close() root = profile.RootProfile(top_dir=self.src_path()) select_cb = unittest.mock.Mock( wraps=lambda profile, src_relpath, dst_relpath, contents: contents if src_relpath == '' else [], ) p = profile.FilterProfile( top_dir=self.dst_path(), parent=root, select_cb=select_cb, ) p.generate() self.assertEqual( select_cb.mock_calls, [ unittest.mock.call(p, '', '', unittest.mock.ANY), unittest.mock.call(p, 'dir', 'dir', unittest.mock.ANY), ], ) self.assertEqual(os.listdir(self.dst_path()), []) def test_rename(self): os.mkdir(os.path.join(self.src_path(), 'dir')) os.mkdir(os.path.join(self.src_path(), 'dir', 'dir')) open(os.path.join(self.src_path(), 'dir', 'dir', 'file'), 'w').close() open(os.path.join(self.src_path(), 'dir', 'file'), 'w').close() open(os.path.join(self.src_path(), 'file'), 'w').close() os.utime(os.path.join(self.src_path(), 'dir'), (0, 0)) os.utime(os.path.join(self.src_path(), 'dir', 'dir'), (0, 0)) root = profile.RootProfile(top_dir=self.src_path()) def select_cb(profile, src_relpath, dst_relpath, contents): ret = [] for entry in contents: if src_relpath == '' and entry.name == 'dir': ret.append((entry, 'dir.new')) elif src_relpath == '' and entry.name == 'file': ret.append((entry, 
'file.new')) elif src_relpath == 'dir' and entry.name == 'dir': ret.append(entry) elif src_relpath == 'dir' and entry.name == 'file': ret.append((entry, os.path.join('dir.new', 'file.new'))) elif src_relpath == 'dir/dir' and entry.name == 'file': ret.append(entry) else: raise RuntimeError('Unexpected entry.') return ret select_cb = unittest.mock.Mock(wraps=select_cb) p = profile.FilterProfile( top_dir=self.dst_path(), parent=root, select_cb=select_cb, ) p.generate() self.assertEqual( select_cb.mock_calls, [ unittest.mock.call(p, '', '', unittest.mock.ANY), unittest.mock.call(p, 'dir', 'dir.new', unittest.mock.ANY), unittest.mock.call(p, 'dir/dir', 'dir.new/dir', unittest.mock.ANY), ], ) self.assertEqual( frozenset(os.listdir(self.dst_path())), {'dir.new', 'file.new'}) self.assertEqual( frozenset(os.listdir(os.path.join(self.dst_path(), 'dir.new'))), {'dir', 'file.new'}) self.assertEqual( frozenset(os.listdir(os.path.join(self.dst_path(), 'dir.new', 'dir'))), {'file'}) self.assertEqual( test_helper.get_preserved_attrs( os.path.join(self.src_path(), 'dir')), test_helper.get_preserved_attrs( os.path.join(self.dst_path(), 'dir.new')), ) self.assertEqual( test_helper.get_preserved_attrs( os.path.join(self.src_path(), 'dir', 'dir')), test_helper.get_preserved_attrs( os.path.join(self.dst_path(), 'dir.new', 'dir')), ) self.assertEqual( os.path.abspath( os.path.join(self.src_path(), 'file')), test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), 'file.new')), ) self.assertEqual( os.path.abspath( os.path.join(self.src_path(), 'dir', 'file')), test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), 'dir.new', 'file.new')), ) self.assertEqual( os.path.abspath( os.path.join(self.src_path(), 'dir', 'dir', 'file')), test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), 'dir.new', 'dir', 'file')), ) def test_rename_across_dir_error(self): os.mkdir(os.path.join(self.src_path(), 'dir')) open(os.path.join(self.src_path(), 'file'), 'w').close() 
root = profile.RootProfile(top_dir=self.src_path()) def select_cb(profile, src_relpath, dst_relpath, contents): ret = [] for entry in contents: if entry.name.endswith('file'): ret.append((entry, 'dir/file')) else: ret.append(entry) return ret p = profile.FilterProfile( top_dir=self.dst_path(), parent=root, select_cb=select_cb, ) self.assertRaisesRegex( NotImplementedError, '^Renaming across dirs is not supported: ', p.generate, ) class TestSanitizeFilenameProfile( unittest.TestCase, test_helper.SrcDstDirMixin, ): def setUp(self): test_helper.SrcDstDirMixin.setUp(self) root = profile.RootProfile(top_dir=self.src_path()) self.profile = profile.SanitizeFilenameProfile( top_dir=self.dst_path(), parent=root, ) def tearDown(self): test_helper.SrcDstDirMixin.tearDown(self) def test_sanitization(self): open(os.path.join(self.src_path(), ':'), 'w').close() open(os.path.join(self.src_path(), 'CON'), 'w').close() open(os.path.join(self.src_path(), 'lpt2.txt'), 'w').close() open(os.path.join(self.src_path(), 'foo.'), 'w').close() open(os.path.join(self.src_path(), 'ok'), 'w').close() self.profile.generate() self.assertEqual( frozenset(os.listdir(self.dst_path())), {'_', 'CON_', 'lpt2_.txt', 'foo_', 'ok'}) self.assertEqual( os.path.abspath( os.path.join(self.src_path(), ':')), test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), '_')), ) self.assertEqual( os.path.abspath( os.path.join(self.src_path(), 'CON')), test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), 'CON_')), ) self.assertEqual( os.path.abspath( os.path.join(self.src_path(), 'lpt2.txt')), test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), 'lpt2_.txt')), ) self.assertEqual( os.path.abspath( os.path.join(self.src_path(), 'foo.')), test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), 'foo_')), ) self.assertEqual( os.path.abspath( os.path.join(self.src_path(), 'ok')), test_helper.symlink_pointee_abspath( os.path.join(self.dst_path(), 'ok')), ) def 
test_duplicate_filename_error(self): open(os.path.join(self.src_path(), ':'), 'w').close() open(os.path.join(self.src_path(), '?'), 'w').close() self.assertRaisesRegex( RuntimeError, '^Sanitizing would create duplicate file: ', self.profile.generate, ) def test_duplicate_filename_case_error(self): open(os.path.join(self.src_path(), 'A'), 'w').close() open(os.path.join(self.src_path(), 'a'), 'w').close() self.assertRaisesRegex( RuntimeError, '^Sanitizing would create duplicate file: ', self.profile.generate, )
29.661058
77
0.613259
1,618
12,339
4.505562
0.077874
0.065844
0.09465
0.13059
0.859534
0.843759
0.817147
0.788066
0.770233
0.734431
0
0.002157
0.211119
12,339
415
78
29.73253
0.746764
0
0
0.598854
1
0
0.06208
0
0
0
0
0
0.106017
1
0.057307
false
0
0.017192
0
0.088825
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
9819c72e19523d289f37c661cb745bef733f9806
15,430
py
Python
functional_tests/test_home_page.py
szypkiwonsz/Physiotherapy-Management-System
36decab47890e2f4be259c8796f47324ffad28fe
[ "MIT" ]
null
null
null
functional_tests/test_home_page.py
szypkiwonsz/Physiotherapy-Management-System
36decab47890e2f4be259c8796f47324ffad28fe
[ "MIT" ]
8
2020-08-17T14:36:02.000Z
2022-03-12T00:33:50.000Z
functional_tests/test_home_page.py
szypkiwonsz/Physiotherapy-Management-System
36decab47890e2f4be259c8796f47324ffad28fe
[ "MIT" ]
null
null
null
from time import sleep from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.urls import reverse from selenium import webdriver from selenium.common.exceptions import ElementNotInteractableException from applications.users.models import User class TestHomePageNotLoggedIn(StaticLiveServerTestCase): def setUp(self): self.browser = webdriver.Chrome('functional_tests/chromedriver.exe') def tearDown(self): self.browser.close() def test_login_button_redirects_to_login(self): self.browser.get(self.live_server_url) login_url = self.live_server_url + reverse('login') self.browser.find_element_by_xpath( '//*[@id="basicExampleNav"]/ul[2]/li[1]/a').click() sleep(0.5) self.browser.find_element_by_xpath( '//*[@id="basicExampleNav"]/ul[2]/li[1]/a').click() self.assertEquals( self.browser.current_url, login_url ) def test_panel_button_redirects_to_login(self): self.browser.get(self.live_server_url) login_url = self.live_server_url + reverse('login') try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[2]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[2]/a').click() self.assertEquals( self.browser.current_url, login_url ) def test_offices_button_redirects_to_offices(self): self.browser.get(self.live_server_url) offices_url = self.live_server_url + reverse('home_page:offices') try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[3]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[3]/a').click() self.assertEquals( self.browser.current_url, offices_url ) def test_help_button_redirects_to_help(self): self.browser.get(self.live_server_url) help_url = self.live_server_url + 
reverse('home_page:help') try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[4]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[4]/a').click() self.assertEquals( self.browser.current_url, help_url ) def test_login_button_redirects_to_login(self): self.browser.get(self.live_server_url) login_url = self.live_server_url + reverse('login') try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[1]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[1]/a').click() self.assertEquals( self.browser.current_url, login_url ) def test_register_button_redirects_to_signup_choice(self): self.browser.get(self.live_server_url) signup_url = self.live_server_url + reverse('users:signup') try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[2]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[2]/a').click() self.assertEquals( self.browser.current_url, signup_url ) class TestHomePageLoggedAsPatient(StaticLiveServerTestCase): def setUp(self): self.patient1 = User.objects.create_user( 'patient', 'patient@gmail.com', 'patientpassword', is_patient=True ) self.browser = webdriver.Chrome('functional_tests/chromedriver.exe') def tearDown(self): self.browser.close() def test_panel_button_redirects_to_panel(self): self.browser.get(self.live_server_url + reverse('login')) panel_patient_url = self.live_server_url + reverse('patient_panel:home') 
self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('patient@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('patientpassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + reverse('home_page:home')) try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[2]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[2]/a').click() self.assertEquals( self.browser.current_url, panel_patient_url ) def test_offices_button_redirects_to_offices(self): self.browser.get(self.live_server_url + reverse('login')) self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('patient@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('patientpassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + reverse('home_page:home')) offices_url = self.live_server_url + reverse('home_page:offices') try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[3]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[3]/a').click() self.assertEquals( self.browser.current_url, offices_url ) def test_help_button_redirects_to_help(self): self.browser.get(self.live_server_url + reverse('login')) self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('patient@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('patientpassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + 
reverse('home_page:home')) help_url = self.live_server_url + reverse('home_page:help') try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[4]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[4]/a').click() self.assertEquals( self.browser.current_url, help_url ) def test_profile_button_redirects_to_profile(self): self.browser.get(self.live_server_url + reverse('login')) profile_patient_url = self.live_server_url + reverse('users:patient_profile') self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('patient@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('patientpassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + reverse('home_page:home')) try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[1]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[1]/a').click() self.assertEquals( self.browser.current_url, profile_patient_url ) def test_logout_button_redirects_to_logout(self): self.browser.get(self.live_server_url + reverse('login')) logout_url = self.live_server_url + reverse('logout') self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('patient@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('patientpassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + reverse('home_page:home')) try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[2]/a').click() except ElementNotInteractableException: # Mobile version 
self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[2]/a').click() self.assertEquals( self.browser.current_url, logout_url ) class TestHomePageLoggedAsOffice(StaticLiveServerTestCase): def setUp(self): self.patient1 = User.objects.create_user( 'office', 'office@gmail.com', 'officepassword', is_office=True ) self.browser = webdriver.Chrome('functional_tests/chromedriver.exe') def tearDown(self): self.browser.close() def test_panel_button_redirects_to_panel(self): self.browser.get(self.live_server_url + reverse('login')) panel_office_url = self.live_server_url + reverse('office_panel:home') self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('office@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('officepassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + reverse('home_page:home')) try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[2]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[2]/a').click() self.assertEquals( self.browser.current_url, panel_office_url ) def test_offices_button_redirects_to_offices(self): self.browser.get(self.live_server_url + reverse('login')) self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('office@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('officepassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + reverse('home_page:home')) offices_url = self.live_server_url + reverse('home_page:offices') try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[3]/a').click() 
except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[3]/a').click() self.assertEquals( self.browser.current_url, offices_url ) def test_help_button_redirects_to_help(self): self.browser.get(self.live_server_url + reverse('login')) self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('office@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('officepassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + reverse('home_page:home')) help_url = self.live_server_url + reverse('home_page:help') try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[4]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[1]/li[4]/a').click() self.assertEquals( self.browser.current_url, help_url ) def test_profile_button_redirects_to_profile(self): self.browser.get(self.live_server_url + reverse('login')) profile_office_url = self.live_server_url + reverse('users:office_profile') self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('office@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('officepassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + reverse('home_page:home')) try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[1]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[1]/a').click() self.assertEquals( 
self.browser.current_url, profile_office_url ) def test_logout_button_redirects_to_logout(self): self.browser.get(self.live_server_url + reverse('login')) logout_url = self.live_server_url + reverse('logout') self.browser.find_element_by_xpath('//*[@id="id_username"]').send_keys('office@gmail.com') self.browser.find_element_by_xpath('//*[@id="id_password"]').send_keys('officepassword') self.browser.find_element_by_xpath('/html/body/div[2]/div/form/button').click() self.browser.get(self.live_server_url + reverse('home_page:home')) try: self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[2]/a').click() except ElementNotInteractableException: # Mobile version self.browser.find_element_by_xpath('/html/body/nav/button').click() sleep(0.5) self.browser.find_element_by_xpath('//*[@id="basicExampleNav"]/ul[2]/li[2]/a').click() self.assertEquals( self.browser.current_url, logout_url )
48.21875
99
0.648866
1,912
15,430
4.964958
0.050209
0.144844
0.121669
0.178447
0.944275
0.940482
0.939956
0.921416
0.917729
0.917729
0
0.008775
0.202398
15,430
319
100
48.369906
0.762574
0.014517
0
0.850365
0
0
0.213759
0.163594
0
0
0
0
0.058394
1
0.080292
false
0.043796
0.021898
0
0.113139
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
e23f8a6da79abe1c9298afe0f50ad0b67e801a16
141
py
Python
metrics/Finantial_metics.py
Jiahui-Gu/SCINet
e46d9dcb0dd6da1f87c6c81f9454e71802a6bedb
[ "Apache-2.0" ]
169
2021-09-12T14:02:05.000Z
2022-03-31T23:30:28.000Z
metrics/Finantial_metics.py
tonylibing/SCINet
e4f53c7c50864132a0820eca8db542bc0d2b99a1
[ "Apache-2.0" ]
29
2021-09-30T07:51:15.000Z
2022-03-31T04:37:59.000Z
metrics/Finantial_metics.py
tonylibing/SCINet
e4f53c7c50864132a0820eca8db542bc0d2b99a1
[ "Apache-2.0" ]
53
2021-09-17T07:42:55.000Z
2022-03-31T07:15:38.000Z
import numpy as np def MAE(pred, true): return np.mean(np.abs(pred - true)) def MSE(pred, true): return np.mean((pred - true) ** 2)
20.142857
39
0.631206
25
141
3.56
0.52
0.359551
0.314607
0.359551
0.449438
0
0
0
0
0
0
0.008929
0.205674
141
7
40
20.142857
0.785714
0
0
0
0
0
0
0
0
0
0
0
0
1
0.4
false
0
0.2
0.4
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
8
e243336ae611f92dbd54273049a1ce7c1d1722bd
37,567
py
Python
src/configparserenhanced/unittests/test_ExceptionControl.py
sandialabs/ConfigParserEnhanced
93c2b32fa67c47bc2194a95a2464529c4adfaa01
[ "BSD-3-Clause" ]
2
2021-12-08T15:34:03.000Z
2021-12-21T21:54:19.000Z
src/configparserenhanced/unittests/test_ExceptionControl.py
sandialabs/ConfigParserEnhanced
93c2b32fa67c47bc2194a95a2464529c4adfaa01
[ "BSD-3-Clause" ]
null
null
null
src/configparserenhanced/unittests/test_ExceptionControl.py
sandialabs/ConfigParserEnhanced
93c2b32fa67c47bc2194a95a2464529c4adfaa01
[ "BSD-3-Clause" ]
4
2021-12-08T01:02:15.000Z
2022-01-31T14:08:57.000Z
#!/usr/bin/env python # -*- coding: utf-8; mode: python; py-indent-offset: 4; py-continuation-offset: 4 -*- #=============================================================================== # Copyright Notice # ---------------- # Copyright 2021 National Technology & Engineering Solutions of Sandia, # LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, # the U.S. Government retains certain rights in this software. # # License (3-Clause BSD) # ---------------------- # Copyright 2021 National Technology & Engineering Solutions of Sandia, # LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, # the U.S. Government retains certain rights in this software. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #=============================================================================== """ """ from __future__ import print_function import sys sys.dont_write_bytecode = True import os sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import unittest from unittest import TestCase # Coverage will always miss one of these depending on the system # and what is available. try: # pragma: no cover import unittest.mock as mock # pragma: no cover except: # pragma: no cover import mock # pragma: no cover from mock import Mock from mock import MagicMock from mock import patch try: from cStringIO import StringIO except ImportError: from io import StringIO from configparserenhanced import ExceptionControl from .common import * #=============================================================================== # # General Utility Functions # #=============================================================================== #=============================================================================== # # Mock Helpers # #=============================================================================== #=============================================================================== # # Tests # #=============================================================================== class ExceptionControlTest(TestCase): """ Main test driver for the SetEnvironment class """ def setUp(self): print("") return def 
test_ExceptionControl_property_exception_control_level(self): """ Test reading and setting the property `exception_control_level` """ class testme(ExceptionControl): def __init__(self): pass return inst_testme = testme() # Test default value (2) # Test value -1 (bad value should default to 0) inst_testme.exception_control_level = -1 self.assertEqual(inst_testme.exception_control_level, 0) # Test value 0 inst_testme.exception_control_level = 0 self.assertEqual(inst_testme.exception_control_level, 0) # Test value 1 inst_testme.exception_control_level = 1 self.assertEqual(inst_testme.exception_control_level, 1) # Test value 2 inst_testme.exception_control_level = 2 self.assertEqual(inst_testme.exception_control_level, 2) # Test value 3 inst_testme.exception_control_level = 3 self.assertEqual(inst_testme.exception_control_level, 3) # Test value 4 inst_testme.exception_control_level = 4 self.assertEqual(inst_testme.exception_control_level, 4) # Test value 5 inst_testme.exception_control_level = 5 self.assertEqual(inst_testme.exception_control_level, 5) # Test value 6 (bad value should default to 5) inst_testme.exception_control_level = 6 self.assertEqual(inst_testme.exception_control_level, 5) print("OK") return 0 def test_ExceptionControl_method_exception_control_event(self): class testme(ExceptionControl): def __init__(self): pass return def event_silent(self): inst_testme.exception_control_event("SILENT", ValueError, message="message text") def event_warning(self): inst_testme.exception_control_event("WARNING", ValueError, message="message text") def event_minor(self): inst_testme.exception_control_event("MINOR", ValueError, message="message text") def event_serious(self): inst_testme.exception_control_event("SERIOUS", ValueError, message="message text") def event_critical(self): inst_testme.exception_control_event("CRITICAL", ValueError, message="message text") def event_catastrophic(self): inst_testme.exception_control_event("CATASTROPHIC", ValueError, 
message="message text") inst_testme = testme() exception_skipped_msg_regex_01 = r"!! EXCEPTION SKIPPED" exception_skipped_msg_regex_02 = r"Message\s*:" # Default exception_control_level == 4 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() print(fake_out.getvalue()) self.assertIn(exception_skipped_msg_regex_01, fake_out.getvalue()) with self.assertRaises(ValueError): inst_testme.event_minor() with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_critical() # Set exception_control_level = 0 (Silent Running) inst_testme.exception_control_level = 0 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_critical() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 1 (Warnings for all, do not raise exceptions.) 
inst_testme.exception_control_level = 1 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() print(fake_out.getvalue()) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_02) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() print(fake_out.getvalue()) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_02) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious() print(fake_out.getvalue()) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_02) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_critical() print(fake_out.getvalue()) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_02) with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 2 (raise CRITICAL) inst_testme.exception_control_level = 2 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() print(fake_out.getvalue()) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() print(fake_out.getvalue()) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious() print(fake_out.getvalue()) 
self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) with self.assertRaises(ValueError): inst_testme.event_critical() # Set exception_control_level = 3 (raise CRITICAL, SERIOUS) inst_testme.exception_control_level = 3 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() print(fake_out.getvalue()) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() print(fake_out.getvalue()) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 4 (raise CRITICAL, SERIOUS, MINOR) inst_testme.exception_control_level = 4 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() print(fake_out.getvalue()) self.assertRegex(fake_out.getvalue(), exception_skipped_msg_regex_01) with self.assertRaises(ValueError): inst_testme.event_minor() with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 5 (raise ALL) inst_testme.exception_control_level = 5 with self.assertRaises(ValueError): inst_testme.event_silent() with self.assertRaises(ValueError): inst_testme.event_warning() with self.assertRaises(ValueError): inst_testme.event_minor() with self.assertRaises(ValueError): inst_testme.event_serious() with 
self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() print("OK") return 0 def test_ExceptionControl_method_exception_control_event_nomsg(self): class testme(ExceptionControl): def __init__(self): return def event_silent_nomsg(self): inst_testme.exception_control_event("SILENT", ValueError) def event_warning_nomsg(self): inst_testme.exception_control_event("WARNING", ValueError) def event_minor_nomsg(self): inst_testme.exception_control_event("MINOR", ValueError) def event_serious_nomsg(self): inst_testme.exception_control_event("SERIOUS", ValueError) def event_critical_nomsg(self): inst_testme.exception_control_event("CRITICAL", ValueError) def event_catastrophic_nomsg(self): inst_testme.exception_control_event("CATASTROPHIC", ValueError) inst_testme = testme() # Set exception_control_level = 1 (Warnings for all, do not raise exceptions.) inst_testme.exception_control_level = 1 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent_nomsg() print(fake_out.getvalue()) self.assertNotIn("Message:", fake_out.getvalue()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning_nomsg() print(fake_out.getvalue()) self.assertNotIn("Message:", fake_out.getvalue()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor_nomsg() print(fake_out.getvalue()) self.assertNotIn("Message:", fake_out.getvalue()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious_nomsg() print(fake_out.getvalue()) self.assertNotIn("Message:", fake_out.getvalue()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_critical_nomsg() print(fake_out.getvalue()) self.assertNotIn("Message:", fake_out.getvalue()) with self.assertRaises(ValueError): inst_testme.event_catastrophic_nomsg() # Set exception_control_level = 5 (raise ALL) inst_testme.exception_control_level = 5 with self.assertRaises(ValueError): 
inst_testme.event_silent_nomsg() with self.assertRaises(ValueError): inst_testme.event_warning_nomsg() with self.assertRaises(ValueError): inst_testme.event_minor_nomsg() with self.assertRaises(ValueError): inst_testme.event_serious_nomsg() with self.assertRaises(ValueError): inst_testme.event_critical_nomsg() with self.assertRaises(ValueError): inst_testme.event_catastrophic_nomsg() print("OK") return 0 def test_ExceptionControl_method_exception_control_event_badexception(self): class testme(ExceptionControl): def __init__(self): pass return def event_silent(self): inst_testme.exception_control_event("SILENT", int, message="message text") def event_warning(self): inst_testme.exception_control_event("WARNING", int, message="message text") def event_minor(self): inst_testme.exception_control_event("MINOR", None, message="message text") def event_serious(self): inst_testme.exception_control_event("SERIOUS", float, message="message text") def event_critical(self): inst_testme.exception_control_event("CRITICAL", None, message="message text") def event_catastrophic(self): inst_testme.exception_control_event("CATASTROPHIC", None, message="message text") inst_testme = testme() for level in range(6): inst_testme.exception_control_level = level with self.assertRaises(TypeError): inst_testme.event_silent() with self.assertRaises(TypeError): inst_testme.event_warning() with self.assertRaises(TypeError): inst_testme.event_minor() with self.assertRaises(TypeError): inst_testme.event_serious() with self.assertRaises(TypeError): inst_testme.event_critical() with self.assertRaises(TypeError): inst_testme.event_catastrophic() print("OK") return def test_ExceptionControl_method_exception_control_event_silent_warnings(self): class testme(ExceptionControl): def __init__(self): pass return def event_silent(self): inst_testme.exception_control_event("SILENT", ValueError, message="message text") def event_warning(self): inst_testme.exception_control_event("WARNING", ValueError, 
message="message text") def event_minor(self): inst_testme.exception_control_event("MINOR", ValueError, message="message text") def event_serious(self): inst_testme.exception_control_event("SERIOUS", ValueError, message="message text") def event_critical(self): inst_testme.exception_control_event("CRITICAL", ValueError, message="message text") def event_catastrophic(self): inst_testme.exception_control_event("CATASTROPHIC", ValueError, message="message text") inst_testme = testme() # Check that we raise the typeerror if the assignment isn't a bool with self.assertRaises(TypeError): inst_testme.exception_control_silent_warnings = None # Enable warning suppression inst_testme.exception_control_silent_warnings = True # Default exception_control_level == 4 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with self.assertRaises(ValueError): inst_testme.event_minor() with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 0 (Silent Running) inst_testme.exception_control_level = 0 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() print(fake_out.getvalue()) self.assertEqual("", 
fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_critical() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 1 (Warnings for all, do not raise exceptions.) inst_testme.exception_control_level = 1 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_critical() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 2 (raise CRITICAL) inst_testme.exception_control_level = 2 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', 
new=StringIO()) as fake_out: inst_testme.event_warning() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 3 (raise CRITICAL, SERIOUS) inst_testme.exception_control_level = 3 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 4 (raise CRITICAL, SERIOUS, MINOR) inst_testme.exception_control_level = 4 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) 
with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(output_expect, output_actual) with self.assertRaises(ValueError): inst_testme.event_minor() with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 5 (raise ALL) inst_testme.exception_control_level = 5 with self.assertRaises(ValueError): inst_testme.event_silent() with self.assertRaises(ValueError): inst_testme.event_warning() with self.assertRaises(ValueError): inst_testme.event_minor() with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() print("OK") return 0 def test_ExceptionControl_method_exception_control_event_compact_warnings(self): class testme(ExceptionControl): def __init__(self): pass return def event_silent(self): inst_testme.exception_control_event("SILENT", ValueError, message="message text") def event_warning(self): inst_testme.exception_control_event("WARNING", ValueError, message="message text") def event_minor(self): inst_testme.exception_control_event("MINOR", ValueError, message="message text") def event_serious(self): inst_testme.exception_control_event("SERIOUS", ValueError, message="message text") def event_critical(self): inst_testme.exception_control_event("CRITICAL", ValueError, message="message text") def event_catastrophic(self): inst_testme.exception_control_event("CATASTROPHIC", ValueError, message="message text") inst_testme = testme() # Check that we raise the typeerror if the assignment isn't a bool with self.assertRaises(TypeError): inst_testme.exception_control_compact_warnings = None # Enable warning suppression 
inst_testme.exception_control_compact_warnings = True exception_msg_regex_01 = r"!! EXCEPTION SKIPPED \(WARNING : ValueError\)" exception_msg_regex_02 = r"!! EXCEPTION SKIPPED \(MINOR : ValueError\)" exception_msg_regex_03 = r"!! EXCEPTION SKIPPED \(SERIOUS : ValueError\)" exception_msg_regex_04 = r"!! EXCEPTION SKIPPED \(CRITICAL : ValueError\)" # Default exception_control_level == 4 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() output_expect = "" output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_01) with self.assertRaises(ValueError): inst_testme.event_minor() with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 0 (Silent Running) inst_testme.exception_control_level = 0 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_critical() print(fake_out.getvalue()) self.assertEqual("", fake_out.getvalue().rstrip()) with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 1 (Warnings for all, do not raise exceptions.) 
inst_testme.exception_control_level = 1 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(0, len(output_actual.splitlines())) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_01) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_02) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_03) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_critical() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_04) with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 2 (raise CRITICAL) inst_testme.exception_control_level = 2 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(0, len(output_actual.splitlines())) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_01) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() output_actual = 
fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_02) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_serious() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_03) with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 3 (raise CRITICAL, SERIOUS) inst_testme.exception_control_level = 3 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(0, len(output_actual.splitlines())) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_01) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_minor() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_02) with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 4 (raise CRITICAL, SERIOUS, MINOR) inst_testme.exception_control_level = 4 with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_silent() output_actual = fake_out.getvalue().strip() print(output_actual) self.assertEqual(0, len(output_actual.splitlines())) with patch('sys.stdout', new=StringIO()) as fake_out: inst_testme.event_warning() output_actual = fake_out.getvalue().strip() 
print(output_actual) self.assertEqual(1, len(output_actual.splitlines())) self.assertRegex(output_actual, exception_msg_regex_01) with self.assertRaises(ValueError): inst_testme.event_minor() with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() # Set exception_control_level = 5 (raise ALL) inst_testme.exception_control_level = 5 with self.assertRaises(ValueError): inst_testme.event_silent() with self.assertRaises(ValueError): inst_testme.event_warning() with self.assertRaises(ValueError): inst_testme.event_minor() with self.assertRaises(ValueError): inst_testme.event_serious() with self.assertRaises(ValueError): inst_testme.event_critical() with self.assertRaises(ValueError): inst_testme.event_catastrophic() print("OK") return 0 # EOF
37.567
103
0.629861
4,072
37,567
5.542485
0.06999
0.096593
0.093713
0.081794
0.897913
0.887944
0.872746
0.846958
0.81643
0.807435
0
0.006652
0.259643
37,567
999
104
37.604605
0.804804
0.122421
0
0.901216
0
0
0.043719
0
0
0
0
0
0.25228
1
0.06535
false
0.007599
0.022796
0.00152
0.118541
0.112462
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
e288b5f9ace7bb4539acbd54cf4d92334d846865
79
py
Python
src/lesson_mathematics/math_log.py
jasonwee/asus-rt-n14uhp-mrtg
4fa96c3406e32ea6631ce447db6d19d70b2cd061
[ "Apache-2.0" ]
3
2018-08-14T09:33:52.000Z
2022-03-21T12:31:58.000Z
src/lesson_mathematics/math_log.py
jasonwee/asus-rt-n14uhp-mrtg
4fa96c3406e32ea6631ce447db6d19d70b2cd061
[ "Apache-2.0" ]
null
null
null
src/lesson_mathematics/math_log.py
jasonwee/asus-rt-n14uhp-mrtg
4fa96c3406e32ea6631ce447db6d19d70b2cd061
[ "Apache-2.0" ]
null
null
null
import math print(math.log(8)) print(math.log(8, 2)) print(math.log(0.5, 2))
11.285714
23
0.658228
17
79
3.058824
0.470588
0.519231
0.692308
0.5
0
0
0
0
0
0
0
0.085714
0.113924
79
6
24
13.166667
0.657143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.25
0
0.25
0.75
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
7
2c47ca16647e30b7d3c5464aa3de82a794ce0a23
6,360
py
Python
games/tasks/update_switch_eu.py
JeffersonBC/eshop-index-back
66a11ad2ee86b9cd4dd49bcb34f676db3281153b
[ "MIT" ]
null
null
null
games/tasks/update_switch_eu.py
JeffersonBC/eshop-index-back
66a11ad2ee86b9cd4dd49bcb34f676db3281153b
[ "MIT" ]
null
null
null
games/tasks/update_switch_eu.py
JeffersonBC/eshop-index-back
66a11ad2ee86b9cd4dd49bcb34f676db3281153b
[ "MIT" ]
null
null
null
from celery import shared_task from requests.exceptions import Timeout, ConnectionError import requests from classification.models.tag import TagGroup from games.models import SwitchGameEU from games.serializers import SwitchGameEUSerializer from games.tasks.update_utils import treated_request, create_tag_if_not_exists @shared_task() def update_switch_eu(): print('Updating Switch EU games...') url = 'http://search.nintendo-europe.com/en/select' params = { 'fq': 'type:GAME AND system_type:nintendoswitch* AND product_code_txt:*', 'q': '*', 'rows': 9999, 'sort': 'sorting_title asc', 'start': 0, 'wt': 'json', } # Make the request, and stop the task if there's any problem req = treated_request(url, params, 'EU Switch games') if req is None: return tag_group_publisher, tag_group_pub_created = \ TagGroup.objects.get_or_create(name='Publisher') tag_group_developer, tag_group_dev_created = \ TagGroup.objects.get_or_create(name='Developer') tag_group_age, tag_group_age_created = \ TagGroup.objects.get_or_create(name='Age Rating') tag_group_characteristics, tag_group_created = \ TagGroup.objects.get_or_create(name='Characteristics') # Add every game to the database print('{} games found'.format(len(req.json()['response']['docs']))) for game in req.json()['response']['docs']: if SwitchGameEU.objects.filter( game_code_unique=game['product_code_txt'][0].strip()[4:9]).exists(): continue serializer = SwitchGameEUSerializer(data=game) if serializer.is_valid(): # print('Added: {}'.format(game['title'])) switch_game_eu = serializer.save() # If game has a publisher defined, add it as a tag if 'developer' in game: create_tag_if_not_exists( game['developer'], tag_group_developer, switch_game_eu.switchgame) # If game has a publisher defined, add it as a tag if 'publisher' in game: create_tag_if_not_exists( game['publisher'], tag_group_publisher, switch_game_eu.switchgame) # If game has an age rating defined, add it as a tag if 'age_rating_sorting_i' in game and 
game['age_rating_sorting_i'] != 0: create_tag_if_not_exists( 'PEGI ' + str(game['age_rating_sorting_i']), tag_group_age, switch_game_eu.switchgame) # If game has physical version set to true if 'physical_version_b' in game and game['physical_version_b'] == True: create_tag_if_not_exists( 'Physical Release', tag_group_characteristics, switch_game_eu.switchgame) else: print('[ERROR] ({}): {}'.format(game['title'], serializer.errors)) # One off task made to update the production database @shared_task() def update_switch_eu_age_tag(): print('Updating Switch EU games age rating...') url = 'http://search.nintendo-europe.com/en/select' params = { 'fq': 'type:GAME AND system_type:nintendoswitch* AND product_code_txt:*', 'q': '*', 'rows': 9999, 'sort': 'sorting_title asc', 'start': 0, 'wt': 'json', } # Make the request, and stop the task if there's any problem req = treated_request(url, params, 'EU Switch games') if req is None: return # Create/ Get the 'Age Rating' Tag Group tag_group_age, tag_group_created = \ TagGroup.objects.get_or_create(name='Age Rating') # Adds age rating tags for every game already on the database print('{} games found'.format(len(req.json()['response']['docs']))) for game in req.json()['response']['docs']: if not SwitchGameEU.objects.filter( game_code_unique=game['product_code_txt'][0].strip()[4:9]).exists(): continue serializer = SwitchGameEUSerializer(data=game) if serializer.is_valid(): switch_game_eu = SwitchGameEU.objects.get( game_code_unique=game['product_code_txt'][0].strip()[4:9]) # If game has an age rating defined, add it as a tag if 'age_rating_sorting_i' in game and game['age_rating_sorting_i'] != 0: create_tag_if_not_exists( 'PEGI ' + str(game['age_rating_sorting_i']), tag_group_age, switch_game_eu.switchgame) # One off task made to update the production database @shared_task() def update_switch_eu_physical_tag(): print('Updating Switch EU games physical release tag...') url = 'http://search.nintendo-europe.com/en/select' params = { 
'fq': 'type:GAME AND system_type:nintendoswitch* AND product_code_txt:*', 'q': '*', 'rows': 9999, 'sort': 'sorting_title asc', 'start': 0, 'wt': 'json', } # Make the request, and stop the task if there's any problem req = treated_request(url, params, 'EU Switch games') if req is None: return # Create/ Get the 'Characteristics' Tag Group tag_group_characteristics, tag_group_created = \ TagGroup.objects.get_or_create(name='Characteristics') # Adds physical release tags for every game already on the database print('{} games found'.format(len(req.json()['response']['docs']))) for game in req.json()['response']['docs']: if not SwitchGameEU.objects.filter( game_code_unique=game['product_code_txt'][0].strip()[4:9]).exists(): continue serializer = SwitchGameEUSerializer(data=game) if serializer.is_valid(): switch_game_eu = SwitchGameEU.objects.get( game_code_unique=game['product_code_txt'][0].strip()[4:9]) # If game has physical version set to true if 'physical_version_b' in game and game['physical_version_b'] == True: create_tag_if_not_exists( 'Physical Release', tag_group_characteristics, switch_game_eu.switchgame)
35.730337
84
0.613836
787
6,360
4.735705
0.168996
0.04293
0.028978
0.026295
0.832573
0.812181
0.789375
0.757714
0.741615
0.714784
0
0.006967
0.27783
6,360
177
85
35.932203
0.804485
0.132547
0
0.737705
0
0
0.209493
0.01473
0
0
0
0
0
1
0.02459
false
0
0.057377
0
0.106557
0.057377
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
2c9d0457919a7a0d6126ff0d9cd4aad96ff975da
8,750
py
Python
hatvp/migrations/0033_auto_20200128_1651.py
WilliamLafarie/hatvp
76e856ec53e51f5a214a217bb07d15426269e7d7
[ "MIT" ]
null
null
null
hatvp/migrations/0033_auto_20200128_1651.py
WilliamLafarie/hatvp
76e856ec53e51f5a214a217bb07d15426269e7d7
[ "MIT" ]
null
null
null
hatvp/migrations/0033_auto_20200128_1651.py
WilliamLafarie/hatvp
76e856ec53e51f5a214a217bb07d15426269e7d7
[ "MIT" ]
null
null
null
# Generated by Django 3.0 on 2020-01-28 15:51 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('hatvp', '0032_auto_20200128_0952'), ] operations = [ migrations.AlterField( model_name='actions_menees', name='action_menee', field=models.CharField(blank=True, max_length=123, null=True), ), migrations.AlterField( model_name='actions_menees', name='action_menee_autre', field=models.CharField(blank=True, max_length=210, null=True), ), migrations.AlterField( model_name='affiliations', name='denomination_affiliation', field=models.CharField(blank=True, max_length=145, null=True), ), migrations.AlterField( model_name='affiliations', name='identifiant_national_affiliation', field=models.CharField(blank=True, max_length=20, null=True), ), migrations.AlterField( model_name='affiliations', name='type_identifiant_national_affiliation', field=models.CharField(blank=True, max_length=15, null=True), ), migrations.AlterField( model_name='beneficiaires', name='beneficiaire_action_menee', field=models.CharField(blank=True, max_length=135, null=True), ), migrations.AlterField( model_name='clients', name='denomination_client', field=models.CharField(blank=True, max_length=135, null=True), ), migrations.AlterField( model_name='clients', name='identifiant_national_client', field=models.CharField(blank=True, max_length=20, null=True), ), migrations.AlterField( model_name='clients', name='type_identifiant_national_client', field=models.CharField(blank=True, max_length=15, null=True), ), migrations.AlterField( model_name='collaborateurs', name='civilite_collaborateur', field=models.CharField(blank=True, max_length=5, null=True), ), migrations.AlterField( model_name='collaborateurs', name='fonction_collaborateur', field=models.CharField(blank=True, max_length=200, null=True), ), migrations.AlterField( model_name='collaborateurs', name='nom_collaborateur', field=models.CharField(blank=True, max_length=41, null=True), ), migrations.AlterField( 
model_name='collaborateurs', name='nom_prenom_collaborateur', field=models.CharField(blank=True, max_length=60, null=True), ), migrations.AlterField( model_name='collaborateurs', name='prenom_collaborateur', field=models.CharField(blank=True, max_length=44, null=True), ), migrations.AlterField( model_name='decisions_concernees', name='decision_concernee', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='dirigeants', name='civilite_dirigeant', field=models.CharField(blank=True, max_length=13, null=True), ), migrations.AlterField( model_name='dirigeants', name='fonction_dirigeant', field=models.CharField(blank=True, max_length=119, null=True), ), migrations.AlterField( model_name='dirigeants', name='nom_dirigeant', field=models.CharField(blank=True, max_length=39, null=True), ), migrations.AlterField( model_name='dirigeants', name='nom_prenom_dirigeant', field=models.CharField(blank=True, max_length=45, null=True), ), migrations.AlterField( model_name='dirigeants', name='prenom_dirigeant', field=models.CharField(blank=True, max_length=31, null=True), ), migrations.AlterField( model_name='domaines_intervention', name='domaines_intervention_actions_menees', field=models.CharField(blank=True, max_length=55, null=True), ), migrations.AlterField( model_name='exercices', name='chiffre_affaires', field=models.CharField(blank=True, max_length=100, null=True), ), migrations.AlterField( model_name='exercices', name='montant_depense', field=models.CharField(blank=True, max_length=42, null=True), ), migrations.AlterField( model_name='exercices', name='nombre_salaries', field=models.CharField(blank=True, max_length=17, null=True), ), migrations.AlterField( model_name='informations_generales', name='adresse', field=models.CharField(blank=True, max_length=170, null=True), ), migrations.AlterField( model_name='informations_generales', name='code_postal', field=models.CharField(blank=True, max_length=20, null=True), ), 
migrations.AlterField( model_name='informations_generales', name='denomination', field=models.CharField(blank=True, max_length=135, null=True), ), migrations.AlterField( model_name='informations_generales', name='identifiant_national', field=models.CharField(blank=True, max_length=20, null=True), ), migrations.AlterField( model_name='informations_generales', name='label_categorie_organisation', field=models.CharField(blank=True, max_length=80, null=True), ), migrations.AlterField( model_name='informations_generales', name='nom_usage_HATVP', field=models.CharField(blank=True, max_length=127, null=True), ), migrations.AlterField( model_name='informations_generales', name='page_facebook', field=models.CharField(blank=True, max_length=150, null=True), ), migrations.AlterField( model_name='informations_generales', name='page_linkedin', field=models.CharField(blank=True, max_length=280, null=True), ), migrations.AlterField( model_name='informations_generales', name='page_twitter', field=models.CharField(blank=True, max_length=105, null=True), ), migrations.AlterField( model_name='informations_generales', name='pays', field=models.CharField(blank=True, max_length=21, null=True), ), migrations.AlterField( model_name='informations_generales', name='sigle_HATVP', field=models.CharField(blank=True, max_length=46, null=True), ), migrations.AlterField( model_name='informations_generales', name='site_web', field=models.CharField(blank=True, max_length=115, null=True), ), migrations.AlterField( model_name='informations_generales', name='type_identifiant_national', field=models.CharField(blank=True, max_length=15, null=True), ), migrations.AlterField( model_name='informations_generales', name='ville', field=models.CharField(blank=True, max_length=40, null=True), ), migrations.AlterField( model_name='niveaux_intervention', name='niveau_intervention', field=models.CharField(blank=True, max_length=18, null=True), ), migrations.AlterField( model_name='objets_activites', 
name='identifiant_fiche', field=models.CharField(blank=True, max_length=18, null=True), ), migrations.AlterField( model_name='objets_activites', name='objet_activite', field=models.CharField(blank=True, max_length=210, null=True), ), migrations.AlterField( model_name='observations', name='observation', field=models.CharField(blank=True, max_length=710, null=True), ), migrations.AlterField( model_name='secteur_activites', name='secteur_activite', field=models.CharField(blank=True, max_length=59, null=True), ), ]
38.209607
74
0.592914
834
8,750
6.023981
0.157074
0.171178
0.213973
0.248209
0.863854
0.863854
0.832803
0.742038
0.554538
0.329817
0
0.021594
0.296114
8,750
228
75
38.377193
0.794122
0.004914
0
0.626126
1
0
0.170017
0.078805
0
0
0
0
0
1
0
false
0
0.004505
0
0.018018
0
0
0
0
null
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
2cb6e163db7cde4238729d25467cca8506a8f666
130
py
Python
snnw/nn/activation/__init__.py
juliustao/SNNW
2051c81b4013030d67fdfdcb1dd2973ba550ddd9
[ "MIT" ]
null
null
null
snnw/nn/activation/__init__.py
juliustao/SNNW
2051c81b4013030d67fdfdcb1dd2973ba550ddd9
[ "MIT" ]
null
null
null
snnw/nn/activation/__init__.py
juliustao/SNNW
2051c81b4013030d67fdfdcb1dd2973ba550ddd9
[ "MIT" ]
null
null
null
import snnw.nn.activation.relu import snnw.nn.activation.sigmoid import snnw.nn.activation.softmax import snnw.nn.activation.tanh
26
33
0.846154
20
130
5.5
0.4
0.363636
0.436364
0.8
0
0
0
0
0
0
0
0
0.061538
130
4
34
32.5
0.901639
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8
e2bd7d35c45752b5298ec6a7a890f74a7ef9a059
16,549
py
Python
train_model.py
YanYangB/disambiguation
068332dfc794c5fac848763e7d3116431a50d861
[ "MIT" ]
22
2019-12-05T12:25:33.000Z
2021-08-18T08:09:20.000Z
train_model.py
OriginalAspiration/disambiguation
15615951a7d35de5ab654393acecb9dcf850d426
[ "MIT" ]
2
2020-03-18T01:58:43.000Z
2020-11-29T08:07:46.000Z
train_model.py
OriginalAspiration/disambiguation
15615951a7d35de5ab654393acecb9dcf850d426
[ "MIT" ]
10
2019-12-05T13:05:12.000Z
2020-11-23T11:42:35.000Z
#!/usr/bin/env python3 # -*- encoding: utf-8 -*- ''' @File : train_model.py @Time : 2019/12/05 13:49:58 @Author : Yan Yang @Contact : yanyangbupt@gmail.com @Desc : None ''' # .::::. # .::::::::. # ::::::::::: # ..:::::::::::' # '::::::::::::' # .:::::::::: # '::::::::::::::.. # ..::::::::::::. # ``:::::::::::::::: # ::::``:::::::::' .:::. # ::::' ':::::' .::::::::. # .::::' :::: .:::::::'::::. # .:::' ::::: .:::::::::' ':::::. # .::' :::::.:::::::::' ':::::. # .::' ::::::::::::::' ``::::. # ...::: ::::::::::::' ``::. # ```` ':. ':::::::::' ::::.. # '.:::::' ':'````.. # 美女保佑 永无BUG from channel2_v2 import * import os from xgboost import XGBClassifier from catboost import CatBoostClassifier from lightgbm import LGBMClassifier from sklearn.ensemble import RandomForestClassifier add_text_feature_for_train() create_feature() models = [ { # 0.85926333738039 original best 'model_path': os.path.join(STACK_MODEL_DIR_v2, 'sm-191125-nosetinfo-extend3-sample11.pkl'), 'ss_path': os.path.join(STACK_MODEL_DIR_v2, 'standardscaler-last1year-nosetinfo-extend3-sample11.pkl'), 'cols': BASE_COLS, 'score': 0.85926333738039, 'name': 'sm-191125-nosetinfo-extend3-sample11.pkl', 'model': [ [ CatBoostClassifier( iterations=180, learning_rate=0.1, depth=7, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), CatBoostClassifier( iterations=500, learning_rate=0.1, depth=4, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), XGBClassifier( max_depth=7, learning_rate=0.05, n_estimators=180, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), XGBClassifier( max_depth=4, learning_rate=0.05, n_estimators=350, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=7, learning_rate=0.01, n_estimators=800, objective='binary', subsample=0.8, n_jobs=23, num_leaves=82, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=4, learning_rate=0.01, n_estimators=2000, 
objective='binary', subsample=0.8, n_jobs=23, num_leaves=12, random_state=RANDOM_SEED ), RandomForestClassifier( n_estimators=1000, max_depth=35, n_jobs=-1, verbose=0, random_state=RANDOM_SEED ), ], [ CatBoostClassifier( iterations=150, learning_rate=0.1, depth=2, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), ], ], 'model_param': [ [ {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {}, ], [ {'verbose': False}, ], ], }, { # 0.858031834386063 with set info 'model_path': os.path.join(STACK_MODEL_DIR_v2, 'test-2-sm-191127-withsetinfo-sample11.pkl'), 'ss_path': os.path.join(STACK_MODEL_DIR_v2, 'standardscaler-last1year-withsetinfo-sample11.pkl'), 'cols': BASE_COLS + SET_INFO_COLS, 'score': 0.858031834386063, 'name': 'test-2-sm-191127-withsetinfo-sample11.pkl', 'model': [ [ CatBoostClassifier( iterations=400, learning_rate=0.05, depth=7, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), CatBoostClassifier( iterations=1000, learning_rate=0.05, depth=4, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), XGBClassifier( max_depth=7, learning_rate=0.05, n_estimators=180, subsample=0.8, n_jobs=-1, min_child_weight=4, random_state=RANDOM_SEED ), XGBClassifier( max_depth=4, learning_rate=0.03, n_estimators=500, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=7, learning_rate=0.01, n_estimators=1000, objective='binary', subsample=0.8, n_jobs=23, num_leaves=35, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=4, learning_rate=0.01, n_estimators=3500, objective='binary', subsample=0.8, n_jobs=23, num_leaves=5, random_state=RANDOM_SEED ), RandomForestClassifier( n_estimators=1000, max_depth=35, n_jobs=-1, verbose=0, random_state=RANDOM_SEED ), ], [ CatBoostClassifier( iterations=800, learning_rate=0.01, depth=3, 
loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), ], ], 'model_param': [ [ {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {}, ], [ {'verbose': False}, ], ], }, { # 0.856180351089599 with set info 'model_path': os.path.join(STACK_MODEL_DIR_v2, 'sm-191127-withsetinfo-11.pkl'), 'ss_path': os.path.join(STACK_MODEL_DIR_v2, 'standardscaler-last1year-withsetinfo-11.pkl'), 'cols': BASE_COLS + SET_INFO_COLS, 'score': 0.856180351089599, 'name': 'sm-191127-withsetinfo-11.pkl', 'model': [ [ CatBoostClassifier( iterations=400, learning_rate=0.05, depth=7, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), CatBoostClassifier( iterations=1000, learning_rate=0.05, depth=4, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), XGBClassifier( max_depth=7, learning_rate=0.05, n_estimators=180, subsample=0.8, n_jobs=-1, min_child_weight=4, random_state=RANDOM_SEED ), XGBClassifier( max_depth=4, learning_rate=0.03, n_estimators=500, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=7, learning_rate=0.01, n_estimators=1000, objective='binary', subsample=0.8, n_jobs=23, num_leaves=35, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=4, learning_rate=0.01, n_estimators=3500, objective='binary', subsample=0.8, n_jobs=23, num_leaves=5, random_state=RANDOM_SEED ), RandomForestClassifier( n_estimators=1000, max_depth=35, n_jobs=-1, verbose=0, random_state=RANDOM_SEED ), ], [ CatBoostClassifier( iterations=800, learning_rate=0.01, depth=3, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), ], ], 'model_param': [ [ {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {}, ], [ {'verbose': False}, ], ], }, { # 0.855763586778158 with set info and 
title info 'model_path': os.path.join(STACK_MODEL_DIR_v2, 'sm-191128-withsetinfo-title-11-norf.pkl'), 'ss_path': os.path.join(STACK_MODEL_DIR_v2, 'standardscaler-last1year-withsetinfo-title-11.pkl'), 'cols': BASE_COLS + SET_INFO_COLS + TITLE_COLS, 'score': 0.855763586778158, 'name': 'sm-191128-withsetinfo-title-11-norf.pkl', 'model': [ [ CatBoostClassifier( iterations=320, learning_rate=0.05, depth=7, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), CatBoostClassifier( iterations=900, learning_rate=0.05, depth=4, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), XGBClassifier( max_depth=7, learning_rate=0.05, n_estimators=180, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), XGBClassifier( max_depth=4, learning_rate=0.03, n_estimators=500, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=7, learning_rate=0.01, n_estimators=1000, objective='binary', subsample=0.8, n_jobs=-1, num_leaves=82, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=4, learning_rate=0.01, n_estimators=3500, objective='binary', subsample=0.8, n_jobs=-1, num_leaves=5, random_state=RANDOM_SEED ), ], [ CatBoostClassifier( iterations=1200, learning_rate=0.01, depth=2, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), ], ], 'model_param': [ [ {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, ], [ {'verbose': False}, ], ], }, { # 0.85364791527539 'model_path': os.path.join(STACK_MODEL_DIR_v2, 'sm-191126-withsetinfo-sample11.pkl'), 'ss_path': os.path.join(STACK_MODEL_DIR_v2, 'standardscaler-last1year-withsetinfo-sample11.pkl'), 'cols': BASE_COLS + SET_INFO_COLS, 'score': 0.85364791527539, 'name': 'sm-191126-withsetinfo-sample11.pkl', 'model': [ [ CatBoostClassifier( iterations=180, learning_rate=0.1, depth=7, 
loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), CatBoostClassifier( iterations=500, learning_rate=0.1, depth=4, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), XGBClassifier( max_depth=7, learning_rate=0.05, n_estimators=180, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), XGBClassifier( max_depth=4, learning_rate=0.05, n_estimators=350, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=7, learning_rate=0.01, n_estimators=800, objective='binary', subsample=0.8, n_jobs=-1, num_leaves=82, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=4, learning_rate=0.01, n_estimators=2000, objective='binary', subsample=0.8, n_jobs=-1, num_leaves=12, random_state=RANDOM_SEED ), RandomForestClassifier( n_estimators=1000, max_depth=35, n_jobs=-1, verbose=0, random_state=RANDOM_SEED ), ], [ CatBoostClassifier( iterations=150, learning_rate=0.1, depth=2, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), ], ], 'model_param': [ [ {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {}, ], [ {'verbose': False}, ], ], }, { # 0.855538436984147 'model_path': os.path.join(STACK_MODEL_DIR_v2, 'sm-191128-withsetinfo-title-11.pkl'), 'ss_path': os.path.join(STACK_MODEL_DIR_v2, 'standardscaler-last1year-withsetinfo-title-11.pkl'), 'cols': BASE_COLS + SET_INFO_COLS + TITLE_COLS, 'score': 0.855538436984147, 'name': 'sm-191128-withsetinfo-title-11.pkl', 'model': [ [ CatBoostClassifier( iterations=320, learning_rate=0.05, depth=7, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), CatBoostClassifier( iterations=900, learning_rate=0.05, depth=4, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), XGBClassifier( max_depth=7, learning_rate=0.05, 
n_estimators=180, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), XGBClassifier( max_depth=4, learning_rate=0.03, n_estimators=500, subsample=0.8, n_jobs=-1, min_child_weight=6, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=7, learning_rate=0.01, n_estimators=1000, objective='binary', subsample=0.8, n_jobs=-1, num_leaves=82, random_state=RANDOM_SEED ), LGBMClassifier( max_depth=4, learning_rate=0.01, n_estimators=3500, objective='binary', subsample=0.8, n_jobs=-1, num_leaves=5, random_state=RANDOM_SEED ), RandomForestClassifier( n_estimators=1000, max_depth=60, n_jobs=-1, verbose=0, random_state=RANDOM_SEED ), ], [ CatBoostClassifier( iterations=1200, learning_rate=0.01, depth=2, loss_function='Logloss', eval_metric='Logloss', task_type='GPU', random_seed=RANDOM_SEED ), ], ], 'model_param': [ [ {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {'verbose': False}, {}, ], [ {'verbose': False}, ], ], }, ] for model_info in models: print('--'*50) train(model_info)
42.652062
111
0.485588
1,560
16,549
4.898718
0.092949
0.085056
0.071447
0.113059
0.918739
0.908924
0.897016
0.886679
0.886417
0.88223
0
0.077747
0.38522
16,549
387
112
42.762274
0.673383
0.068645
0
0.792717
0
0
0.10898
0.047207
0
0
0
0
0
1
0
false
0
0.016807
0
0.016807
0.002801
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
e2d2b71438d7c0b7197e57702ca78fe7d6fa7c07
12,164
py
Python
roch_gpsr/scripts/slam_goto.py
FaiScofield/roch_GPSR
268e37075a6d692ca1235ad759dc73beaeaab8bf
[ "BSD-2-Clause" ]
1
2019-07-22T01:26:50.000Z
2019-07-22T01:26:50.000Z
roch_gpsr/scripts/slam_goto.py
FaiScofield/roch_GPSR
268e37075a6d692ca1235ad759dc73beaeaab8bf
[ "BSD-2-Clause" ]
null
null
null
roch_gpsr/scripts/slam_goto.py
FaiScofield/roch_GPSR
268e37075a6d692ca1235ad759dc73beaeaab8bf
[ "BSD-2-Clause" ]
1
2020-08-09T01:02:58.000Z
2020-08-09T01:02:58.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- #-------------------------------------------------- #SLAM移动ROS节点 # #author: Vance Wu #date: 17/07/31 #-------------------------------------------------- import sys import roslib sys.path.append(roslib.packages.get_pkg_dir('roch_gpsr') + '/scripts') from common_import import * from common_function import * #-------------------------------------------------- #全局变量 #-------------------------------------------------- move_base_cmd_vel = Twist() #-------------------------------------------------- #-------------------------------------------------- def subf_move_base_cmd_vel(sub_move_base_cmd_vel): global move_base_cmd_vel move_base_cmd_vel = sub_move_base_cmd_vel #-------------------------------------------------- #-------------------------------------------------- if __name__ == '__main__': node_name = os.path.basename(__file__) node_name = node_name.split('.') rospy.init_node(node_name[0]) if not rospy.is_shutdown: commonf_speech_multi('前往目的地中') rospy.Subscriber("/move_base/cmd_vel", Twist, subf_move_base_cmd_vel) target_x = float(rospy.get_param('/param/gpsr/slam_goal/x')) target_y = float(rospy.get_param('/param/gpsr/slam_goal/y')) target_yaw = float(rospy.get_param('/param/gpsr/slam_goal/yaw')) th_trans = 0.2 tf_listener = tf.TransformListener() main_rate = rospy.Rate(30) while not rospy.is_shutdown(): while not rospy.is_shutdown(): try: (translation, rotation) = tf_listener.lookupTransform('/map', '/base_footprint', rospy.Time(0)) except: continue break euler = euler_from_quaternion([rotation[0], rotation[1], rotation[2], rotation[3]]) if abs(target_x - translation[0]) > th_trans or abs(target_y - translation[1]) > th_trans: if abs(move_base_cmd_vel.linear.x) < 0.1 and abs(move_base_cmd_vel.angular.z) < 0.261: if move_base_cmd_vel.angular.z > 0: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.261) else: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.261) if move_base_cmd_vel.angular.z > 0.5: 
commonf_pubf_cmd_vel(move_base_cmd_vel.linear.x, 0, 0, 0, 0, 0.5) elif move_base_cmd_vel.angular.z < -0.5: commonf_pubf_cmd_vel(move_base_cmd_vel.linear.x, 0, 0, 0, 0, -0.5) else: commonf_pubf_cmd_vel(move_base_cmd_vel.linear.x, 0, 0, 0, 0, move_base_cmd_vel.angular.z) else: if th_trans == 0.2: th_trans = 0.3 # x # | # 1 | 4 # | # y------- # | # 2 | 3 # | if target_yaw > 0: if target_yaw < 1.57: if euler[2] > 0: if euler[2] < 1.57: #目標: 1 #現在: 1 if abs(target_yaw - euler[2]) > 0.262: if target_yaw > euler[2]: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) else: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.349) elif abs(target_yaw - euler[2]) > 0.087: if target_yaw > euler[2]: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.174) else: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.174) else: break else: #目標: 1 #現在: 2 if abs(target_yaw - euler[2]) > 0.262: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.349) elif abs(target_yaw - euler[2]) > 0.087: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.174) else: break else: if euler[2] < -1.57: #目標: 1 #現在: 3 commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) else: #目標: 1 #現在: 4 if abs(0 - target_yaw) + abs(0 - euler[2]) > 0.262: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) elif abs(0 - target_yaw) + abs(0 - euler[2]) > 0.087: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.174) else: break else: if euler[2] > 0: if euler[2] < 1.57: #目標: 2 #現在: 1 if abs(target_yaw - euler[2]) > 0.262: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) elif abs(target_yaw - euler[2]) > 0.087: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.174) else: break else: #目標: 2 #現在: 2 if abs(target_yaw - euler[2]) > 0.262: if target_yaw > euler[2]: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) else: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.349) elif abs(target_yaw - euler[2]) > 0.087: if target_yaw > euler[2]: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.174) else: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.174) else: break else: if euler[2] < -1.57: #目標: 2 #現在: 3 if abs(math.pi - target_yaw) + abs(-math.pi - euler[2]) > 0.262: 
commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.349) elif abs(math.pi - target_yaw) + abs(-math.pi - euler[2]) > 0.087: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.174) else: break else: #目標: 2 #現在: 4 commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) else: if target_yaw < -1.57: if euler[2] > 0: if euler[2] < 1.57: #目標: 3 #現在: 1 commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) else: #目標: 3 #現在: 2 if abs(-math.pi - target_yaw) + abs(math.pi - euler[2]) > 0.262: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) elif abs(-math.pi - target_yaw) + abs(math.pi - euler[2]) > 0.087: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.174) else: break else: if euler[2] < -1.57: #目標: 3 #現在: 3 if abs(target_yaw - euler[2]) > 0.262: if target_yaw > euler[2]: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) else: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.349) elif abs(target_yaw - euler[2]) > 0.087: if target_yaw > euler[2]: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.174) else: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.174) else: break else: #目標: 3 #現在: 4 if abs(target_yaw - euler[2]) > 0.262: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.349) elif abs(target_yaw - euler[2]) > 0.087: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.174) else: break else: if euler[2] > 0: if euler[2] < 1.57: #目標: 4 #現在: 1 if abs(0 - target_yaw) + abs(0 - euler[2]) > 0.262: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.349) elif abs(math.pi - target_yaw) + abs(-math.pi - euler[2]) > 0.087: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.174) else: break else: #目標: 4 #現在: 2 commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) else: if euler[2] < -1.57: #目標: 4 #現在: 3 if abs(target_yaw - euler[2]) > 0.262: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) elif abs(target_yaw - euler[2]) > 0.087: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.174) else: break else: #目標: 4 #現在: 4 if abs(target_yaw - euler[2]) > 0.262: if target_yaw > euler[2]: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.349) else: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.349) elif abs(target_yaw - euler[2]) > 0.087: if target_yaw > euler[2]: 
commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0.174) else: commonf_pubf_cmd_vel(0, 0, 0, 0, 0, -0.174) else: break #main_rate.sleep() commonf_pubf_cmd_vel(0, 0, 0, 0, 0, 0) commonf_speech_multi('已经达到目的地了呢.') sys.exit(0)
46.965251
118
0.316919
1,210
12,164
2.965289
0.094215
0.114827
0.137124
0.136009
0.79097
0.75864
0.750836
0.738573
0.701784
0.680602
0
0.115833
0.562808
12,164
258
119
47.147287
0.558857
0.06174
0
0.724719
0
0
0.013208
0.006252
0
0
0
0
0
1
0.005618
false
0
0.022472
0
0.02809
0.005618
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
1
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
393f49507f686db0cd9dfd288f1c198145213641
100
py
Python
lib/activation_functions.py
koreander2001/deep-learning-from-scratch
6f8c4d97ed1dca9cbe7d1ea3e71e7e67275d129c
[ "MIT" ]
null
null
null
lib/activation_functions.py
koreander2001/deep-learning-from-scratch
6f8c4d97ed1dca9cbe7d1ea3e71e7e67275d129c
[ "MIT" ]
null
null
null
lib/activation_functions.py
koreander2001/deep-learning-from-scratch
6f8c4d97ed1dca9cbe7d1ea3e71e7e67275d129c
[ "MIT" ]
null
null
null
import numpy as np def step_function(X: np.ndarray) -> np.ndarray: return (X > 0).astype(int)
16.666667
47
0.67
17
100
3.882353
0.764706
0.272727
0
0
0
0
0
0
0
0
0
0.012346
0.19
100
5
48
20
0.802469
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
7
1a96717ec99b1ef161cb2bf4b86ff02f7b4bc430
175
py
Python
generated-libraries/python/netapp/volume/storage_service_name.py
radekg/netapp-ontap-lib-get
6445ebb071ec147ea82a486fbe9f094c56c5c40d
[ "MIT" ]
2
2017-03-28T15:31:26.000Z
2018-08-16T22:15:18.000Z
generated-libraries/python/netapp/volume/storage_service_name.py
radekg/netapp-ontap-lib-get
6445ebb071ec147ea82a486fbe9f094c56c5c40d
[ "MIT" ]
null
null
null
generated-libraries/python/netapp/volume/storage_service_name.py
radekg/netapp-ontap-lib-get
6445ebb071ec147ea82a486fbe9f094c56c5c40d
[ "MIT" ]
null
null
null
class StorageServiceName(basestring): """ The Storage Service Name """ @staticmethod def get_api_name(): return "storage-service-name"
17.5
39
0.6
16
175
6.4375
0.75
0.271845
0.349515
0
0
0
0
0
0
0
0
0
0.302857
175
9
40
19.444444
0.844262
0.137143
0
0
0
0
0.148148
0
0
0
0
0
0
1
0.25
true
0
0
0.25
0.75
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
1
1
0
0
8
1abd6dcd038da6474ff41d59c8b7654c4eef1160
625
py
Python
cases/listSlicing.py
minakoyang/YY_python2.7_interpreter_in_CPP
e949f4bbd27752e6dbfef0a887d9567345d512f4
[ "MIT" ]
1
2019-04-30T16:27:19.000Z
2019-04-30T16:27:19.000Z
cases/listSlicing.py
minakoyang/YY_python2.7_interpreter_in_CPP
e949f4bbd27752e6dbfef0a887d9567345d512f4
[ "MIT" ]
null
null
null
cases/listSlicing.py
minakoyang/YY_python2.7_interpreter_in_CPP
e949f4bbd27752e6dbfef0a887d9567345d512f4
[ "MIT" ]
null
null
null
a = ["Hello", 2.00, 4, 4 + 5, 2 * 4.9, "World" * 3, "abc"[0], "abc"[::], 3 + 2.0, 3 ** 4, 3 * "a"] print a[1:3:1] print a[0:10:2] print a[1:10:] print a[:10:2] print a[1::3] print a[5::] print a[:10:] print a[::3] print a[::] print a[1:10] print a[1:5:1][1:3:1] print a[1:5:1][::] print a[1:5:-1] print a[5:0:-1] print a[10:0:-2] print a[:10:-2] print a[5::-3] print a[::-1] print a[1:5:1][::-1] print a[-1:-10:-1] print a[-5:-1:-1] print a[-1:-10:-2] print a[:-10:-2] print a[-5::-3] print a[-1:-10:-2][-1:-5:-1] print a[-10:-1] print a[:-1] print a[-10:] print a[-10:-1][-5:-1] print a[23:143:34] print a[325:43:-234]
16.891892
98
0.5136
161
625
1.993789
0.124224
0.579439
0.283489
0.149533
0.682243
0.464174
0.308411
0.233645
0.155763
0.155763
0
0.219512
0.1472
625
36
99
17.361111
0.382739
0
0
0
0
0
0.0272
0
0
0
0
0
0
0
null
null
0
0
null
null
0.96875
0
0
1
null
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
7
46fae4ed3a33563a76c5f4b1c54bd3644bf4d63e
57,250
py
Python
postprocess/funciones.py
pydataglobal/tut-322-isabel-yepes
7f929823ca46b8cca6e8de18dbd69dde0e221ce7
[ "MIT" ]
2
2020-11-13T19:50:04.000Z
2021-02-26T17:10:52.000Z
postprocess/funciones.py
pydataglobal/tut-322-isabel-yepes
7f929823ca46b8cca6e8de18dbd69dde0e221ce7
[ "MIT" ]
null
null
null
postprocess/funciones.py
pydataglobal/tut-322-isabel-yepes
7f929823ca46b8cca6e8de18dbd69dde0e221ce7
[ "MIT" ]
2
2020-11-15T09:33:03.000Z
2021-02-27T19:20:20.000Z
def accent_replace(post): post = post.replace('ñ', 'ñ') post = post.replace('á', 'á') post = post.replace('é', 'é') post = post.replace('í', 'í') post = post.replace('ó', 'ó') post = post.replace('ú', 'ú') post = post.replace('¿', '¿') post = post.replace('“', '\"') post = post.replace('” ', '\" ') post = post.replace('”.', '\".') post = post.replace('.”', '.\"') post = post.replace('’', '\'') post = post.replace('ü', 'ü') post = post.replace('¡', '¡') post = post.replace('Ã\x81', 'Á') post = post.replace('\n', ' ') post = post.replace('´', '\'') post = post.replace('â\x80\x9c', '\"') post = post.replace('â\x80\x9d', '\"') post = post.replace('ð\x9f\x98¢', '\"') return post def emoticons_replace(post): post = post.replace('\u00f0\u009f\u0098\u0081', ' ') post = post.replace('\u00f0\u009f\u0098\u0082', ' ') post = post.replace('\u00f0\u009f\u0098\u0083', ' ') post = post.replace('\u00f0\u009f\u0098\u0084', ' ') post = post.replace('\u00f0\u009f\u0098\u0085', ' ') post = post.replace('\u00f0\u009f\u0098\u0086', ' ') post = post.replace('\u00f0\u009f\u0098\u0089', ' ') post = post.replace('\u00f0\u009f\u0098\u008a', ' ') post = post.replace('\u00f0\u009f\u0098\u008b', ' ') post = post.replace('\u00f0\u009f\u0098\u008c', ' ') post = post.replace('\u00f0\u009f\u0098\u008d', ' ') post = post.replace('\u00f0\u009f\u0098\u008f', ' ') post = post.replace('\u00f0\u009f\u0098\u0092', ' ') post = post.replace('\u00f0\u009f\u0098\u0093', ' ') post = post.replace('\u00f0\u009f\u0098\u0094', ' ') post = post.replace('\u00f0\u009f\u0098\u0096', ' ') post = post.replace('\u00f0\u009f\u0098\u0098', ' ') post = post.replace('\u00f0\u009f\u0098\u009a', ' ') post = post.replace('\u00f0\u009f\u0098\u009c', ' ') post = post.replace('\u00f0\u009f\u0098\u009d', ' ') post = post.replace('\u00f0\u009f\u0098\u009e', ' ') post = post.replace('\u00f0\u009f\u0098\u00a0', ' ') post = post.replace('\u00f0\u009f\u0098\u00a1', ' ') post = post.replace('\u00f0\u009f\u0098\u00a2', ' ') post = 
post.replace('\u00f0\u009f\u0098\u00a3', ' ') post = post.replace('\u00f0\u009f\u0098\u00a4', ' ') post = post.replace('\u00f0\u009f\u0098\u00a5', ' ') post = post.replace('\u00f0\u009f\u0098\u00a8', ' ') post = post.replace('\u00f0\u009f\u0098\u00a9', ' ') post = post.replace('\u00f0\u009f\u0098\u00aa', ' ') post = post.replace('\u00f0\u009f\u0098\u00ab', ' ') post = post.replace('\u00f0\u009f\u0098\u00ad', ' ') post = post.replace('\u00f0\u009f\u0098\u00b0', ' ') post = post.replace('\u00f0\u009f\u0098\u00b1', ' ') post = post.replace('\u00f0\u009f\u0098\u00b2', ' ') post = post.replace('\u00f0\u009f\u0098\u00b3', ' ') post = post.replace('\u00f0\u009f\u0098\u00b5', ' ') post = post.replace('\u00f0\u009f\u0098\u00b7', ' ') post = post.replace('\u00f0\u009f\u0098\u00b8', ' ') post = post.replace('\u00f0\u009f\u0098\u00b9', ' ') post = post.replace('\u00f0\u009f\u0098\u00ba', ' ') post = post.replace('\u00f0\u009f\u0098\u00bb', ' ') post = post.replace('\u00f0\u009f\u0098\u00bc', ' ') post = post.replace('\u00f0\u009f\u0098\u00bd', ' ') post = post.replace('\u00f0\u009f\u0098\u00be', ' ') post = post.replace('\u00f0\u009f\u0098\u00bf', ' ') post = post.replace('\u00f0\u009f\u0099\u0080', ' ') post = post.replace('\u00f0\u009f\u0099\u0085', ' ') post = post.replace('\u00f0\u009f\u0099\u0086', ' ') post = post.replace('\u00f0\u009f\u0099\u0087', ' ') post = post.replace('\u00f0\u009f\u0099\u0088', ' ') post = post.replace('\u00f0\u009f\u0099\u0089', ' ') post = post.replace('\u00f0\u009f\u0099\u008a', ' ') post = post.replace('\u00f0\u009f\u0099\u008b', ' ') post = post.replace('\u00f0\u009f\u0099\u008c', ' ') post = post.replace('\u00f0\u009f\u0099\u008d', ' ') post = post.replace('\u00f0\u009f\u0099\u008e', ' ') post = post.replace('\u00f0\u009f\u0099\u008f', ' ') post = post.replace('\u00e2\u009c\u0082', ' ') post = post.replace('\u00e2\u009c\u0085', ' ') post = post.replace('\u00e2\u009c\u0088', ' ') post = post.replace('\u00e2\u009c\u0089', ' ') post = 
post.replace('\u00e2\u009c\u008a', ' ') post = post.replace('\u00e2\u009c\u008b', ' ') post = post.replace('\u00e2\u009c\u008c', ' ') post = post.replace('\u00e2\u009c\u008f', ' ') post = post.replace('\u00e2\u009c\u0092', ' ') post = post.replace('\u00e2\u009c\u0094', ' ') post = post.replace('\u00e2\u009c\u0096', ' ') post = post.replace('\u00e2\u009c\u00a8', ' ') post = post.replace('\u00e2\u009c\u00b3', ' ') post = post.replace('\u00e2\u009c\u00b4', ' ') post = post.replace('\u00e2\u009d\u0084', ' ') post = post.replace('\u00e2\u009d\u0087', ' ') post = post.replace('\u00e2\u009d\u008c', ' ') post = post.replace('\u00e2\u009d\u008e', ' ') post = post.replace('\u00e2\u009d\u0093', ' ') post = post.replace('\u00e2\u009d\u0094', ' ') post = post.replace('\u00e2\u009d\u0095', ' ') post = post.replace('\u00e2\u009d\u0097', ' ') post = post.replace('\u00e2\u009d\u00a4', ' ') post = post.replace('\u00e2\u009e\u0095', ' ') post = post.replace('\u00e2\u009e\u0096', ' ') post = post.replace('\u00e2\u009e\u0097', ' ') post = post.replace('\u00e2\u009e\u00a1', ' ') post = post.replace('\u00e2\u009e\u00b0', ' ') post = post.replace('\u00f0\u009f\u009a\u0080', ' ') post = post.replace('\u00f0\u009f\u009a\u0083', ' ') post = post.replace('\u00f0\u009f\u009a\u0084', ' ') post = post.replace('\u00f0\u009f\u009a\u0085', ' ') post = post.replace('\u00f0\u009f\u009a\u0087', ' ') post = post.replace('\u00f0\u009f\u009a\u0089', ' ') post = post.replace('\u00f0\u009f\u009a\u008c', ' ') post = post.replace('\u00f0\u009f\u009a\u008f', ' ') post = post.replace('\u00f0\u009f\u009a\u0091', ' ') post = post.replace('\u00f0\u009f\u009a\u0092', ' ') post = post.replace('\u00f0\u009f\u009a\u0093', ' ') post = post.replace('\u00f0\u009f\u009a\u0095', ' ') post = post.replace('\u00f0\u009f\u009a\u0097', ' ') post = post.replace('\u00f0\u009f\u009a\u0099', ' ') post = post.replace('\u00f0\u009f\u009a\u009a', ' ') post = post.replace('\u00f0\u009f\u009a\u00a2', ' ') post = 
post.replace('\u00f0\u009f\u009a\u00a4', ' ') post = post.replace('\u00f0\u009f\u009a\u00a5', ' ') post = post.replace('\u00f0\u009f\u009a\u00a7', ' ') post = post.replace('\u00f0\u009f\u009a\u00a8', ' ') post = post.replace('\u00f0\u009f\u009a\u00a9', ' ') post = post.replace('\u00f0\u009f\u009a\u00aa', ' ') post = post.replace('\u00f0\u009f\u009a\u00ab', ' ') post = post.replace('\u00f0\u009f\u009a\u00ac', ' ') post = post.replace('\u00f0\u009f\u009a\u00ad', ' ') post = post.replace('\u00f0\u009f\u009a\u00b2', ' ') post = post.replace('\u00f0\u009f\u009a\u00b6', ' ') post = post.replace('\u00f0\u009f\u009a\u00b9', ' ') post = post.replace('\u00f0\u009f\u009a\u00ba', ' ') post = post.replace('\u00f0\u009f\u009a\u00bb', ' ') post = post.replace('\u00f0\u009f\u009a\u00bc', ' ') post = post.replace('\u00f0\u009f\u009a\u00bd', ' ') post = post.replace('\u00f0\u009f\u009a\u00be', ' ') post = post.replace('\u00f0\u009f\u009b\u0080', ' ') post = post.replace('\u00e2\u0093\u0082', ' ') post = post.replace('\u00f0\u009f\u0085\u00b0', ' ') post = post.replace('\u00f0\u009f\u0085\u00b1', ' ') post = post.replace('\u00f0\u009f\u0085\u00be', ' ') post = post.replace('\u00f0\u009f\u0085\u00bf', ' ') post = post.replace('\u00f0\u009f\u0086\u008e', ' ') post = post.replace('\u00f0\u009f\u0086\u0091', ' ') post = post.replace('\u00f0\u009f\u0086\u0092', ' ') post = post.replace('\u00f0\u009f\u0086\u0093', ' ') post = post.replace('\u00f0\u009f\u0086\u0094', ' ') post = post.replace('\u00f0\u009f\u0086\u0095', ' ') post = post.replace('\u00f0\u009f\u0086\u0096', ' ') post = post.replace('\u00f0\u009f\u0086\u0097', ' ') post = post.replace('\u00f0\u009f\u0086\u0098', ' ') post = post.replace('\u00f0\u009f\u0086\u0099', ' ') post = post.replace('\u00f0\u009f\u0086\u009a', ' ') post = post.replace('\u00f0\u009f\u0087\u00a9\u00f0\u009f\u0087\u00aa', ' ') post = post.replace('\u00f0\u009f\u0087\u00ac\u00f0\u009f\u0087\u00a7', ' ') post = 
post.replace('\u00f0\u009f\u0087\u00a8\u00f0\u009f\u0087\u00b3', ' ') post = post.replace('\u00f0\u009f\u0087\u00af\u00f0\u009f\u0087\u00b5', ' ') post = post.replace('\u00f0\u009f\u0087\u00ab\u00f0\u009f\u0087\u00b7', ' ') post = post.replace('\u00f0\u009f\u0087\u00b0\u00f0\u009f\u0087\u00b7', ' ') post = post.replace('\u00f0\u009f\u0087\u00aa\u00f0\u009f\u0087\u00b8', ' ') post = post.replace('\u00f0\u009f\u0087\u00ae\u00f0\u009f\u0087\u00b9', ' ') post = post.replace('\u00f0\u009f\u0087\u00b7\u00f0\u009f\u0087\u00ba', ' ') post = post.replace('\u00f0\u009f\u0087\u00ba\u00f0\u009f\u0087\u00b8', ' ') post = post.replace('\u00f0\u009f\u0088\u0081', ' ') post = post.replace('\u00f0\u009f\u0088\u0082', ' ') post = post.replace('\u00f0\u009f\u0088\u009a', ' ') post = post.replace('\u00f0\u009f\u0088\u00af', ' ') post = post.replace('\u00f0\u009f\u0088\u00b2', ' ') post = post.replace('\u00f0\u009f\u0088\u00b3', ' ') post = post.replace('\u00f0\u009f\u0088\u00b4', ' ') post = post.replace('\u00f0\u009f\u0088\u00b5', ' ') post = post.replace('\u00f0\u009f\u0088\u00b6', ' ') post = post.replace('\u00f0\u009f\u0088\u00b7', ' ') post = post.replace('\u00f0\u009f\u0088\u00b8', ' ') post = post.replace('\u00f0\u009f\u0088\u00b9', ' ') post = post.replace('\u00f0\u009f\u0088\u00ba', ' ') post = post.replace('\u00f0\u009f\u0089\u0090', ' ') post = post.replace('\u00f0\u009f\u0089\u0091', ' ') post = post.replace('\u00c2\u00a9', ' ') post = post.replace('\u00c2\u00ae', ' ') post = post.replace('\u00e2\u0080\u00bc', ' ') post = post.replace('\u00e2\u0081\u0089', ' ') post = post.replace('\u0023\u00e2\u0083\u00a3', ' ') post = post.replace('\u0038\u00e2\u0083\u00a3', ' ') post = post.replace('\u0039\u00e2\u0083\u00a3', ' ') post = post.replace('\u0037\u00e2\u0083\u00a3', ' ') post = post.replace('\u0030\u00e2\u0083\u00a3', ' ') post = post.replace('\u0036\u00e2\u0083\u00a3', ' ') post = post.replace('\u0035\u00e2\u0083\u00a3', ' ') post = post.replace('\u0034\u00e2\u0083\u00a3', 
' ') post = post.replace('\u0033\u00e2\u0083\u00a3', ' ') post = post.replace('\u0032\u00e2\u0083\u00a3', ' ') post = post.replace('\u0031\u00e2\u0083\u00a3', ' ') post = post.replace('\u00e2\u0084\u00a2', ' ') post = post.replace('\u00e2\u0084\u00b9', ' ') post = post.replace('\u00e2\u0086\u0094', ' ') post = post.replace('\u00e2\u0086\u0095', ' ') post = post.replace('\u00e2\u0086\u0096', ' ') post = post.replace('\u00e2\u0086\u0097', ' ') post = post.replace('\u00e2\u0086\u0098', ' ') post = post.replace('\u00e2\u0086\u0099', ' ') post = post.replace('\u00e2\u0086\u00a9', ' ') post = post.replace('\u00e2\u0086\u00aa', ' ') post = post.replace('\u00e2\u008c\u009a', ' ') post = post.replace('\u00e2\u008c\u009b', ' ') post = post.replace('\u00e2\u008f\u00a9', ' ') post = post.replace('\u00e2\u008f\u00aa', ' ') post = post.replace('\u00e2\u008f\u00ab', ' ') post = post.replace('\u00e2\u008f\u00ac', ' ') post = post.replace('\u00e2\u008f\u00b0', ' ') post = post.replace('\u00e2\u008f\u00b3', ' ') post = post.replace('\u00e2\u0096\u00aa', ' ') post = post.replace('\u00e2\u0096\u00ab', ' ') post = post.replace('\u00e2\u0096\u00b6', ' ') post = post.replace('\u00e2\u0097\u0080', ' ') post = post.replace('\u00e2\u0097\u00bb', ' ') post = post.replace('\u00e2\u0097\u00bc', ' ') post = post.replace('\u00e2\u0097\u00bd', ' ') post = post.replace('\u00e2\u0097\u00be', ' ') post = post.replace('\u00e2\u0098\u0080', ' ') post = post.replace('\u00e2\u0098\u0081', ' ') post = post.replace('\u00e2\u0098\u008e', ' ') post = post.replace('\u00e2\u0098\u0091', ' ') post = post.replace('\u00e2\u0098\u0094', ' ') post = post.replace('\u00e2\u0098\u0095', ' ') post = post.replace('\u00e2\u0098\u009d', ' ') post = post.replace('\u00e2\u0098\u00ba', ' ') post = post.replace('\u00e2\u0099\u0088', ' ') post = post.replace('\u00e2\u0099\u0089', ' ') post = post.replace('\u00e2\u0099\u008a', ' ') post = post.replace('\u00e2\u0099\u008b', ' ') post = post.replace('\u00e2\u0099\u008c', ' ') 
post = post.replace('\u00e2\u0099\u008d', ' ') post = post.replace('\u00e2\u0099\u008e', ' ') post = post.replace('\u00e2\u0099\u008f', ' ') post = post.replace('\u00e2\u0099\u0090', ' ') post = post.replace('\u00e2\u0099\u0091', ' ') post = post.replace('\u00e2\u0099\u0092', ' ') post = post.replace('\u00e2\u0099\u0093', ' ') post = post.replace('\u00e2\u0099\u00a0', ' ') post = post.replace('\u00e2\u0099\u00a3', ' ') post = post.replace('\u00e2\u0099\u00a5', ' ') post = post.replace('\u00e2\u0099\u00a6', ' ') post = post.replace('\u00e2\u0099\u00a8', ' ') post = post.replace('\u00e2\u0099\u00bb', ' ') post = post.replace('\u00e2\u0099\u00bf', ' ') post = post.replace('\u00e2\u009a\u0093', ' ') post = post.replace('\u00e2\u009a\u00a0', ' ') post = post.replace('\u00e2\u009a\u00a1', ' ') post = post.replace('\u00e2\u009a\u00aa', ' ') post = post.replace('\u00e2\u009a\u00ab', ' ') post = post.replace('\u00e2\u009a\u00bd', ' ') post = post.replace('\u00e2\u009a\u00be', ' ') post = post.replace('\u00e2\u009b\u0084', ' ') post = post.replace('\u00e2\u009b\u0085', ' ') post = post.replace('\u00e2\u009b\u008e', ' ') post = post.replace('\u00e2\u009b\u0094', ' ') post = post.replace('\u00e2\u009b\u00aa', ' ') post = post.replace('\u00e2\u009b\u00b2', ' ') post = post.replace('\u00e2\u009b\u00b3', ' ') post = post.replace('\u00e2\u009b\u00b5', ' ') post = post.replace('\u00e2\u009b\u00ba', ' ') post = post.replace('\u00e2\u009b\u00bd', ' ') post = post.replace('\u00e2\u00a4\u00b4', ' ') post = post.replace('\u00e2\u00a4\u00b5', ' ') post = post.replace('\u00e2\u00ac\u0085', ' ') post = post.replace('\u00e2\u00ac\u0086', ' ') post = post.replace('\u00e2\u00ac\u0087', ' ') post = post.replace('\u00e2\u00ac\u009b', ' ') post = post.replace('\u00e2\u00ac\u009c', ' ') post = post.replace('\u00e2\u00ad\u0090', ' ') post = post.replace('\u00e2\u00ad\u0095', ' ') post = post.replace('\u00e3\u0080\u00b0', ' ') post = post.replace('\u00e3\u0080\u00bd', ' ') post = 
post.replace('\u00e3\u008a\u0097', ' ') post = post.replace('\u00e3\u008a\u0099', ' ') post = post.replace('\u00f0\u009f\u0080\u0084', ' ') post = post.replace('\u00f0\u009f\u0083\u008f', ' ') post = post.replace('\u00f0\u009f\u008c\u0080', ' ') post = post.replace('\u00f0\u009f\u008c\u0081', ' ') post = post.replace('\u00f0\u009f\u008c\u0082', ' ') post = post.replace('\u00f0\u009f\u008c\u0083', ' ') post = post.replace('\u00f0\u009f\u008c\u0084', ' ') post = post.replace('\u00f0\u009f\u008c\u0085', ' ') post = post.replace('\u00f0\u009f\u008c\u0086', ' ') post = post.replace('\u00f0\u009f\u008c\u0087', ' ') post = post.replace('\u00f0\u009f\u008c\u0088', ' ') post = post.replace('\u00f0\u009f\u008c\u0089', ' ') post = post.replace('\u00f0\u009f\u008c\u008a', ' ') post = post.replace('\u00f0\u009f\u008c\u008b', ' ') post = post.replace('\u00f0\u009f\u008c\u008c', ' ') post = post.replace('\u00f0\u009f\u008c\u008f', ' ') post = post.replace('\u00f0\u009f\u008c\u0091', ' ') post = post.replace('\u00f0\u009f\u008c\u0093', ' ') post = post.replace('\u00f0\u009f\u008c\u0094', ' ') post = post.replace('\u00f0\u009f\u008c\u0095', ' ') post = post.replace('\u00f0\u009f\u008c\u0099', ' ') post = post.replace('\u00f0\u009f\u008c\u009b', ' ') post = post.replace('\u00f0\u009f\u008c\u009f', ' ') post = post.replace('\u00f0\u009f\u008c\u00a0', ' ') post = post.replace('\u00f0\u009f\u008c\u00b0', ' ') post = post.replace('\u00f0\u009f\u008c\u00b1', ' ') post = post.replace('\u00f0\u009f\u008c\u00b4', ' ') post = post.replace('\u00f0\u009f\u008c\u00b5', ' ') post = post.replace('\u00f0\u009f\u008c\u00b7', ' ') post = post.replace('\u00f0\u009f\u008c\u00b8', ' ') post = post.replace('\u00f0\u009f\u008c\u00b9', ' ') post = post.replace('\u00f0\u009f\u008c\u00ba', ' ') post = post.replace('\u00f0\u009f\u008c\u00bb', ' ') post = post.replace('\u00f0\u009f\u008c\u00bc', ' ') post = post.replace('\u00f0\u009f\u008c\u00bd', ' ') post = post.replace('\u00f0\u009f\u008c\u00be', ' ') post 
= post.replace('\u00f0\u009f\u008c\u00bf', ' ') post = post.replace('\u00f0\u009f\u008d\u0080', ' ') post = post.replace('\u00f0\u009f\u008d\u0081', ' ') post = post.replace('\u00f0\u009f\u008d\u0082', ' ') post = post.replace('\u00f0\u009f\u008d\u0083', ' ') post = post.replace('\u00f0\u009f\u008d\u0084', ' ') post = post.replace('\u00f0\u009f\u008d\u0085', ' ') post = post.replace('\u00f0\u009f\u008d\u0086', ' ') post = post.replace('\u00f0\u009f\u008d\u0087', ' ') post = post.replace('\u00f0\u009f\u008d\u0088', ' ') post = post.replace('\u00f0\u009f\u008d\u0089', ' ') post = post.replace('\u00f0\u009f\u008d\u008a', ' ') post = post.replace('\u00f0\u009f\u008d\u008c', ' ') post = post.replace('\u00f0\u009f\u008d\u008d', ' ') post = post.replace('\u00f0\u009f\u008d\u008e', ' ') post = post.replace('\u00f0\u009f\u008d\u008f', ' ') post = post.replace('\u00f0\u009f\u008d\u0091', ' ') post = post.replace('\u00f0\u009f\u008d\u0092', ' ') post = post.replace('\u00f0\u009f\u008d\u0093', ' ') post = post.replace('\u00f0\u009f\u008d\u0094', ' ') post = post.replace('\u00f0\u009f\u008d\u0095', ' ') post = post.replace('\u00f0\u009f\u008d\u0096', ' ') post = post.replace('\u00f0\u009f\u008d\u0097', ' ') post = post.replace('\u00f0\u009f\u008d\u0098', ' ') post = post.replace('\u00f0\u009f\u008d\u0099', ' ') post = post.replace('\u00f0\u009f\u008d\u009a', ' ') post = post.replace('\u00f0\u009f\u008d\u009b', ' ') post = post.replace('\u00f0\u009f\u008d\u009c', ' ') post = post.replace('\u00f0\u009f\u008d\u009d', ' ') post = post.replace('\u00f0\u009f\u008d\u009e', ' ') post = post.replace('\u00f0\u009f\u008d\u009f', ' ') post = post.replace('\u00f0\u009f\u008d\u00a0', ' ') post = post.replace('\u00f0\u009f\u008d\u00a1', ' ') post = post.replace('\u00f0\u009f\u008d\u00a2', ' ') post = post.replace('\u00f0\u009f\u008d\u00a3', ' ') post = post.replace('\u00f0\u009f\u008d\u00a4', ' ') post = post.replace('\u00f0\u009f\u008d\u00a5', ' ') post = 
post.replace('\u00f0\u009f\u008d\u00a6', ' ') post = post.replace('\u00f0\u009f\u008d\u00a7', ' ') post = post.replace('\u00f0\u009f\u008d\u00a8', ' ') post = post.replace('\u00f0\u009f\u008d\u00a9', ' ') post = post.replace('\u00f0\u009f\u008d\u00aa', ' ') post = post.replace('\u00f0\u009f\u008d\u00ab', ' ') post = post.replace('\u00f0\u009f\u008d\u00ac', ' ') post = post.replace('\u00f0\u009f\u008d\u00ad', ' ') post = post.replace('\u00f0\u009f\u008d\u00ae', ' ') post = post.replace('\u00f0\u009f\u008d\u00af', ' ') post = post.replace('\u00f0\u009f\u008d\u00b0', ' ') post = post.replace('\u00f0\u009f\u008d\u00b1', ' ') post = post.replace('\u00f0\u009f\u008d\u00b2', ' ') post = post.replace('\u00f0\u009f\u008d\u00b3', ' ') post = post.replace('\u00f0\u009f\u008d\u00b4', ' ') post = post.replace('\u00f0\u009f\u008d\u00b5', ' ') post = post.replace('\u00f0\u009f\u008d\u00b6', ' ') post = post.replace('\u00f0\u009f\u008d\u00b7', ' ') post = post.replace('\u00f0\u009f\u008d\u00b8', ' ') post = post.replace('\u00f0\u009f\u008d\u00b9', ' ') post = post.replace('\u00f0\u009f\u008d\u00ba', ' ') post = post.replace('\u00f0\u009f\u008d\u00bb', ' ') post = post.replace('\u00f0\u009f\u008e\u0080', ' ') post = post.replace('\u00f0\u009f\u008e\u0081', ' ') post = post.replace('\u00f0\u009f\u008e\u0082', ' ') post = post.replace('\u00f0\u009f\u008e\u0083', ' ') post = post.replace('\u00f0\u009f\u008e\u0084', ' ') post = post.replace('\u00f0\u009f\u008e\u0085', ' ') post = post.replace('\u00f0\u009f\u008e\u0086', ' ') post = post.replace('\u00f0\u009f\u008e\u0087', ' ') post = post.replace('\u00f0\u009f\u008e\u0088', ' ') post = post.replace('\u00f0\u009f\u008e\u0089', ' ') post = post.replace('\u00f0\u009f\u008e\u008a', ' ') post = post.replace('\u00f0\u009f\u008e\u008b', ' ') post = post.replace('\u00f0\u009f\u008e\u008c', ' ') post = post.replace('\u00f0\u009f\u008e\u008d', ' ') post = post.replace('\u00f0\u009f\u008e\u008e', ' ') post = 
post.replace('\u00f0\u009f\u008e\u008f', ' ') post = post.replace('\u00f0\u009f\u008e\u0090', ' ') post = post.replace('\u00f0\u009f\u008e\u0091', ' ') post = post.replace('\u00f0\u009f\u008e\u0092', ' ') post = post.replace('\u00f0\u009f\u008e\u0093', ' ') post = post.replace('\u00f0\u009f\u008e\u00a0', ' ') post = post.replace('\u00f0\u009f\u008e\u00a1', ' ') post = post.replace('\u00f0\u009f\u008e\u00a2', ' ') post = post.replace('\u00f0\u009f\u008e\u00a3', ' ') post = post.replace('\u00f0\u009f\u008e\u00a4', ' ') post = post.replace('\u00f0\u009f\u008e\u00a5', ' ') post = post.replace('\u00f0\u009f\u008e\u00a6', ' ') post = post.replace('\u00f0\u009f\u008e\u00a7', ' ') post = post.replace('\u00f0\u009f\u008e\u00a8', ' ') post = post.replace('\u00f0\u009f\u008e\u00a9', ' ') post = post.replace('\u00f0\u009f\u008e\u00aa', ' ') post = post.replace('\u00f0\u009f\u008e\u00ab', ' ') post = post.replace('\u00f0\u009f\u008e\u00ac', ' ') post = post.replace('\u00f0\u009f\u008e\u00ad', ' ') post = post.replace('\u00f0\u009f\u008e\u00ae', ' ') post = post.replace('\u00f0\u009f\u008e\u00af', ' ') post = post.replace('\u00f0\u009f\u008e\u00b0', ' ') post = post.replace('\u00f0\u009f\u008e\u00b1', ' ') post = post.replace('\u00f0\u009f\u008e\u00b2', ' ') post = post.replace('\u00f0\u009f\u008e\u00b3', ' ') post = post.replace('\u00f0\u009f\u008e\u00b4', ' ') post = post.replace('\u00f0\u009f\u008e\u00b5', ' ') post = post.replace('\u00f0\u009f\u008e\u00b6', ' ') post = post.replace('\u00f0\u009f\u008e\u00b7', ' ') post = post.replace('\u00f0\u009f\u008e\u00b8', ' ') post = post.replace('\u00f0\u009f\u008e\u00b9', ' ') post = post.replace('\u00f0\u009f\u008e\u00ba', ' ') post = post.replace('\u00f0\u009f\u008e\u00bb', ' ') post = post.replace('\u00f0\u009f\u008e\u00bc', ' ') post = post.replace('\u00f0\u009f\u008e\u00bd', ' ') post = post.replace('\u00f0\u009f\u008e\u00be', ' ') post = post.replace('\u00f0\u009f\u008e\u00bf', ' ') post = 
post.replace('\u00f0\u009f\u008f\u0080', ' ') post = post.replace('\u00f0\u009f\u008f\u0081', ' ') post = post.replace('\u00f0\u009f\u008f\u0082', ' ') post = post.replace('\u00f0\u009f\u008f\u0083', ' ') post = post.replace('\u00f0\u009f\u008f\u0084', ' ') post = post.replace('\u00f0\u009f\u008f\u0086', ' ') post = post.replace('\u00f0\u009f\u008f\u0088', ' ') post = post.replace('\u00f0\u009f\u008f\u008a', ' ') post = post.replace('\u00f0\u009f\u008f\u00a0', ' ') post = post.replace('\u00f0\u009f\u008f\u00a1', ' ') post = post.replace('\u00f0\u009f\u008f\u00a2', ' ') post = post.replace('\u00f0\u009f\u008f\u00a3', ' ') post = post.replace('\u00f0\u009f\u008f\u00a5', ' ') post = post.replace('\u00f0\u009f\u008f\u00a6', ' ') post = post.replace('\u00f0\u009f\u008f\u00a7', ' ') post = post.replace('\u00f0\u009f\u008f\u00a8', ' ') post = post.replace('\u00f0\u009f\u008f\u00a9', ' ') post = post.replace('\u00f0\u009f\u008f\u00aa', ' ') post = post.replace('\u00f0\u009f\u008f\u00ab', ' ') post = post.replace('\u00f0\u009f\u008f\u00ac', ' ') post = post.replace('\u00f0\u009f\u008f\u00ad', ' ') post = post.replace('\u00f0\u009f\u008f\u00ae', ' ') post = post.replace('\u00f0\u009f\u008f\u00af', ' ') post = post.replace('\u00f0\u009f\u008f\u00b0', ' ') post = post.replace('\u00f0\u009f\u0090\u008c', ' ') post = post.replace('\u00f0\u009f\u0090\u008d', ' ') post = post.replace('\u00f0\u009f\u0090\u008e', ' ') post = post.replace('\u00f0\u009f\u0090\u0091', ' ') post = post.replace('\u00f0\u009f\u0090\u0092', ' ') post = post.replace('\u00f0\u009f\u0090\u0094', ' ') post = post.replace('\u00f0\u009f\u0090\u0097', ' ') post = post.replace('\u00f0\u009f\u0090\u0098', ' ') post = post.replace('\u00f0\u009f\u0090\u0099', ' ') post = post.replace('\u00f0\u009f\u0090\u009a', ' ') post = post.replace('\u00f0\u009f\u0090\u009b', ' ') post = post.replace('\u00f0\u009f\u0090\u009c', ' ') post = post.replace('\u00f0\u009f\u0090\u009d', ' ') post = 
post.replace('\u00f0\u009f\u0090\u009e', ' ') post = post.replace('\u00f0\u009f\u0090\u009f', ' ') post = post.replace('\u00f0\u009f\u0090\u00a0', ' ') post = post.replace('\u00f0\u009f\u0090\u00a1', ' ') post = post.replace('\u00f0\u009f\u0090\u00a2', ' ') post = post.replace('\u00f0\u009f\u0090\u00a3', ' ') post = post.replace('\u00f0\u009f\u0090\u00a4', ' ') post = post.replace('\u00f0\u009f\u0090\u00a5', ' ') post = post.replace('\u00f0\u009f\u0090\u00a6', ' ') post = post.replace('\u00f0\u009f\u0090\u00a7', ' ') post = post.replace('\u00f0\u009f\u0090\u00a8', ' ') post = post.replace('\u00f0\u009f\u0090\u00a9', ' ') post = post.replace('\u00f0\u009f\u0090\u00ab', ' ') post = post.replace('\u00f0\u009f\u0090\u00ac', ' ') post = post.replace('\u00f0\u009f\u0090\u00ad', ' ') post = post.replace('\u00f0\u009f\u0090\u00ae', ' ') post = post.replace('\u00f0\u009f\u0090\u00af', ' ') post = post.replace('\u00f0\u009f\u0090\u00b0', ' ') post = post.replace('\u00f0\u009f\u0090\u00b1', ' ') post = post.replace('\u00f0\u009f\u0090\u00b2', ' ') post = post.replace('\u00f0\u009f\u0090\u00b3', ' ') post = post.replace('\u00f0\u009f\u0090\u00b4', ' ') post = post.replace('\u00f0\u009f\u0090\u00b5', ' ') post = post.replace('\u00f0\u009f\u0090\u00b6', ' ') post = post.replace('\u00f0\u009f\u0090\u00b7', ' ') post = post.replace('\u00f0\u009f\u0090\u00b8', ' ') post = post.replace('\u00f0\u009f\u0090\u00b9', ' ') post = post.replace('\u00f0\u009f\u0090\u00ba', ' ') post = post.replace('\u00f0\u009f\u0090\u00bb', ' ') post = post.replace('\u00f0\u009f\u0090\u00bc', ' ') post = post.replace('\u00f0\u009f\u0090\u00bd', ' ') post = post.replace('\u00f0\u009f\u0090\u00be', ' ') post = post.replace('\u00f0\u009f\u0091\u0080', ' ') post = post.replace('\u00f0\u009f\u0091\u0082', ' ') post = post.replace('\u00f0\u009f\u0091\u0083', ' ') post = post.replace('\u00f0\u009f\u0091\u0084', ' ') post = post.replace('\u00f0\u009f\u0091\u0085', ' ') post = 
post.replace('\u00f0\u009f\u0091\u0086', ' ') post = post.replace('\u00f0\u009f\u0091\u0087', ' ') post = post.replace('\u00f0\u009f\u0091\u0088', ' ') post = post.replace('\u00f0\u009f\u0091\u0089', ' ') post = post.replace('\u00f0\u009f\u0091\u008a', ' ') post = post.replace('\u00f0\u009f\u0091\u008b', ' ') post = post.replace('\u00f0\u009f\u0091\u008c', ' ') post = post.replace('\u00f0\u009f\u0091\u008d', ' ') post = post.replace('\u00f0\u009f\u0091\u008e', ' ') post = post.replace('\u00f0\u009f\u0091\u008f', ' ') post = post.replace('\u00f0\u009f\u0091\u0090', ' ') post = post.replace('\u00f0\u009f\u0091\u0091', ' ') post = post.replace('\u00f0\u009f\u0091\u0092', ' ') post = post.replace('\u00f0\u009f\u0091\u0093', ' ') post = post.replace('\u00f0\u009f\u0091\u0094', ' ') post = post.replace('\u00f0\u009f\u0091\u0095', ' ') post = post.replace('\u00f0\u009f\u0091\u0096', ' ') post = post.replace('\u00f0\u009f\u0091\u0097', ' ') post = post.replace('\u00f0\u009f\u0091\u0098', ' ') post = post.replace('\u00f0\u009f\u0091\u0099', ' ') post = post.replace('\u00f0\u009f\u0091\u009a', ' ') post = post.replace('\u00f0\u009f\u0091\u009b', ' ') post = post.replace('\u00f0\u009f\u0091\u009c', ' ') post = post.replace('\u00f0\u009f\u0091\u009d', ' ') post = post.replace('\u00f0\u009f\u0091\u009e', ' ') post = post.replace('\u00f0\u009f\u0091\u009f', ' ') post = post.replace('\u00f0\u009f\u0091\u00a0', ' ') post = post.replace('\u00f0\u009f\u0091\u00a1', ' ') post = post.replace('\u00f0\u009f\u0091\u00a2', ' ') post = post.replace('\u00f0\u009f\u0091\u00a3', ' ') post = post.replace('\u00f0\u009f\u0091\u00a4', ' ') post = post.replace('\u00f0\u009f\u0091\u00a6', ' ') post = post.replace('\u00f0\u009f\u0091\u00a7', ' ') post = post.replace('\u00f0\u009f\u0091\u00a8', ' ') post = post.replace('\u00f0\u009f\u0091\u00a9', ' ') post = post.replace('\u00f0\u009f\u0091\u00aa', ' ') post = post.replace('\u00f0\u009f\u0091\u00ab', ' ') post = 
post.replace('\u00f0\u009f\u0091\u00ae', ' ') post = post.replace('\u00f0\u009f\u0091\u00af', ' ') post = post.replace('\u00f0\u009f\u0091\u00b0', ' ') post = post.replace('\u00f0\u009f\u0091\u00b1', ' ') post = post.replace('\u00f0\u009f\u0091\u00b2', ' ') post = post.replace('\u00f0\u009f\u0091\u00b3', ' ') post = post.replace('\u00f0\u009f\u0091\u00b4', ' ') post = post.replace('\u00f0\u009f\u0091\u00b5', ' ') post = post.replace('\u00f0\u009f\u0091\u00b6', ' ') post = post.replace('\u00f0\u009f\u0091\u00b7', ' ') post = post.replace('\u00f0\u009f\u0091\u00b8', ' ') post = post.replace('\u00f0\u009f\u0091\u00b9', ' ') post = post.replace('\u00f0\u009f\u0091\u00ba', ' ') post = post.replace('\u00f0\u009f\u0091\u00bb', ' ') post = post.replace('\u00f0\u009f\u0091\u00bc', ' ') post = post.replace('\u00f0\u009f\u0091\u00bd', ' ') post = post.replace('\u00f0\u009f\u0091\u00be', ' ') post = post.replace('\u00f0\u009f\u0091\u00bf', ' ') post = post.replace('\u00f0\u009f\u0092\u0080', ' ') post = post.replace('\u00f0\u009f\u0092\u0081', ' ') post = post.replace('\u00f0\u009f\u0092\u0082', ' ') post = post.replace('\u00f0\u009f\u0092\u0083', ' ') post = post.replace('\u00f0\u009f\u0092\u0084', ' ') post = post.replace('\u00f0\u009f\u0092\u0085', ' ') post = post.replace('\u00f0\u009f\u0092\u0086', ' ') post = post.replace('\u00f0\u009f\u0092\u0087', ' ') post = post.replace('\u00f0\u009f\u0092\u0088', ' ') post = post.replace('\u00f0\u009f\u0092\u0089', ' ') post = post.replace('\u00f0\u009f\u0092\u008a', ' ') post = post.replace('\u00f0\u009f\u0092\u008b', ' ') post = post.replace('\u00f0\u009f\u0092\u008c', ' ') post = post.replace('\u00f0\u009f\u0092\u008d', ' ') post = post.replace('\u00f0\u009f\u0092\u008e', ' ') post = post.replace('\u00f0\u009f\u0092\u008f', ' ') post = post.replace('\u00f0\u009f\u0092\u0090', ' ') post = post.replace('\u00f0\u009f\u0092\u0091', ' ') post = post.replace('\u00f0\u009f\u0092\u0092', ' ') post = 
post.replace('\u00f0\u009f\u0092\u0093', ' ') post = post.replace('\u00f0\u009f\u0092\u0094', ' ') post = post.replace('\u00f0\u009f\u0092\u0095', ' ') post = post.replace('\u00f0\u009f\u0092\u0096', ' ') post = post.replace('\u00f0\u009f\u0092\u0097', ' ') post = post.replace('\u00f0\u009f\u0092\u0098', ' ') post = post.replace('\u00f0\u009f\u0092\u0099', ' ') post = post.replace('\u00f0\u009f\u0092\u009a', ' ') post = post.replace('\u00f0\u009f\u0092\u009b', ' ') post = post.replace('\u00f0\u009f\u0092\u009c', ' ') post = post.replace('\u00f0\u009f\u0092\u009d', ' ') post = post.replace('\u00f0\u009f\u0092\u009e', ' ') post = post.replace('\u00f0\u009f\u0092\u009f', ' ') post = post.replace('\u00f0\u009f\u0092\u00a0', ' ') post = post.replace('\u00f0\u009f\u0092\u00a1', ' ') post = post.replace('\u00f0\u009f\u0092\u00a2', ' ') post = post.replace('\u00f0\u009f\u0092\u00a3', ' ') post = post.replace('\u00f0\u009f\u0092\u00a4', ' ') post = post.replace('\u00f0\u009f\u0092\u00a5', ' ') post = post.replace('\u00f0\u009f\u0092\u00a6', ' ') post = post.replace('\u00f0\u009f\u0092\u00a7', ' ') post = post.replace('\u00f0\u009f\u0092\u00a8', ' ') post = post.replace('\u00f0\u009f\u0092\u00a9', ' ') post = post.replace('\u00f0\u009f\u0092\u00aa', ' ') post = post.replace('\u00f0\u009f\u0092\u00ab', ' ') post = post.replace('\u00f0\u009f\u0092\u00ac', ' ') post = post.replace('\u00f0\u009f\u0092\u00ae', ' ') post = post.replace('\u00f0\u009f\u0092\u00af', ' ') post = post.replace('\u00f0\u009f\u0092\u00b0', ' ') post = post.replace('\u00f0\u009f\u0092\u00b1', ' ') post = post.replace('\u00f0\u009f\u0092\u00b2', ' ') post = post.replace('\u00f0\u009f\u0092\u00b3', ' ') post = post.replace('\u00f0\u009f\u0092\u00b4', ' ') post = post.replace('\u00f0\u009f\u0092\u00b5', ' ') post = post.replace('\u00f0\u009f\u0092\u00b8', ' ') post = post.replace('\u00f0\u009f\u0092\u00b9', ' ') post = post.replace('\u00f0\u009f\u0092\u00ba', ' ') post = 
post.replace('\u00f0\u009f\u0092\u00bb', ' ') post = post.replace('\u00f0\u009f\u0092\u00bc', ' ') post = post.replace('\u00f0\u009f\u0092\u00bd', ' ') post = post.replace('\u00f0\u009f\u0092\u00be', ' ') post = post.replace('\u00f0\u009f\u0092\u00bf', ' ') post = post.replace('\u00f0\u009f\u0093\u0080', ' ') post = post.replace('\u00f0\u009f\u0093\u0081', ' ') post = post.replace('\u00f0\u009f\u0093\u0082', ' ') post = post.replace('\u00f0\u009f\u0093\u0083', ' ') post = post.replace('\u00f0\u009f\u0093\u0084', ' ') post = post.replace('\u00f0\u009f\u0093\u0085', ' ') post = post.replace('\u00f0\u009f\u0093\u0086', ' ') post = post.replace('\u00f0\u009f\u0093\u0087', ' ') post = post.replace('\u00f0\u009f\u0093\u0088', ' ') post = post.replace('\u00f0\u009f\u0093\u0089', ' ') post = post.replace('\u00f0\u009f\u0093\u008a', ' ') post = post.replace('\u00f0\u009f\u0093\u008b', ' ') post = post.replace('\u00f0\u009f\u0093\u008c', ' ') post = post.replace('\u00f0\u009f\u0093\u008d', ' ') post = post.replace('\u00f0\u009f\u0093\u008e', ' ') post = post.replace('\u00f0\u009f\u0093\u008f', ' ') post = post.replace('\u00f0\u009f\u0093\u0090', ' ') post = post.replace('\u00f0\u009f\u0093\u0091', ' ') post = post.replace('\u00f0\u009f\u0093\u0092', ' ') post = post.replace('\u00f0\u009f\u0093\u0093', ' ') post = post.replace('\u00f0\u009f\u0093\u0094', ' ') post = post.replace('\u00f0\u009f\u0093\u0095', ' ') post = post.replace('\u00f0\u009f\u0093\u0096', ' ') post = post.replace('\u00f0\u009f\u0093\u0097', ' ') post = post.replace('\u00f0\u009f\u0093\u0098', ' ') post = post.replace('\u00f0\u009f\u0093\u0099', ' ') post = post.replace('\u00f0\u009f\u0093\u009a', ' ') post = post.replace('\u00f0\u009f\u0093\u009b', ' ') post = post.replace('\u00f0\u009f\u0093\u009c', ' ') post = post.replace('\u00f0\u009f\u0093\u009d', ' ') post = post.replace('\u00f0\u009f\u0093\u009e', ' ') post = post.replace('\u00f0\u009f\u0093\u009f', ' ') post = 
post.replace('\u00f0\u009f\u0093\u00a0', ' ') post = post.replace('\u00f0\u009f\u0093\u00a1', ' ') post = post.replace('\u00f0\u009f\u0093\u00a2', ' ') post = post.replace('\u00f0\u009f\u0093\u00a3', ' ') post = post.replace('\u00f0\u009f\u0093\u00a4', ' ') post = post.replace('\u00f0\u009f\u0093\u00a5', ' ') post = post.replace('\u00f0\u009f\u0093\u00a6', ' ') post = post.replace('\u00f0\u009f\u0093\u00a7', ' ') post = post.replace('\u00f0\u009f\u0093\u00a8', ' ') post = post.replace('\u00f0\u009f\u0093\u00a9', ' ') post = post.replace('\u00f0\u009f\u0093\u00aa', ' ') post = post.replace('\u00f0\u009f\u0093\u00ab', ' ') post = post.replace('\u00f0\u009f\u0093\u00ae', ' ') post = post.replace('\u00f0\u009f\u0093\u00b0', ' ') post = post.replace('\u00f0\u009f\u0093\u00b1', ' ') post = post.replace('\u00f0\u009f\u0093\u00b2', ' ') post = post.replace('\u00f0\u009f\u0093\u00b3', ' ') post = post.replace('\u00f0\u009f\u0093\u00b4', ' ') post = post.replace('\u00f0\u009f\u0093\u00b6', ' ') post = post.replace('\u00f0\u009f\u0093\u00b7', ' ') post = post.replace('\u00f0\u009f\u0093\u00b9', ' ') post = post.replace('\u00f0\u009f\u0093\u00ba', ' ') post = post.replace('\u00f0\u009f\u0093\u00bb', ' ') post = post.replace('\u00f0\u009f\u0093\u00bc', ' ') post = post.replace('\u00f0\u009f\u0094\u0083', ' ') post = post.replace('\u00f0\u009f\u0094\u008a', ' ') post = post.replace('\u00f0\u009f\u0094\u008b', ' ') post = post.replace('\u00f0\u009f\u0094\u008c', ' ') post = post.replace('\u00f0\u009f\u0094\u008d', ' ') post = post.replace('\u00f0\u009f\u0094\u008e', ' ') post = post.replace('\u00f0\u009f\u0094\u008f', ' ') post = post.replace('\u00f0\u009f\u0094\u0090', ' ') post = post.replace('\u00f0\u009f\u0094\u0091', ' ') post = post.replace('\u00f0\u009f\u0094\u0092', ' ') post = post.replace('\u00f0\u009f\u0094\u0093', ' ') post = post.replace('\u00f0\u009f\u0094\u0094', ' ') post = post.replace('\u00f0\u009f\u0094\u0096', ' ') post = 
post.replace('\u00f0\u009f\u0094\u0097', ' ') post = post.replace('\u00f0\u009f\u0094\u0098', ' ') post = post.replace('\u00f0\u009f\u0094\u0099', ' ') post = post.replace('\u00f0\u009f\u0094\u009a', ' ') post = post.replace('\u00f0\u009f\u0094\u009b', ' ') post = post.replace('\u00f0\u009f\u0094\u009c', ' ') post = post.replace('\u00f0\u009f\u0094\u009d', ' ') post = post.replace('\u00f0\u009f\u0094\u009e', ' ') post = post.replace('\u00f0\u009f\u0094\u009f', ' ') post = post.replace('\u00f0\u009f\u0094\u00a0', ' ') post = post.replace('\u00f0\u009f\u0094\u00a1', ' ') post = post.replace('\u00f0\u009f\u0094\u00a2', ' ') post = post.replace('\u00f0\u009f\u0094\u00a3', ' ') post = post.replace('\u00f0\u009f\u0094\u00a4', ' ') post = post.replace('\u00f0\u009f\u0094\u00a5', ' ') post = post.replace('\u00f0\u009f\u0094\u00a6', ' ') post = post.replace('\u00f0\u009f\u0094\u00a7', ' ') post = post.replace('\u00f0\u009f\u0094\u00a8', ' ') post = post.replace('\u00f0\u009f\u0094\u00a9', ' ') post = post.replace('\u00f0\u009f\u0094\u00aa', ' ') post = post.replace('\u00f0\u009f\u0094\u00ab', ' ') post = post.replace('\u00f0\u009f\u0094\u00ae', ' ') post = post.replace('\u00f0\u009f\u0094\u00af', ' ') post = post.replace('\u00f0\u009f\u0094\u00b0', ' ') post = post.replace('\u00f0\u009f\u0094\u00b1', ' ') post = post.replace('\u00f0\u009f\u0094\u00b2', ' ') post = post.replace('\u00f0\u009f\u0094\u00b3', ' ') post = post.replace('\u00f0\u009f\u0094\u00b4', ' ') post = post.replace('\u00f0\u009f\u0094\u00b5', ' ') post = post.replace('\u00f0\u009f\u0094\u00b6', ' ') post = post.replace('\u00f0\u009f\u0094\u00b7', ' ') post = post.replace('\u00f0\u009f\u0094\u00b8', ' ') post = post.replace('\u00f0\u009f\u0094\u00b9', ' ') post = post.replace('\u00f0\u009f\u0094\u00ba', ' ') post = post.replace('\u00f0\u009f\u0094\u00bb', ' ') post = post.replace('\u00f0\u009f\u0094\u00bc', ' ') post = post.replace('\u00f0\u009f\u0094\u00bd', ' ') post = 
post.replace('\u00f0\u009f\u0095\u0090', ' ') post = post.replace('\u00f0\u009f\u0095\u0091', ' ') post = post.replace('\u00f0\u009f\u0095\u0092', ' ') post = post.replace('\u00f0\u009f\u0095\u0093', ' ') post = post.replace('\u00f0\u009f\u0095\u0094', ' ') post = post.replace('\u00f0\u009f\u0095\u0095', ' ') post = post.replace('\u00f0\u009f\u0095\u0096', ' ') post = post.replace('\u00f0\u009f\u0095\u0097', ' ') post = post.replace('\u00f0\u009f\u0095\u0098', ' ') post = post.replace('\u00f0\u009f\u0095\u0099', ' ') post = post.replace('\u00f0\u009f\u0095\u009a', ' ') post = post.replace('\u00f0\u009f\u0095\u009b', ' ') post = post.replace('\u00f0\u009f\u0097\u00bb', ' ') post = post.replace('\u00f0\u009f\u0097\u00bc', ' ') post = post.replace('\u00f0\u009f\u0097\u00bd', ' ') post = post.replace('\u00f0\u009f\u0097\u00be', ' ') post = post.replace('\u00f0\u009f\u0097\u00bf', ' ') post = post.replace('\u00f0\u009f\u0098\u0080', ' ') post = post.replace('\u00f0\u009f\u0098\u0087', ' ') post = post.replace('\u00f0\u009f\u0098\u0088', ' ') post = post.replace('\u00f0\u009f\u0098\u008e', ' ') post = post.replace('\u00f0\u009f\u0098\u0090', ' ') post = post.replace('\u00f0\u009f\u0098\u0091', ' ') post = post.replace('\u00f0\u009f\u0098\u0095', ' ') post = post.replace('\u00f0\u009f\u0098\u0097', ' ') post = post.replace('\u00f0\u009f\u0098\u0099', ' ') post = post.replace('\u00f0\u009f\u0098\u009b', ' ') post = post.replace('\u00f0\u009f\u0098\u009f', ' ') post = post.replace('\u00f0\u009f\u0098\u00a6', ' ') post = post.replace('\u00f0\u009f\u0098\u00a7', ' ') post = post.replace('\u00f0\u009f\u0098\u00ac', ' ') post = post.replace('\u00f0\u009f\u0098\u00ae', ' ') post = post.replace('\u00f0\u009f\u0098\u00af', ' ') post = post.replace('\u00f0\u009f\u0098\u00b4', ' ') post = post.replace('\u00f0\u009f\u0098\u00b6', ' ') post = post.replace('\u00f0\u009f\u009a\u0081', ' ') post = post.replace('\u00f0\u009f\u009a\u0082', ' ') post = 
post.replace('\u00f0\u009f\u009a\u0086', ' ') post = post.replace('\u00f0\u009f\u009a\u0088', ' ') post = post.replace('\u00f0\u009f\u009a\u008a', ' ') post = post.replace('\u00f0\u009f\u009a\u008d', ' ') post = post.replace('\u00f0\u009f\u009a\u008e', ' ') post = post.replace('\u00f0\u009f\u009a\u0090', ' ') post = post.replace('\u00f0\u009f\u009a\u0094', ' ') post = post.replace('\u00f0\u009f\u009a\u0096', ' ') post = post.replace('\u00f0\u009f\u009a\u0098', ' ') post = post.replace('\u00f0\u009f\u009a\u009b', ' ') post = post.replace('\u00f0\u009f\u009a\u009c', ' ') post = post.replace('\u00f0\u009f\u009a\u009d', ' ') post = post.replace('\u00f0\u009f\u009a\u009e', ' ') post = post.replace('\u00f0\u009f\u009a\u009f', ' ') post = post.replace('\u00f0\u009f\u009a\u00a0', ' ') post = post.replace('\u00f0\u009f\u009a\u00a1', ' ') post = post.replace('\u00f0\u009f\u009a\u00a3', ' ') post = post.replace('\u00f0\u009f\u009a\u00a6', ' ') post = post.replace('\u00f0\u009f\u009a\u00ae', ' ') post = post.replace('\u00f0\u009f\u009a\u00af', ' ') post = post.replace('\u00f0\u009f\u009a\u00b0', ' ') post = post.replace('\u00f0\u009f\u009a\u00b1', ' ') post = post.replace('\u00f0\u009f\u009a\u00b3', ' ') post = post.replace('\u00f0\u009f\u009a\u00b4', ' ') post = post.replace('\u00f0\u009f\u009a\u00b5', ' ') post = post.replace('\u00f0\u009f\u009a\u00b7', ' ') post = post.replace('\u00f0\u009f\u009a\u00b8', ' ') post = post.replace('\u00f0\u009f\u009a\u00bf', ' ') post = post.replace('\u00f0\u009f\u009b\u0081', ' ') post = post.replace('\u00f0\u009f\u009b\u0082', ' ') post = post.replace('\u00f0\u009f\u009b\u0083', ' ') post = post.replace('\u00f0\u009f\u009b\u0084', ' ') post = post.replace('\u00f0\u009f\u009b\u0085', ' ') post = post.replace('\u00f0\u009f\u008c\u008d', ' ') post = post.replace('\u00f0\u009f\u008c\u008e', ' ') post = post.replace('\u00f0\u009f\u008c\u0090', ' ') post = post.replace('\u00f0\u009f\u008c\u0092', ' ') post = 
post.replace('\u00f0\u009f\u008c\u0096', ' ') post = post.replace('\u00f0\u009f\u008c\u0097', ' ') post = post.replace('\u00f0\u009f\u008c\u0098', ' ') post = post.replace('\u00f0\u009f\u008c\u009a', ' ') post = post.replace('\u00f0\u009f\u008c\u009c', ' ') post = post.replace('\u00f0\u009f\u008c\u009d', ' ') post = post.replace('\u00f0\u009f\u008c\u009e', ' ') post = post.replace('\u00f0\u009f\u008c\u00b2', ' ') post = post.replace('\u00f0\u009f\u008c\u00b3', ' ') post = post.replace('\u00f0\u009f\u008d\u008b', ' ') post = post.replace('\u00f0\u009f\u008d\u0090', ' ') post = post.replace('\u00f0\u009f\u008d\u00bc', ' ') post = post.replace('\u00f0\u009f\u008f\u0087', ' ') post = post.replace('\u00f0\u009f\u008f\u0089', ' ') post = post.replace('\u00f0\u009f\u008f\u00a4', ' ') post = post.replace('\u00f0\u009f\u0090\u0080', ' ') post = post.replace('\u00f0\u009f\u0090\u0081', ' ') post = post.replace('\u00f0\u009f\u0090\u0082', ' ') post = post.replace('\u00f0\u009f\u0090\u0083', ' ') post = post.replace('\u00f0\u009f\u0090\u0084', ' ') post = post.replace('\u00f0\u009f\u0090\u0085', ' ') post = post.replace('\u00f0\u009f\u0090\u0086', ' ') post = post.replace('\u00f0\u009f\u0090\u0087', ' ') post = post.replace('\u00f0\u009f\u0090\u0088', ' ') post = post.replace('\u00f0\u009f\u0090\u0089', ' ') post = post.replace('\u00f0\u009f\u0090\u008a', ' ') post = post.replace('\u00f0\u009f\u0090\u008b', ' ') post = post.replace('\u00f0\u009f\u0090\u008f', ' ') post = post.replace('\u00f0\u009f\u0090\u0090', ' ') post = post.replace('\u00f0\u009f\u0090\u0093', ' ') post = post.replace('\u00f0\u009f\u0090\u0095', ' ') post = post.replace('\u00f0\u009f\u0090\u0096', ' ') post = post.replace('\u00f0\u009f\u0090\u00aa', ' ') post = post.replace('\u00f0\u009f\u0091\u00a5', ' ') post = post.replace('\u00f0\u009f\u0091\u00ac', ' ') post = post.replace('\u00f0\u009f\u0091\u00ad', ' ') post = post.replace('\u00f0\u009f\u0092\u00ad', ' ') post = 
post.replace('\u00f0\u009f\u0092\u00b6', ' ') post = post.replace('\u00f0\u009f\u0092\u00b7', ' ') post = post.replace('\u00f0\u009f\u0093\u00ac', ' ') post = post.replace('\u00f0\u009f\u0093\u00ad', ' ') post = post.replace('\u00f0\u009f\u0093\u00af', ' ') post = post.replace('\u00f0\u009f\u0093\u00b5', ' ') post = post.replace('\u00f0\u009f\u0094\u0080', ' ') post = post.replace('\u00f0\u009f\u0094\u0081', ' ') post = post.replace('\u00f0\u009f\u0094\u0082', ' ') post = post.replace('\u00f0\u009f\u0094\u0084', ' ') post = post.replace('\u00f0\u009f\u0094\u0085', ' ') post = post.replace('\u00f0\u009f\u0094\u0086', ' ') post = post.replace('\u00f0\u009f\u0094\u0087', ' ') post = post.replace('\u00f0\u009f\u0094\u0089', ' ') post = post.replace('\u00f0\u009f\u0094\u0095', ' ') post = post.replace('\u00f0\u009f\u0094\u00ac', ' ') post = post.replace('\u00f0\u009f\u0094\u00ad', ' ') post = post.replace('\u00f0\u009f\u0095\u009c', ' ') post = post.replace('\u00f0\u009f\u0095\u009d', ' ') post = post.replace('\u00f0\u009f\u0095\u009e', ' ') post = post.replace('\u00f0\u009f\u0095\u009f', ' ') post = post.replace('\u00f0\u009f\u0095\u00a0', ' ') post = post.replace('\u00f0\u009f\u0095\u00a1', ' ') post = post.replace('\u00f0\u009f\u0095\u00a2', ' ') post = post.replace('\u00f0\u009f\u0095\u00a3', ' ') post = post.replace('\u00f0\u009f\u0095\u00a4', ' ') post = post.replace('\u00f0\u009f\u0095\u00a5', ' ') post = post.replace('\u00f0\u009f\u0095\u00a6', ' ') post = post.replace('\u00f0\u009f\u0095\u00a7', ' ') return post def emoticons_sentiment(post): if post != post.replace('\u00f0\u009f\u0091\u008e', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0091\u00b9', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0091\u00ba', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0091\u00bf', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0092\u0094', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0092\u00a2', ' '): return 
"-1" elif post != post.replace('\u00f0\u009f\u0092\u00a3', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0092\u00a5', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0092\u00a9', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u0090', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u0091', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u0092', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u0093', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u0094', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u0095', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u0096', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u009e', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u009f', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a0', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a1', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a2', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a3', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a4', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a5', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a6', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a7', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a8', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00a9', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00aa', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00ab', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00ac', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00ad', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00ae', ' '): return "-1" elif post != 
post.replace('\u00f0\u009f\u0098\u00af', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00b0', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00b1', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00b2', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00b3', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00b5', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00b6', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00b7', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00bc', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00be', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0098\u00bf', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0099\u0080', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0099\u0088', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0099\u0089', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u0099\u008a', ' '): return "-1" elif post != post.replace('\u00e2\u0098\u0094', ' '): return "-1" elif post != post.replace('\u00f0\u009f\u008d\u0083', ' '): return "0" elif post != post.replace('\u00f0\u009f\u0098\u0088', ' '): return "0" elif post != post.replace('\u00f0\u009f\u0098\u008f', ' '): return "0" elif post != post.replace('\u00f0\u009f\u0098\u009b', ' '): return "0" elif post != post.replace('\u00f0\u009f\u0098\u009c', ' '): return "0" elif post != post.replace('\u00f0\u009f\u0098\u009d', ' '): return "0" elif post != post.replace('\u00f0\u009f\u0098\u00b4', ' '): return "0" elif post != post.replace('\u00f0\u009f\u008e\u0081', ' '): return "1" elif post != post.replace('\u00f0\u009f\u008e\u0086', ' '): return "1" elif post != post.replace('\u00f0\u009f\u008e\u0087', ' '): return "1" elif post != post.replace('\u00f0\u009f\u008e\u0089', ' '): return "1" elif post != post.replace('\u00f0\u009f\u008e\u008a', ' '): return "1" elif post != 
post.replace('\u00f0\u009f\u0091\u008b', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0091\u008c', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0091\u008d', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0091\u008f', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u0085', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u008b', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u0093', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u0095', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u0096', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u0097', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u0098', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u0099', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u009a', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u009b', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u009c', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u009d', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u009e', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u009f', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0092\u00aa', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0080', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0081', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0082', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0083', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0084', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0085', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0086', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0087', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0089', ' '): return "1" elif post != 
post.replace('\u00f0\u009f\u0098\u008a', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u008b', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u008c', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u008d', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u008e', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0097', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0098', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u0099', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u009a', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u00b8', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u00b9', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u00ba', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u00bb', ' '): return "1" elif post != post.replace('\u00f0\u009f\u0098\u00bd', ' '): return "1" elif post != post.replace('\u00e2\u0098\u00ba', ' '): return "1" elif post != post.replace('\u00e2\u0099\u00a5', ' '): return "1" elif post != post.replace('\u00e2\u009c\u008c', ' '): return "1" else: return "none"
52.522936
80
0.600716
7,003
57,250
4.913608
0.017707
0.225516
0.42197
0.474862
0.978931
0.887242
0.282331
0.139465
0.135571
0.012961
0
0.254161
0.172961
57,250
1,089
81
52.571166
0.4721
0
0
0.099907
0
0
0.409999
0.351338
0
0
0
0
0
1
0.002775
false
0
0
0
0.103608
0
0
0
0
null
1
1
1
1
1
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
644516f8dcf4616b7d3fe99a527e1293071096cf
123
py
Python
scrapy_mongodb/run.py
cffycls/scrapy_mongodb
c48940292da08102f2e9ae818ef84fac2c5f1b96
[ "Apache-2.0" ]
null
null
null
scrapy_mongodb/run.py
cffycls/scrapy_mongodb
c48940292da08102f2e9ae818ef84fac2c5f1b96
[ "Apache-2.0" ]
null
null
null
scrapy_mongodb/run.py
cffycls/scrapy_mongodb
c48940292da08102f2e9ae818ef84fac2c5f1b96
[ "Apache-2.0" ]
null
null
null
from scrapy import cmdline # cmdline.execute('scrapy crawl baidu'.split()) cmdline.execute('scrapy crawl txsp'.split())
20.5
47
0.747967
16
123
5.75
0.5625
0.304348
0.434783
0.543478
0
0
0
0
0
0
0
0
0.113821
123
5
48
24.6
0.844037
0.365854
0
0
0
0
0.223684
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
7
b39672bdc7ae883d3fcf94a4518a5723a01b5738
41
py
Python
model/__init__.py
UnityRobbie/bottomly
ae5f1b9c6ee74392a8525a1c0d611927fe0d7cda
[ "MIT" ]
4
2018-03-12T09:16:49.000Z
2021-07-15T08:21:32.000Z
model/__init__.py
UnityRobbie/bottomly
ae5f1b9c6ee74392a8525a1c0d611927fe0d7cda
[ "MIT" ]
5
2018-03-17T20:27:31.000Z
2020-11-17T09:50:48.000Z
model/__init__.py
UnityRobbie/bottomly
ae5f1b9c6ee74392a8525a1c0d611927fe0d7cda
[ "MIT" ]
7
2018-03-12T10:01:31.000Z
2022-01-18T14:55:00.000Z
import model.karma import model.member
13.666667
20
0.804878
6
41
5.5
0.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0.146341
41
2
21
20.5
0.942857
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
b3b3e3e4980a745f1f1967af940ff8ad52d76bcc
97,049
py
Python
dlkit/json_/assessment/queries.py
UOC/dlkit
a9d265db67e81b9e0f405457464e762e2c03f769
[ "MIT" ]
2
2018-02-23T12:16:11.000Z
2020-10-08T17:54:24.000Z
dlkit/json_/assessment/queries.py
UOC/dlkit
a9d265db67e81b9e0f405457464e762e2c03f769
[ "MIT" ]
87
2017-04-21T18:57:15.000Z
2021-12-13T19:43:57.000Z
dlkit/json_/assessment/queries.py
UOC/dlkit
a9d265db67e81b9e0f405457464e762e2c03f769
[ "MIT" ]
1
2018-03-01T16:44:25.000Z
2018-03-01T16:44:25.000Z
"""JSON implementations of assessment queries.""" # pylint: disable=no-init # Numerous classes don't require __init__. # pylint: disable=too-many-public-methods,too-few-public-methods # Number of methods are defined in specification # pylint: disable=protected-access # Access to protected methods allowed in package json package scope # pylint: disable=too-many-ancestors # Inheritance defined in specification from bson import ObjectId from .. import utilities from ..id.objects import IdList from ..osid import queries as osid_queries from ..primitives import Id from ..utilities import get_registry from dlkit.abstract_osid.assessment import queries as abc_assessment_queries from dlkit.abstract_osid.osid import errors class QuestionQuery(abc_assessment_queries.QuestionQuery, osid_queries.OsidObjectQuery): """This is the query for searching questions. Each method match request produces an ``AND`` term while multiple invocations of a method produces a nested ``OR``. """ def __init__(self, runtime): self._namespace = 'assessment.Question' self._runtime = runtime record_type_data_sets = get_registry('QUESTION_RECORD_TYPES', runtime) self._all_supported_record_type_data_sets = record_type_data_sets self._all_supported_record_type_ids = [] for data_set in record_type_data_sets: self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set]))) osid_queries.OsidObjectQuery.__init__(self, runtime) @utilities.arguments_not_none def get_question_query_record(self, question_record_type): """Gets the question record query corresponding to the given ``Item`` record ``Type``. Multiple retrievals produce a nested ``OR`` term. 
arg: question_record_type (osid.type.Type): a question record type return: (osid.assessment.records.QuestionQueryRecord) - the question query record raise: NullArgument - ``question_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(question_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() class AnswerQuery(abc_assessment_queries.AnswerQuery, osid_queries.OsidObjectQuery): """This is the query for searching answers. Each method match request produces an ``AND`` term while multiple invocations of a method produces a nested ``OR``. """ def __init__(self, runtime): self._namespace = 'assessment.Answer' self._runtime = runtime record_type_data_sets = get_registry('ANSWER_RECORD_TYPES', runtime) self._all_supported_record_type_data_sets = record_type_data_sets self._all_supported_record_type_ids = [] for data_set in record_type_data_sets: self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set]))) osid_queries.OsidObjectQuery.__init__(self, runtime) @utilities.arguments_not_none def get_answer_query_record(self, answer_record_type): """Gets the answer record query corresponding to the given ``Answer`` record ``Type``. Multiple retrievals produce a nested ``OR`` term. arg: answer_record_type (osid.type.Type): an answer record type return: (osid.assessment.records.AnswerQueryRecord) - the answer query record raise: NullArgument - ``answer_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(answer_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() class ItemQuery(abc_assessment_queries.ItemQuery, osid_queries.OsidObjectQuery, osid_queries.OsidAggregateableQuery): """This is the query for searching items. 
Each method match request produces an ``AND`` term while multiple invocations of a method produces a nested ``OR``. """ def __init__(self, runtime): self._namespace = 'assessment.Item' self._runtime = runtime record_type_data_sets = get_registry('ITEM_RECORD_TYPES', runtime) self._all_supported_record_type_data_sets = record_type_data_sets self._all_supported_record_type_ids = [] for data_set in record_type_data_sets: self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set]))) osid_queries.OsidObjectQuery.__init__(self, runtime) @utilities.arguments_not_none def match_learning_objective_id(self, objective_id, match): """Sets the learning objective ``Id`` for this query. arg: objective_id (osid.id.Id): a learning objective ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for negative match raise: NullArgument - ``objective_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ self._add_match('learningObjectiveIds', str(objective_id), bool(match)) def clear_learning_objective_id_terms(self): """Clears all learning objective ``Id`` terms. *compliance: mandatory -- This method must be implemented.* """ self._clear_terms('learningObjectiveIds') learning_objective_id_terms = property(fdel=clear_learning_objective_id_terms) def supports_learning_objective_query(self): """Tests if an ``ObjectiveQuery`` is available. return: (boolean) - ``true`` if a learning objective query is available, ``false`` otherwise *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def get_learning_objective_query(self): """Gets the query for a learning objective. Multiple retrievals produce a nested ``OR`` term. 
return: (osid.learning.ObjectiveQuery) - the learning objective query raise: Unimplemented - ``supports_learning_objective_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_learning_objective_query()`` is ``true``.* """ raise errors.Unimplemented() learning_objective_query = property(fget=get_learning_objective_query) @utilities.arguments_not_none def match_any_learning_objective(self, match): """Matches an item with any objective. arg: match (boolean): ``true`` to match items with any learning objective, ``false`` to match items with no learning objectives *compliance: mandatory -- This method must be implemented.* """ match_key = 'learningObjectiveIds' param = '$exists' if match: flag = 'true' else: flag = 'false' if match_key in self._query_terms: self._query_terms[match_key][param] = flag else: self._query_terms[match_key] = {param: flag} self._query_terms[match_key]['$nin'] = [[], ['']] def clear_learning_objective_terms(self): """Clears all learning objective terms. *compliance: mandatory -- This method must be implemented.* """ self._clear_terms('learningObjectiveIds') learning_objective_terms = property(fdel=clear_learning_objective_terms) @utilities.arguments_not_none def match_question_id(self, question_id, match): """Sets the question ``Id`` for this query. arg: question_id (osid.id.Id): a question ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``question_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceQuery.match_avatar_id self._add_match('questionId', str(question_id), match) def clear_question_id_terms(self): """Clears all question ``Id`` terms. 
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('questionId')

    question_id_terms = property(fdel=clear_question_id_terms)

    def supports_question_query(self):
        """Tests if a ``QuestionQuery`` is available.

        return: (boolean) - ``true`` if a question query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_question_query(self):
        """Gets the query for a question.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.QuestionQuery) - the question query
        raise:  Unimplemented - ``supports_question_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_question_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    question_query = property(fget=get_question_query)

    @utilities.arguments_not_none
    def match_any_question(self, match):
        """Matches an item with any question.

        arg:    match (boolean): ``true`` to match items with any
                question, ``false`` to match items with no questions
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_question_terms(self):
        """Clears all question terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    question_terms = property(fdel=clear_question_terms)

    @utilities.arguments_not_none
    def match_answer_id(self, answer_id, match):
        """Sets the answer ``Id`` for this query.

        arg:    answer_id (osid.id.Id): an answer ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``answer_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('answerId', str(answer_id), match)

    def clear_answer_id_terms(self):
        """Clears all answer ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('answerId')

    answer_id_terms = property(fdel=clear_answer_id_terms)

    def supports_answer_query(self):
        """Tests if an ``AnswerQuery`` is available.

        return: (boolean) - ``true`` if an answer query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_answer_query(self):
        """Gets the query for an answer.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AnswerQuery) - the answer query
        raise:  Unimplemented - ``supports_answer_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_answer_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    answer_query = property(fget=get_answer_query)

    @utilities.arguments_not_none
    def match_any_answer(self, match):
        """Matches an item with any answer.

        arg:    match (boolean): ``true`` to match items with any
                answer, ``false`` to match items with no answers
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_answer_terms(self):
        """Clears all answer terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    answer_terms = property(fdel=clear_answer_terms)

    @utilities.arguments_not_none
    def match_assessment_id(self, assessment_id, match):
        """Sets the assessment ``Id`` for this query.

        arg:    assessment_id (osid.id.Id): an assessment ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for negative match
        raise:  NullArgument - ``assessment_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('assessmentId', str(assessment_id), match)

    def clear_assessment_id_terms(self):
        """Clears all assessment ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('assessmentId')

    assessment_id_terms = property(fdel=clear_assessment_id_terms)

    def supports_assessment_query(self):
        """Tests if an ``AssessmentQuery`` is available.

        return: (boolean) - ``true`` if an assessment query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_assessment_query(self):
        """Gets the query for an assessment.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentQuery) - the assessment
                query
        raise:  Unimplemented - ``supports_assessment_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_assessment_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    assessment_query = property(fget=get_assessment_query)

    @utilities.arguments_not_none
    def match_any_assessment(self, match):
        """Matches an item with any assessment.

        arg:    match (boolean): ``true`` to match items with any
                assessment, ``false`` to match items with no
                assessments
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_assessment_terms(self):
        """Clears all assessment terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    assessment_terms = property(fdel=clear_assessment_terms)

    @utilities.arguments_not_none
    def match_bank_id(self, bank_id, match):
        """Sets the bank ``Id`` for this query.

        arg:    bank_id (osid.id.Id): a bank ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for negative match
        raise:  NullArgument - ``bank_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_bin_id
        self._add_match('assignedBankIds', str(bank_id), match)

    def clear_bank_id_terms(self):
        """Clears all bank ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms
        self._clear_terms('assignedBankIds')

    bank_id_terms = property(fdel=clear_bank_id_terms)

    def supports_bank_query(self):
        """Tests if a ``BankQuery`` is available.

        return: (boolean) - ``true`` if a bank query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_bank_query(self):
        """Gets the query for a bank.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.BankQuery) - the bank query
        raise:  Unimplemented - ``supports_bank_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_bank_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    bank_query = property(fget=get_bank_query)

    def clear_bank_terms(self):
        """Clears all bank terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('bank')

    bank_terms = property(fdel=clear_bank_terms)

    @utilities.arguments_not_none
    def get_item_query_record(self, item_record_type):
        """Gets the item record query corresponding to the given ``Item`` record ``Type``.

        Multiple retrievals produce a nested ``OR`` term.
        arg:    item_record_type (osid.type.Type): an item record type
        return: (osid.assessment.records.ItemQueryRecord) - the item
                query record
        raise:  NullArgument - ``item_record_type`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  Unsupported - ``has_record_type(item_record_type)`` is
                ``false``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()


class AssessmentQuery(abc_assessment_queries.AssessmentQuery, osid_queries.OsidObjectQuery):
    """This is the query for searching assessments.

    Each method match request produces an ``AND`` term while multiple
    invocations of a method produces a nested ``OR``.

    """
    def __init__(self, runtime):
        self._namespace = 'assessment.Assessment'
        self._runtime = runtime
        record_type_data_sets = get_registry('ASSESSMENT_RECORD_TYPES', runtime)
        self._all_supported_record_type_data_sets = record_type_data_sets
        self._all_supported_record_type_ids = []
        for data_set in record_type_data_sets:
            self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
        osid_queries.OsidObjectQuery.__init__(self, runtime)

    @utilities.arguments_not_none
    def match_level_id(self, grade_id, match):
        """Sets the level grade ``Id`` for this query.

        arg:    grade_id (osid.id.Id): a grade ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``grade_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('levelId', str(grade_id), match)

    def clear_level_id_terms(self):
        """Clears all level ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('levelId')

    level_id_terms = property(fdel=clear_level_id_terms)

    def supports_level_query(self):
        """Tests if a ``GradeQuery`` is available.

        return: (boolean) - ``true`` if a grade query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_level_query(self):
        """Gets the query for a grade.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradeQuery) - the grade query
        raise:  Unimplemented - ``supports_level_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_level_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    level_query = property(fget=get_level_query)

    @utilities.arguments_not_none
    def match_any_level(self, match):
        """Matches an assessment that has any level assigned.

        arg:    match (boolean): ``true`` to match assessments with any
                level, ``false`` to match assessments with no level
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_level_terms(self):
        """Clears all level terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('level')

    level_terms = property(fdel=clear_level_terms)

    @utilities.arguments_not_none
    def match_rubric_id(self, assessment_id, match):
        """Sets the rubric assessment ``Id`` for this query.

        arg:    assessment_id (osid.id.Id): an assessment ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``assessment_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('rubricId', str(assessment_id), match)

    def clear_rubric_id_terms(self):
        """Clears all rubric assessment ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('rubricId')

    rubric_id_terms = property(fdel=clear_rubric_id_terms)

    def supports_rubric_query(self):
        """Tests if an ``AssessmentQuery`` is available.

        return: (boolean) - ``true`` if a rubric assessment query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_rubric_query(self):
        """Gets the query for a rubric assessment.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentQuery) - the assessment
                query
        raise:  Unimplemented - ``supports_rubric_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_rubric_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    rubric_query = property(fget=get_rubric_query)

    @utilities.arguments_not_none
    def match_any_rubric(self, match):
        """Matches an assessment that has any rubric assessment assigned.

        arg:    match (boolean): ``true`` to match assessments with any
                rubric, ``false`` to match assessments with no rubric
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_rubric_terms(self):
        """Clears all rubric assessment terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('rubric')

    rubric_terms = property(fdel=clear_rubric_terms)

    @utilities.arguments_not_none
    def match_item_id(self, item_id, match):
        """Sets the item ``Id`` for this query.

        arg:    item_id (osid.id.Id): an item ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``item_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        self._add_match('itemIds', str(item_id), match)

    def clear_item_id_terms(self):
        """Clears all item ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        self._clear_terms('itemIds')

    item_id_terms = property(fdel=clear_item_id_terms)

    def supports_item_query(self):
        """Tests if an ``ItemQuery`` is available.

        return: (boolean) - ``true`` if an item query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_item_query(self):
        """Gets the query for an item.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.ItemQuery) - the item query
        raise:  Unimplemented - ``supports_item_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_item_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    item_query = property(fget=get_item_query)

    @utilities.arguments_not_none
    def match_any_item(self, match):
        """Matches an assessment that has any item.

        arg:    match (boolean): ``true`` to match assessments with any
                item, ``false`` to match assessments with no items
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_item_terms(self):
        """Clears all item terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    item_terms = property(fdel=clear_item_terms)

    @utilities.arguments_not_none
    def match_assessment_offered_id(self, assessment_offered_id, match):
        """Sets the assessment offered ``Id`` for this query.

        arg:    assessment_offered_id (osid.id.Id): an assessment
                offered ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``assessment_offered_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        self._add_match('assessmentOfferedId', str(assessment_offered_id), match)

    def clear_assessment_offered_id_terms(self):
        """Clears all assessment offered ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('assessmentOfferedId')

    assessment_offered_id_terms = property(fdel=clear_assessment_offered_id_terms)

    def supports_assessment_offered_query(self):
        """Tests if an ``AssessmentOfferedQuery`` is available.

        return: (boolean) - ``true`` if an assessment offered query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_assessment_offered_query(self):
        """Gets the query for an assessment offered.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentOfferedQuery) - the
                assessment offered query
        raise:  Unimplemented - ``supports_assessment_offered_query()``
                is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_assessment_offered_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    assessment_offered_query = property(fget=get_assessment_offered_query)

    @utilities.arguments_not_none
    def match_any_assessment_offered(self, match):
        """Matches an assessment that has any offering.

        arg:    match (boolean): ``true`` to match assessments with any
                offering, ``false`` to match assessments with no
                offerings
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_assessment_offered_terms(self):
        """Clears all assessment offered terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    assessment_offered_terms = property(fdel=clear_assessment_offered_terms)

    @utilities.arguments_not_none
    def match_assessment_taken_id(self, assessment_taken_id, match):
        """Sets the assessment taken ``Id`` for this query.

        arg:    assessment_taken_id (osid.id.Id): an assessment taken
                ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``assessment_taken_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('assessmentTakenId', str(assessment_taken_id), match)

    def clear_assessment_taken_id_terms(self):
        """Clears all assessment taken ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('assessmentTakenId')

    assessment_taken_id_terms = property(fdel=clear_assessment_taken_id_terms)

    def supports_assessment_taken_query(self):
        """Tests if an ``AssessmentTakenQuery`` is available.

        return: (boolean) - ``true`` if an assessment taken query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_assessment_taken_query(self):
        """Gets the query for an assessment taken.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentTakenQuery) - the assessment
                taken query
        raise:  Unimplemented - ``supports_assessment_taken_query()``
                is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_assessment_taken_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    assessment_taken_query = property(fget=get_assessment_taken_query)

    @utilities.arguments_not_none
    def match_any_assessment_taken(self, match):
        """Matches an assessment that has any taken version.

        arg:    match (boolean): ``true`` to match assessments with any
                taken assessments, ``false`` to match assessments with
                no taken assessments
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_assessment_taken_terms(self):
        """Clears all assessment taken terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    assessment_taken_terms = property(fdel=clear_assessment_taken_terms)

    @utilities.arguments_not_none
    def match_bank_id(self, bank_id, match):
        """Sets the bank ``Id`` for this query.

        arg:    bank_id (osid.id.Id): a bank ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``bank_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_bin_id
        self._add_match('assignedBankIds', str(bank_id), match)

    def clear_bank_id_terms(self):
        """Clears all bank ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms
        self._clear_terms('assignedBankIds')

    bank_id_terms = property(fdel=clear_bank_id_terms)

    def supports_bank_query(self):
        """Tests if a ``BankQuery`` is available.

        return: (boolean) - ``true`` if a bank query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_bank_query(self):
        """Gets the query for a bank.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.BankQuery) - the bank query
        raise:  Unimplemented - ``supports_bank_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_bank_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    bank_query = property(fget=get_bank_query)

    def clear_bank_terms(self):
        """Clears all bank terms.
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('bank')

    bank_terms = property(fdel=clear_bank_terms)

    @utilities.arguments_not_none
    def get_assessment_query_record(self, assessment_record_type):
        """Gets the assessment query record corresponding to the given ``Assessment`` record ``Type``.

        Multiple retrievals produce a nested ``OR`` term.

        arg:    assessment_record_type (osid.type.Type): an assessment
                record type
        return: (osid.assessment.records.AssessmentQueryRecord) - the
                assessment query record
        raise:  NullArgument - ``assessment_record_type`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  Unsupported -
                ``has_record_type(assessment_record_type)`` is
                ``false``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()


class AssessmentOfferedQuery(abc_assessment_queries.AssessmentOfferedQuery, osid_queries.OsidObjectQuery, osid_queries.OsidSubjugateableQuery):
    """This is the query for searching assessments.

    Each method match request produces an ``AND`` term while multiple
    invocations of a method produces a nested ``OR``.

    """
    def __init__(self, runtime):
        self._namespace = 'assessment.AssessmentOffered'
        self._runtime = runtime
        record_type_data_sets = get_registry('ASSESSMENT_OFFERED_RECORD_TYPES', runtime)
        self._all_supported_record_type_data_sets = record_type_data_sets
        self._all_supported_record_type_ids = []
        for data_set in record_type_data_sets:
            self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
        osid_queries.OsidObjectQuery.__init__(self, runtime)

    @utilities.arguments_not_none
    def match_assessment_id(self, assessment_id, match):
        """Sets the assessment ``Id`` for this query.

        arg:    assessment_id (osid.id.Id): an assessment ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``assessment_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        self._add_match('assessmentId', str(assessment_id), match)

    def clear_assessment_id_terms(self):
        """Clears all assessment ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('assessmentId')

    assessment_id_terms = property(fdel=clear_assessment_id_terms)

    def supports_assessment_query(self):
        """Tests if an ``AssessmentQuery`` is available.

        return: (boolean) - ``true`` if an assessment query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_assessment_query(self):
        """Gets the query for an assessment.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentQuery) - the assessment
                query
        raise:  Unimplemented - ``supports_assessment_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_assessment_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    assessment_query = property(fget=get_assessment_query)

    def clear_assessment_terms(self):
        """Clears all assessment terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('assessment')

    assessment_terms = property(fdel=clear_assessment_terms)

    @utilities.arguments_not_none
    def match_level_id(self, grade_id, match):
        """Sets the level grade ``Id`` for this query.

        arg:    grade_id (osid.id.Id): a grade ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``grade_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('levelId', str(grade_id), match)

    def clear_level_id_terms(self):
        """Clears all level ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('levelId')

    level_id_terms = property(fdel=clear_level_id_terms)

    def supports_level_query(self):
        """Tests if a ``GradeQuery`` is available.

        return: (boolean) - ``true`` if a grade query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_level_query(self):
        """Gets the query for a grade.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradeQuery) - the grade query
        raise:  Unimplemented - ``supports_level_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_level_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    level_query = property(fget=get_level_query)

    @utilities.arguments_not_none
    def match_any_level(self, match):
        """Matches an assessment offered that has any level assigned.

        arg:    match (boolean): ``true`` to match offerings with any
                level, ``false`` to match offerings with no levels
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_level_terms(self):
        """Clears all level terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('level')

    level_terms = property(fdel=clear_level_terms)

    @utilities.arguments_not_none
    def match_items_sequential(self, match):
        """Match sequential assessments.

        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_items_sequential_terms(self):
        """Clears all sequential terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('itemsSequential')

    items_sequential_terms = property(fdel=clear_items_sequential_terms)

    @utilities.arguments_not_none
    def match_items_shuffled(self, match):
        """Match shuffled item assessments.

        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_items_shuffled_terms(self):
        """Clears all shuffled terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('itemsShuffled')

    items_shuffled_terms = property(fdel=clear_items_shuffled_terms)

    @utilities.arguments_not_none
    def match_start_time(self, start, end, match):
        """Matches assessments whose start time falls between the specified range inclusive.

        arg:    start (osid.calendaring.DateTime): start of range
        arg:    end (osid.calendaring.DateTime): end of range
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  InvalidArgument - ``end`` is less than ``start``
        *compliance: mandatory -- This method must be implemented.*

        """
        self._match_minimum_date_time('startTime', start, match)
        self._match_maximum_date_time('startTime', end, match)

    @utilities.arguments_not_none
    def match_any_start_time(self, match):
        """Matches offerings that has any start time assigned.

        arg:    match (boolean): ``true`` to match offerings with any
                start time, ``false`` to match offerings with no start
                time
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_start_time_terms(self):
        """Clears all scheduled terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('startTime')

    start_time_terms = property(fdel=clear_start_time_terms)

    @utilities.arguments_not_none
    def match_deadline(self, start, end, match):
        """Matches assessments whose end time falls between the specified range inclusive.

        arg:    start (osid.calendaring.DateTime): start of range
        arg:    end (osid.calendaring.DateTime): end of range
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  InvalidArgument - ``end`` is less than ``start``
        raise:  NullArgument - ``start`` or ``end`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        self._match_minimum_date_time('deadline', start, match)
        self._match_maximum_date_time('deadline', end, match)

    @utilities.arguments_not_none
    def match_any_deadline(self, match):
        """Matches offerings that have any deadline assigned.

        arg:    match (boolean): ``true`` to match offerings with any
                deadline, ``false`` to match offerings with no deadline
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_deadline_terms(self):
        """Clears all deadline terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('deadline')

    deadline_terms = property(fdel=clear_deadline_terms)

    @utilities.arguments_not_none
    def match_duration(self, low, high, match):
        """Matches assessments whose duration falls between the specified range inclusive.

        arg:    low (osid.calendaring.Duration): start range of
                duration
        arg:    high (osid.calendaring.Duration): end range of duration
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  InvalidArgument - ``end`` is less than ``start``
        raise:  NullArgument - ``start`` or ``end`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    @utilities.arguments_not_none
    def match_any_duration(self, match):
        """Matches offerings that have any duration assigned.

        arg:    match (boolean): ``true`` to match offerings with any
                duration, ``false`` to match offerings with no duration
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_duration_terms(self):
        """Clears all duration terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('duration')

    duration_terms = property(fdel=clear_duration_terms)

    @utilities.arguments_not_none
    def match_score_system_id(self, grade_system_id, match):
        """Sets the grade system ``Id`` for this query.

        arg:    grade_system_id (osid.id.Id): a grade system ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``grade_system_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('scoreSystemId', str(grade_system_id), match)

    def clear_score_system_id_terms(self):
        """Clears all grade system ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('scoreSystemId')

    score_system_id_terms = property(fdel=clear_score_system_id_terms)

    def supports_score_system_query(self):
        """Tests if a ``GradeSystemQuery`` is available.

        return: (boolean) - ``true`` if a grade system query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_score_system_query(self):
        """Gets the query for a grade system.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradeSystemQuery) - the grade system
                query
        raise:  Unimplemented - ``supports_score_system_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_score_system_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    score_system_query = property(fget=get_score_system_query)

    @utilities.arguments_not_none
    def match_any_score_system(self, match):
        """Matches taken assessments that have any grade system assigned.

        arg:    match (boolean): ``true`` to match assessments with any
                grade system, ``false`` to match assessments with no
                grade system
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_score_system_terms(self):
        """Clears all grade system terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('scoreSystem')

    score_system_terms = property(fdel=clear_score_system_terms)

    @utilities.arguments_not_none
    def match_grade_system_id(self, grade_system_id, match):
        """Sets the grade system ``Id`` for this query.

        arg:    grade_system_id (osid.id.Id): a grade system ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``grade_system_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('gradeSystemId', str(grade_system_id), match)

    def clear_grade_system_id_terms(self):
        """Clears all grade system ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('gradeSystemId')

    grade_system_id_terms = property(fdel=clear_grade_system_id_terms)

    def supports_grade_system_query(self):
        """Tests if a ``GradeSystemQuery`` is available.

        return: (boolean) - ``true`` if a grade system query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_grade_system_query(self):
        """Gets the query for a grade system.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradeSystemQuery) - the grade system
                query
        raise:  Unimplemented - ``supports_grade_system_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_grade_system_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    grade_system_query = property(fget=get_grade_system_query)

    @utilities.arguments_not_none
    def match_any_grade_system(self, match):
        """Matches taken assessments that have any grade system assigned.

        arg:    match (boolean): ``true`` to match assessments with any
                grade system, ``false`` to match assessments with no
                grade system
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_grade_system_terms(self):
        """Clears all grade system terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('gradeSystem')

    grade_system_terms = property(fdel=clear_grade_system_terms)

    @utilities.arguments_not_none
    def match_rubric_id(self, assessment_offered_id, match):
        """Sets the rubric assessment offered ``Id`` for this query.

        arg:    assessment_offered_id (osid.id.Id): an assessment
                offered ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``assessment_offered_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('rubricId', str(assessment_offered_id), match)

    def clear_rubric_id_terms(self):
        """Clears all rubric assessment offered ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('rubricId')

    rubric_id_terms = property(fdel=clear_rubric_id_terms)

    def supports_rubric_query(self):
        """Tests if an ``AssessmentOfferedQuery`` is available.

        return: (boolean) - ``true`` if a rubric assessment offered
                query is available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_rubric_query(self):
        """Gets the query for a rubric assessment.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentOfferedQuery) - the
                assessment offered query
        raise:  Unimplemented - ``supports_rubric_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_rubric_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    rubric_query = property(fget=get_rubric_query)

    @utilities.arguments_not_none
    def match_any_rubric(self, match):
        """Matches an assessment offered that has any rubric assessment assigned.

        arg:    match (boolean): ``true`` to match assessments offered
                with any rubric, ``false`` to match assessments offered
                with no rubric
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_rubric_terms(self):
        """Clears all rubric assessment terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    rubric_terms = property(fdel=clear_rubric_terms)

    @utilities.arguments_not_none
    def match_assessment_taken_id(self, assessment_taken_id, match):
        """Sets the assessment taken ``Id`` for this query.

        arg:    assessment_taken_id (osid.id.Id): an assessment taken
                ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``assessment_taken_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('assessmentTakenId', str(assessment_taken_id), match)

    def clear_assessment_taken_id_terms(self):
        """Clears all assessment taken ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('assessmentTakenId')

    assessment_taken_id_terms = property(fdel=clear_assessment_taken_id_terms)

    def supports_assessment_taken_query(self):
        """Tests if an ``AssessmentTakenQuery`` is available.

        return: (boolean) - ``true`` if an assessment taken query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_assessment_taken_query(self):
        """Gets the query for an assessment taken.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentTakenQuery) - the assessment
                taken query
        raise:  Unimplemented - ``supports_assessment_taken_query()``
                is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_assessment_taken_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    assessment_taken_query = property(fget=get_assessment_taken_query)

    @utilities.arguments_not_none
    def match_any_assessment_taken(self, match):
        """Matches offerings that have any taken assessment version.

        arg:    match (boolean): ``true`` to match offerings with any
                taken assessment, ``false`` to match offerings with no
                assessment taken
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_assessment_taken_terms(self):
        """Clears all assessment taken terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    assessment_taken_terms = property(fdel=clear_assessment_taken_terms)

    @utilities.arguments_not_none
    def match_bank_id(self, bank_id, match):
        """Sets the bank ``Id`` for this query.

        arg:    bank_id (osid.id.Id): a bank ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``bank_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_bin_id
        self._add_match('assignedBankIds', str(bank_id), match)

    def clear_bank_id_terms(self):
        """Clears all bank ``Id`` terms.
*compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms self._clear_terms('assignedBankIds') bank_id_terms = property(fdel=clear_bank_id_terms) def supports_bank_query(self): """Tests if a ``BankQuery`` is available. return: (boolean) - ``true`` if a bank query is available, ``false`` otherwise *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def get_bank_query(self): """Gets the query for a bank. Multiple retrievals produce a nested ``OR`` term. return: (osid.assessment.BankQuery) - the bank query raise: Unimplemented - ``supports_bank_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_bank_query()`` is ``true``.* """ raise errors.Unimplemented() bank_query = property(fget=get_bank_query) def clear_bank_terms(self): """Clears all bank terms. *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceQuery.clear_group_terms self._clear_terms('bank') bank_terms = property(fdel=clear_bank_terms) @utilities.arguments_not_none def get_assessment_offered_query_record(self, assessment_offered_record_type): """Gets the assessment offered query record corresponding to the given ``AssessmentOffered`` record ``Type``. Multiple retrievals produce a nested ``OR`` term. 
arg: assessment_offered_record_type (osid.type.Type): an assessment offered record type return: (osid.assessment.records.AssessmentOfferedQueryRecord) - the assessment offered query record raise: NullArgument - ``assessment_offered_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(assessment_offered_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() class AssessmentTakenQuery(abc_assessment_queries.AssessmentTakenQuery, osid_queries.OsidObjectQuery): """This is the query for searching assessments. Each method match request produces an ``AND`` term while multiple invocations of a method produces a nested ``OR``. """ def __init__(self, runtime): self._namespace = 'assessment.AssessmentTaken' self._runtime = runtime record_type_data_sets = get_registry('ASSESSMENT_TAKEN_RECORD_TYPES', runtime) self._all_supported_record_type_data_sets = record_type_data_sets self._all_supported_record_type_ids = [] for data_set in record_type_data_sets: self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set]))) osid_queries.OsidObjectQuery.__init__(self, runtime) @utilities.arguments_not_none def match_assessment_offered_id(self, assessment_offered_id, match): """Sets the assessment offered ``Id`` for this query. arg: assessment_offered_id (osid.id.Id): an assessment ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``assessment_offered_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ self._add_match('assessmentOfferedId', str(assessment_offered_id), match) def clear_assessment_offered_id_terms(self): """Clears all assessment offered ``Id`` terms. 
*compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('assessmentOfferedId')

    assessment_offered_id_terms = property(fdel=clear_assessment_offered_id_terms)

    def supports_assessment_offered_query(self):
        """Tests if an ``AssessmentOfferedQuery`` is available.

        return: (boolean) - ``true`` if an assessment offered query is
            available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_assessment_offered_query(self):
        """Gets the query for an assessment.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentOfferedQuery) - the
            assessment offered query
        raise: Unimplemented - ``supports_assessment_offered_query()``
            is ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_assessment_offered_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    assessment_offered_query = property(fget=get_assessment_offered_query)

    def clear_assessment_offered_terms(self):
        """Clears all assessment offered terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('assessmentOffered')

    assessment_offered_terms = property(fdel=clear_assessment_offered_terms)

    @utilities.arguments_not_none
    def match_taker_id(self, resource_id, match):
        """Sets the resource ``Id`` for this query.

        arg: resource_id (osid.id.Id): a resource ``Id``
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: NullArgument - ``resource_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('takerId', str(resource_id), match)

    def clear_taker_id_terms(self):
        """Clears all resource ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('takerId')

    taker_id_terms = property(fdel=clear_taker_id_terms)

    def supports_taker_query(self):
        """Tests if a ``ResourceQuery`` is available.

        return: (boolean) - ``true`` if a resource query is available,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_taker_query(self):
        """Gets the query for a resource.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.resource.ResourceQuery) - the resource query
        raise: Unimplemented - ``supports_taker_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_taker_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    taker_query = property(fget=get_taker_query)

    def clear_taker_terms(self):
        """Clears all resource terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('taker')

    taker_terms = property(fdel=clear_taker_terms)

    @utilities.arguments_not_none
    def match_taking_agent_id(self, agent_id, match):
        """Sets the agent ``Id`` for this query.

        arg: agent_id (osid.id.Id): an agent ``Id``
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: NullArgument - ``agent_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        self._add_match('takingAgentId', str(agent_id), bool(match))

    def clear_taking_agent_id_terms(self):
        """Clears all agent ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('takingAgentId')

    taking_agent_id_terms = property(fdel=clear_taking_agent_id_terms)

    def supports_taking_agent_query(self):
        """Tests if an ``AgentQuery`` is available.

        return: (boolean) - ``true`` if an agent query is available,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_taking_agent_query(self):
        """Gets the query for an agent.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.authentication.AgentQuery) - the agent query
        raise: Unimplemented - ``supports_taking_agent_query()`` is
            ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_taking_agent_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    taking_agent_query = property(fget=get_taking_agent_query)

    def clear_taking_agent_terms(self):
        """Clears all taking agent terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    taking_agent_terms = property(fdel=clear_taking_agent_terms)

    @utilities.arguments_not_none
    def match_actual_start_time(self, start, end, match):
        """Matches assessments whose start time falls between the specified range inclusive.

        arg: start (osid.calendaring.DateTime): start of range
        arg: end (osid.calendaring.DateTime): end of range
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: InvalidArgument - ``end`` is less than ``start``
        raise: NullArgument - ``start`` or ``end`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    @utilities.arguments_not_none
    def match_any_actual_start_time(self, match):
        """Matches taken assessments taken that have begun.

        arg: match (boolean): ``true`` to match assessments taken
            started, ``false`` to match assessments taken that have not
            begun
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_actual_start_time_terms(self):
        """Clears all start time terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    actual_start_time_terms = property(fdel=clear_actual_start_time_terms)

    @utilities.arguments_not_none
    def match_completion_time(self, start, end, match):
        """Matches assessments whose completion time falls between the specified range inclusive.

        arg: start (osid.calendaring.DateTime): start of range
        arg: end (osid.calendaring.DateTime): end of range
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: InvalidArgument - ``end`` is less than ``start``
        raise: NullArgument - ``start`` or ``end`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    @utilities.arguments_not_none
    def match_any_completion_time(self, match):
        """Matches taken assessments taken that have completed.

        arg: match (boolean): ``true`` to match assessments taken
            completed, ``false`` to match assessments taken that are
            incomplete
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_completion_time_terms(self):
        """Clears all in completion time terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    completion_time_terms = property(fdel=clear_completion_time_terms)

    @utilities.arguments_not_none
    def match_time_spent(self, low, high, match):
        """Matches assessments where the time spent falls between the specified range inclusive.

        arg: low (osid.calendaring.Duration): start of duration range
        arg: high (osid.calendaring.Duration): end of duration range
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: InvalidArgument - ``high`` is less than ``low``
        raise: NullArgument - ``low`` or ``high`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_time_spent_terms(self):
        """Clears all in time spent terms.
*compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    time_spent_terms = property(fdel=clear_time_spent_terms)

    @utilities.arguments_not_none
    def match_score_system_id(self, grade_system_id, match):
        """Sets the grade system ``Id`` for this query.

        arg: grade_system_id (osid.id.Id): a grade system ``Id``
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: NullArgument - ``grade_system_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('scoreSystemId', str(grade_system_id), match)

    def clear_score_system_id_terms(self):
        """Clears all grade system ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('scoreSystemId')

    score_system_id_terms = property(fdel=clear_score_system_id_terms)

    def supports_score_system_query(self):
        """Tests if a ``GradeSystemQuery`` is available.

        return: (boolean) - ``true`` if a grade system query is
            available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_score_system_query(self):
        """Gets the query for a grade system.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradeSystemQuery) - the grade system query
        raise: Unimplemented - ``supports_score_system_query()`` is
            ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_score_system_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    score_system_query = property(fget=get_score_system_query)

    @utilities.arguments_not_none
    def match_any_score_system(self, match):
        """Matches taken assessments that have any grade system assigned.

        arg: match (boolean): ``true`` to match assessments with any
            grade system, ``false`` to match assessments with no grade
            system
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_score_system_terms(self):
        """Clears all grade system terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    score_system_terms = property(fdel=clear_score_system_terms)

    @utilities.arguments_not_none
    def match_score(self, low, high, match):
        """Matches assessments whose score falls between the specified range inclusive.

        arg: low (decimal): start of range
        arg: high (decimal): end of range
        arg: match (boolean): ``true`` for a positive match, ``false``
            for negative match
        raise: InvalidArgument - ``high`` is less than ``low``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    @utilities.arguments_not_none
    def match_any_score(self, match):
        """Matches taken assessments that have any score assigned.

        arg: match (boolean): ``true`` to match assessments with any
            score, ``false`` to match assessments with no score
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_score_terms(self):
        """Clears all score terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    score_terms = property(fdel=clear_score_terms)

    @utilities.arguments_not_none
    def match_grade_id(self, grade_id, match):
        """Sets the grade ``Id`` for this query.

        arg: grade_id (osid.id.Id): a grade ``Id``
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: NullArgument - ``grade_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('gradeId', str(grade_id), match)

    def clear_grade_id_terms(self):
        """Clears all grade ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('gradeId')

    grade_id_terms = property(fdel=clear_grade_id_terms)

    def supports_grade_query(self):
        """Tests if a ``GradeQuery`` is available.

        return: (boolean) - ``true`` if a grade query is available,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_grade_query(self):
        """Gets the query for a grade.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradeQuery) - the grade query
        raise: Unimplemented - ``supports_grade_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_grade_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    grade_query = property(fget=get_grade_query)

    @utilities.arguments_not_none
    def match_any_grade(self, match):
        """Matches taken assessments that have any grade assigned.

        arg: match (boolean): ``true`` to match assessments with any
            grade, ``false`` to match assessments with no grade
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_grade_terms(self):
        """Clears all grade terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    grade_terms = property(fdel=clear_grade_terms)

    @utilities.arguments_not_none
    def match_feedback(self, comments, string_match_type, match):
        """Sets the comment string for this query.

        arg: comments (string): comment string
        arg: string_match_type (osid.type.Type): the string match type
        arg: match (boolean): ``true`` for a positive match, ``false``
            for negative match
        raise: InvalidArgument - ``comments is`` not of
            ``string_match_type``
        raise: NullArgument - ``comments`` or ``string_match_type`` is
            ``null``
        raise: Unsupported -
            ``supports_string_match_type(string_match_type)`` is
            ``false``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    @utilities.arguments_not_none
    def match_any_feedback(self, match):
        """Matches taken assessments that have any comments.

        arg: match (boolean): ``true`` to match assessments with any
            comments, ``false`` to match assessments with no comments
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_feedback_terms(self):
        """Clears all comment terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    feedback_terms = property(fdel=clear_feedback_terms)

    @utilities.arguments_not_none
    def match_rubric_id(self, assessment_taken_id, match):
        """Sets the rubric assessment taken ``Id`` for this query.

        arg: assessment_taken_id (osid.id.Id): an assessment taken
            ``Id``
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: NullArgument - ``assessment_taken_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('rubricId', str(assessment_taken_id), match)

    def clear_rubric_id_terms(self):
        """Clears all rubric assessment taken ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('rubricId')

    rubric_id_terms = property(fdel=clear_rubric_id_terms)

    def supports_rubric_query(self):
        """Tests if an ``AssessmentTakenQuery`` is available.

        return: (boolean) - ``true`` if a rubric assessment taken query
            is available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_rubric_query(self):
        """Gets the query for a rubric assessment.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentTakenQuery) - the assessment
            taken query
        raise: Unimplemented - ``supports_rubric_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_rubric_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    rubric_query = property(fget=get_rubric_query)

    @utilities.arguments_not_none
    def match_any_rubric(self, match):
        """Matches an assessment taken that has any rubric assessment assigned.

        arg: match (boolean): ``true`` to match assessments taken with
            any rubric, ``false`` to match assessments taken with no
            rubric
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_rubric_terms(self):
        """Clears all rubric assessment taken terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    rubric_terms = property(fdel=clear_rubric_terms)

    @utilities.arguments_not_none
    def match_bank_id(self, bank_id, match):
        """Sets the bank ``Id`` for this query.

        arg: bank_id (osid.id.Id): a bank ``Id``
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: NullArgument - ``bank_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_bin_id
        self._add_match('assignedBankIds', str(bank_id), match)

    def clear_bank_id_terms(self):
        """Clears all bank ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms
        self._clear_terms('assignedBankIds')

    bank_id_terms = property(fdel=clear_bank_id_terms)

    def supports_bank_query(self):
        """Tests if a ``BankQuery`` is available.

        return: (boolean) - ``true`` if a bank query is available,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_bank_query(self):
        """Gets the query for a bank.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.BankQuery) - the bank query
        raise: Unimplemented - ``supports_bank_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_bank_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    bank_query = property(fget=get_bank_query)

    def clear_bank_terms(self):
        """Clears all bank terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('bank')

    bank_terms = property(fdel=clear_bank_terms)

    @utilities.arguments_not_none
    def get_assessment_taken_query_record(self, assessment_taken_record_type):
        """Gets the assessment taken query record corresponding to the given ``AssessmentTaken`` record ``Type``.

        Multiple retrievals produce a nested ``OR`` term.
arg: assessment_taken_record_type (osid.type.Type): an assessment taken record type return: (osid.assessment.records.AssessmentTakenQueryRecord) - the assessment taken query record raise: NullArgument - ``assessment_taken_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(assessment_taken_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() class BankQuery(abc_assessment_queries.BankQuery, osid_queries.OsidCatalogQuery): """This is the query for searching banks Each method specifies an ``AND`` term while multiple invocations of the same method produce a nested ``OR``.""" def __init__(self, runtime): self._runtime = runtime record_type_data_sets = get_registry('BANK_RECORD_TYPES', runtime) self._all_supported_record_type_data_sets = record_type_data_sets self._all_supported_record_type_ids = [] for data_set in record_type_data_sets: self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set]))) osid_queries.OsidCatalogQuery.__init__(self, runtime) def _get_descendant_catalog_ids(self, catalog_id): hm = self._get_provider_manager('HIERARCHY') hts = hm.get_hierarchy_traversal_session_for_hierarchy( Id(authority='ASSESSMENT', namespace='CATALOG', identifier='BANK') ) # What about the Proxy? descendants = [] if hts.has_children(catalog_id): for child_id in hts.get_children(catalog_id): descendants += list(self._get_descendant_catalog_ids(child_id)) descendants.append(child_id) return IdList(descendants) @utilities.arguments_not_none def match_item_id(self, item_id, match): """Sets the item ``Id`` for this query. 
arg: item_id (osid.id.Id): an item ``Id``
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: NullArgument - ``item_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_item_id_terms(self):
        """Clears all item ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        self._clear_terms('itemId')

    item_id_terms = property(fdel=clear_item_id_terms)

    def supports_item_query(self):
        """Tests if a ``ItemQuery`` is available.

        return: (boolean) - ``true`` if an item query is available,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_item_query(self):
        """Gets the query for an item.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.ItemQuery) - the item query
        raise: Unimplemented - ``supports_item_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_item_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    item_query = property(fget=get_item_query)

    @utilities.arguments_not_none
    def match_any_item(self, match):
        """Matches assessment banks that have any item assigned.

        arg: match (boolean): ``true`` to match banks with any item,
            ``false`` to match assessments with no item
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_item_terms(self):
        """Clears all item terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        self._clear_terms('item')

    item_terms = property(fdel=clear_item_terms)

    @utilities.arguments_not_none
    def match_assessment_id(self, assessment_id, match):
        """Sets the assessment ``Id`` for this query.

        arg: assessment_id (osid.id.Id): an assessment ``Id``
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: NullArgument - ``assessment_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_assessment_id_terms(self):
        """Clears all assessment ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        self._clear_terms('assessmentId')

    assessment_id_terms = property(fdel=clear_assessment_id_terms)

    def supports_assessment_query(self):
        """Tests if an ``AssessmentQuery`` is available.

        return: (boolean) - ``true`` if an assessment query is
            available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_assessment_query(self):
        """Gets the query for an assessment.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentQuery) - the assessment query
        raise: Unimplemented - ``supports_assessment_query()`` is
            ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_assessment_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    assessment_query = property(fget=get_assessment_query)

    @utilities.arguments_not_none
    def match_any_assessment(self, match):
        """Matches assessment banks that have any assessment assigned.

        arg: match (boolean): ``true`` to match banks with any
            assessment, ``false`` to match banks with no assessment
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_assessment_terms(self):
        """Clears all assessment terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        self._clear_terms('assessment')

    assessment_terms = property(fdel=clear_assessment_terms)

    @utilities.arguments_not_none
    def match_assessment_offered_id(self, assessment_offered_id, match):
        """Sets the assessment offered ``Id`` for this query.

        arg: assessment_offered_id (osid.id.Id): an assessment ``Id``
        arg: match (boolean): ``true`` for a positive match, ``false``
            for a negative match
        raise: NullArgument - ``assessment_offered_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_assessment_offered_id_terms(self):
        """Clears all assessment offered ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        self._clear_terms('assessmentOfferedId')

    assessment_offered_id_terms = property(fdel=clear_assessment_offered_id_terms)

    def supports_assessment_offered_query(self):
        """Tests if an ``AssessmentOfferedQuery`` is available.

        return: (boolean) - ``true`` if an assessment offered query is
            available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_assessment_offered_query(self):
        """Gets the query for an assessment offered.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.assessment.AssessmentOfferedQuery) - the
            assessment offered query
        raise: Unimplemented - ``supports_assessment_offered_query()``
            is ``false``
        *compliance: optional -- This method must be implemented if
            ``supports_assessment_offered_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    assessment_offered_query = property(fget=get_assessment_offered_query)

    @utilities.arguments_not_none
    def match_any_assessment_offered(self, match):
        """Matches assessment banks that have any assessment offering assigned.

        arg: match (boolean): ``true`` to match banks with any
            assessment offering, ``false`` to match banks with no
            offering
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_assessment_offered_terms(self):
        """Clears all assessment offered terms.
*compliance: mandatory -- This method must be implemented.* """ self._clear_terms('assessmentOffered') assessment_offered_terms = property(fdel=clear_assessment_offered_terms) @utilities.arguments_not_none def match_ancestor_bank_id(self, bank_id, match): """Sets the bank ``Id`` for to match banks in which the specified bank is an acestor. arg: bank_id (osid.id.Id): a bank ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``bank_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # matches when the bank_id param is an ancestor of # any bank bank_descendants = self._get_descendant_catalog_ids(bank_id) identifiers = [ObjectId(i.identifier) for i in bank_descendants] self._query_terms['_id'] = {'$in': identifiers} def clear_ancestor_bank_id_terms(self): """Clears all ancestor bank ``Id`` terms. *compliance: mandatory -- This method must be implemented.* """ self._clear_terms('ancestorBankId') ancestor_bank_id_terms = property(fdel=clear_ancestor_bank_id_terms) def supports_ancestor_bank_query(self): """Tests if a ``BankQuery`` is available. return: (boolean) - ``true`` if a bank query is available, ``false`` otherwise *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def get_ancestor_bank_query(self): """Gets the query for an ancestor bank. Multiple retrievals produce a nested ``OR`` term. return: (osid.assessment.BankQuery) - the bank query raise: Unimplemented - ``supports_ancestor_bank_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_ancestor_bank_query()`` is ``true``.* """ raise errors.Unimplemented() ancestor_bank_query = property(fget=get_ancestor_bank_query) @utilities.arguments_not_none def match_any_ancestor_bank(self, match): """Matches a bank that has any ancestor. 
arg: match (boolean): ``true`` to match banks with any ancestor banks, ``false`` to match root banks *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def clear_ancestor_bank_terms(self): """Clears all ancestor bank terms. *compliance: mandatory -- This method must be implemented.* """ self._clear_terms('ancestorBank') ancestor_bank_terms = property(fdel=clear_ancestor_bank_terms) @utilities.arguments_not_none def match_descendant_bank_id(self, bank_id, match): """Sets the bank ``Id`` for to match banks in which the specified bank is a descendant. arg: bank_id (osid.id.Id): a bank ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``bank_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def clear_descendant_bank_id_terms(self): """Clears all descendant bank ``Id`` terms. *compliance: mandatory -- This method must be implemented.* """ self._clear_terms('descendantBankId') descendant_bank_id_terms = property(fdel=clear_descendant_bank_id_terms) def supports_descendant_bank_query(self): """Tests if a ``BankQuery`` is available. return: (boolean) - ``true`` if a bank query is available, ``false`` otherwise *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def get_descendant_bank_query(self): """Gets the query for a descendant bank. Multiple retrievals produce a nested ``OR`` term. return: (osid.assessment.BankQuery) - the bank query raise: Unimplemented - ``supports_descendant_bank_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_descendant_bank_query()`` is ``true``.* """ raise errors.Unimplemented() descendant_bank_query = property(fget=get_descendant_bank_query) @utilities.arguments_not_none def match_any_descendant_bank(self, match): """Matches a bank that has any descendant. 
arg: match (boolean): ``true`` to match banks with any descendant banks, ``false`` to match leaf banks *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() def clear_descendant_bank_terms(self): """Clears all descendant bank terms. *compliance: mandatory -- This method must be implemented.* """ self._clear_terms('descendantBank') descendant_bank_terms = property(fdel=clear_descendant_bank_terms) @utilities.arguments_not_none def get_bank_query_record(self, bank_record_type): """Gets the bank query record corresponding to the given ``Bank`` record ``Type``. Multiple record retrievals produce a nested ``OR`` term. arg: bank_record_type (osid.type.Type): a bank record type return: (osid.assessment.records.BankQueryRecord) - the bank query record raise: NullArgument - ``bank_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(bank_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented()
35.745488
156
0.647353
10,955
97,049
5.53382
0.026472
0.033981
0.047573
0.054369
0.898933
0.870412
0.837421
0.810534
0.797866
0.770236
0
0
0.254222
97,049
2,714
157
35.758659
0.8376
0.546106
0
0.686416
0
0
0.036804
0.005123
0
0
0
0
0
1
0.309249
false
0
0.011561
0
0.476879
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
7
b3f5b7dec9d4a1bc5dc2d5c7fb26e2c032628fb2
4,538
py
Python
test/test_nim_players.py
madisonmussari/mcts_kds
86202640de2abc017d32c4db08abf2b32d9c2a70
[ "MIT" ]
1
2021-09-15T04:24:32.000Z
2021-09-15T04:24:32.000Z
test/test_nim_players.py
madisonmussari/mcts_kds
86202640de2abc017d32c4db08abf2b32d9c2a70
[ "MIT" ]
null
null
null
test/test_nim_players.py
madisonmussari/mcts_kds
86202640de2abc017d32c4db08abf2b32d9c2a70
[ "MIT" ]
null
null
null
from context import nim
from context import utils
from context import players


def _final_environment(environment, agents):
    """Play one full game and return the terminal environment from the log.

    ``utils.play`` returns a log whose entries are
    ``(environment, _, _)`` tuples; the last entry holds the terminal
    state.
    """
    log = utils.play(environment, agents)
    (last_environment, _, _) = log[-1]
    return last_environment


def test_perfect_player_vs_random_player():
    environment = nim.Environment([3, 4, 5], 0, 2)
    last_environment = _final_environment(
        environment, [nim.PerfectPlayer(), nim.RandomPlayer()])
    assert last_environment.is_terminal()
    assert last_environment.value(0) == 1
    assert last_environment.value(1) == -1


def test_random_player_vs_perfect_player():
    environment = nim.Environment([3, 4, 5], 0, 2)
    last_environment = _final_environment(
        environment, [nim.RandomPlayer(), nim.PerfectPlayer()])
    assert last_environment.is_terminal()
    assert last_environment.value(0) == -1
    assert last_environment.value(1) == 1


def test_perfect_player_vs_perfect_player():
    environment = nim.Environment([3, 4, 5], 0, 2)
    last_environment = _final_environment(
        environment, [nim.PerfectPlayer(), nim.PerfectPlayer()])
    assert last_environment.is_terminal()
    assert last_environment.value(0) == 1
    assert last_environment.value(1) == -1


def test_almost_perfect_player_vs_perfect_player_1():
    environment = nim.Environment([3, 4, 5], 0, 2)
    player_0 = nim.AlmostPerfectPlayer([])  # The player has no weaknesses
    last_environment = _final_environment(
        environment, [player_0, nim.PerfectPlayer()])
    assert last_environment.is_terminal()
    assert last_environment.value(0) == 1
    assert last_environment.value(1) == -1


def test_almost_perfect_player_vs_perfect_player_2():
    environment = nim.Environment([3, 4, 5], 0, 2)
    # The opening position [3, 4, 5] is a weakness, so this player is
    # expected to lose against a perfect opponent.
    # (The original comment — "The player has no weaknesses" — was a
    # copy-paste error from the previous test.)
    player_0 = nim.AlmostPerfectPlayer([[3, 4, 5]])
    last_environment = _final_environment(
        environment, [player_0, nim.PerfectPlayer()])
    assert last_environment.is_terminal()
    assert last_environment.value(0) == -1
    assert last_environment.value(1) == 1
def _train_mcts(environment, agents, mcts_player, iterations):
    """Run ``iterations`` self-play games, backpropagating terminal values.

    After each game, the terminal state's per-agent values are pushed
    into ``mcts_player``'s cached search node via ``backpropagation``.
    When training is done, exploration is switched off so subsequent
    games are played greedily.
    """
    for _ in range(iterations):
        log = utils.play(environment, agents)
        (last_environment, _, _) = log[-1]
        game_value = [last_environment.value(k)
                      for k in range(last_environment.num_agents())]
        mcts_player.cache[last_environment].backpropagation(game_value)
    mcts_player.exploration_param = 0


def test_perfect_player_vs_mcts_player():
    environment = nim.Environment([2, 3], 0, 2)
    player_0 = players.MctsPlayer()
    player_1 = nim.PerfectPlayer()
    _train_mcts(environment, [player_0, player_1], player_0, 10000)
    log = utils.play(environment, [player_0, player_1])
    (last_environment, _, _) = log[-1]
    assert last_environment.is_terminal()
    assert last_environment.value(0) == 1
    assert last_environment.value(1) == -1


def test_almost_perfect_player_vs_mcts_player():
    environment = nim.Environment([2, 3], 0, 2)
    player_0 = nim.AlmostPerfectPlayer([[2, 3]])
    player_1 = players.MctsPlayer()
    # Warm-up game with the MCTS player moving first — presumably seeds
    # its cache with the root position before training; confirm against
    # players.MctsPlayer's cache semantics.
    utils.play(environment, [player_1, player_0])
    _train_mcts(environment, [player_0, player_1], player_1, 1000)
    log = utils.play(environment, [player_0, player_1])
    (last_environment, _, _) = log[-1]
    assert last_environment.is_terminal()
    assert last_environment.value(0) == -1
    assert last_environment.value(1) == 1


def test_random_player_vs_mcts_player():
    environment = nim.Environment([2, 3], 0, 2)
    player_0 = players.MctsPlayer(exploration_param=0.5)
    player_1 = nim.RandomPlayer()
    # (Removed an unused ``player_2 = nim.PerfectPlayer()`` local that
    # was never referenced.)
    _train_mcts(environment, [player_0, player_1], player_0, 10000)
    # The opponent is random, so check the trained player wins every
    # one of several games, not just a single one.
    for _ in range(10):
        log = utils.play(environment, [player_0, player_1])
        (last_environment, _, _) = log[-1]
        assert last_environment.is_terminal()
        assert last_environment.value(0) == 1
        assert last_environment.value(1) == -1
34.378788
94
0.684883
601
4,538
4.866889
0.084859
0.225641
0.172308
0.120342
0.916923
0.909402
0.90188
0.884786
0.884786
0.884786
0
0.042188
0.190392
4,538
131
95
34.641221
0.753947
0.012561
0
0.75
0
0
0
0
0
0
0
0
0.25
1
0.083333
false
0
0.03125
0
0.114583
0
0
0
0
null
1
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
b3f72ae590f28711c00500df2c42c100c46ffdfe
75,408
py
Python
tools/test_tools.py
HeyuanLiu/SPO_plus_public
60f5ea2896ae315e541e220fb8267a7dd485e80c
[ "MIT" ]
1
2022-03-24T09:08:18.000Z
2022-03-24T09:08:18.000Z
tools/test_tools.py
HeyuanLiu/SPO_plus_public
60f5ea2896ae315e541e220fb8267a7dd485e80c
[ "MIT" ]
null
null
null
tools/test_tools.py
HeyuanLiu/SPO_plus_public
60f5ea2896ae315e541e220fb8267a7dd485e80c
[ "MIT" ]
null
null
null
import itertools import numpy as np import torch from torch import nn import pandas as pd from itertools import permutations from tools import loss_func_tools from tools import data_generation_tools from tools import spo_framework from tools import prediction_tools from tools import optimization_oracle_tools from tools import optim_tools def portfolio_model_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False, data_gen_model='portfolio'): n_features = model_params['n_features'] n_samples = model_params['n_samples'] dim_cost = model_params['dim_cost'] # deg_list = data_params['deg'] # tau_list = data_params['tau'] # n_factors_list = data_params['n_factors'] data_param_name, data_param_value = [], [] for param_name in data_params: data_param_name.append(param_name) data_param_value.append(data_params[param_name]) test_set_size = test_params['test_size'] n_trails = test_params['n_trails'] loss_map = { 'spop': loss_func_tools.spop_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } pred_model_map = { 'linear': prediction_tools.linear_prediction_model, 'two_layers': prediction_tools.two_layers_model, } pred_model_back_map = { 'linear': prediction_tools.linear_prediction_model_back, 'two_layers': prediction_tools.two_layers_model_back, } data_gen_map = { 'portfolio': data_generation_tools.portfolio_data, 'shortest_path': data_generation_tools.shortest_path_data, } optimization_params = {'r': np.log(dim_cost) - np.log(dim_cost - 0.9)} # optimization_params = {'r': np.log(dim_cost) / 2} baseline_action = torch.ones(dim_cost) / dim_cost test_results = pd.DataFrame(columns=data_param_name + [ 'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight', 'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline']) def _clone_params(num_params): num_params_copy = {} for num_param in num_params: num_params_copy[num_param] = 
num_params[num_param].detach().clone() return num_params_copy for param_value in itertools.product(*data_param_value, range(n_trails)): if param_value[-1] == 0: print(param_value) param = {} for name, value in zip(data_param_name, param_value): param[name] = value x_test, y_test, model_coef = data_gen_map[data_gen_model]( n_features, test_set_size, dim_cost, param) actions_hindsight, _ = optimization_oracle_tools.entropy_oracle(y_test, optimization_params, False) x_input, y_input, _ = data_gen_map[data_gen_model]( n_features, n_samples, dim_cost, param, model_coef=model_coef) for pred_model in pred_model_list: if pred_model == 'linear': initial_params = { 'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')), 'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')) } elif pred_model == 'two_layers': hidden_dim = model_params.get('hidden_dim', 256) initial_params = { 'W1': torch.from_numpy( (np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float32')), 'W2': torch.from_numpy( (np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float32')), 'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float32')), 'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')), } else: raise Exception( 'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model) for j, loss_func in enumerate(loss_list): if pred_model == 'two_layers' and loss_func == 'l2': lr = 0.01 elif pred_model == 'linear': if loss_func == 'spo': lr = 1. else: lr = 0.1 else: lr = 1. 
spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': pred_model_map[pred_model], 'model_params': _clone_params(initial_params), 'predict_model_back': pred_model_back_map[pred_model], 'optimization_oracle': optimization_oracle_tools.entropy_oracle, 'optimization_params': optimization_params, 'optimization_oracle_back': optimization_oracle_tools.entropy_oracle_back, 'loss_func': loss_map[loss_func], 'optimizer': optim_tools.adam, # 'optimizer': optim_tools.sgd_momentum, # Notes: # SPO, teo layers: lr = 1.0 # 'optimizer_config': {'learning_rate': lr, 'momentum': 0.9, 'lr_decay': 0.995}, 'require_grad': True, }) loss = spo_model.update( x_input, y_input, num_iter=20000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight}, if_test_ini=if_test_ini and (j == 0), ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, ] return test_results def portfolio_model_excess_risk_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False, data_gen_model='portfolio'): n_features = model_params['n_features'] n_samples = model_params['n_samples'] dim_cost = model_params['dim_cost'] # deg_list = data_params['deg'] # tau_list = data_params['tau'] # 
n_factors_list = data_params['n_factors'] data_param_name, data_param_value = [], [] for param_name in data_params: data_param_name.append(param_name) data_param_value.append(data_params[param_name]) test_set_size = test_params['test_size'] n_trails = test_params['n_trails'] loss_map = { 'spop': loss_func_tools.spop_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } pred_model_map = { 'linear': prediction_tools.linear_prediction_model, 'two_layers': prediction_tools.two_layers_model, } pred_model_back_map = { 'linear': prediction_tools.linear_prediction_model_back, 'two_layers': prediction_tools.two_layers_model_back, } data_gen_map = { 'portfolio': data_generation_tools.portfolio_data, 'shortest_path': data_generation_tools.shortest_path_data, } optimization_params = {'r': np.log(dim_cost) - np.log(dim_cost - 0.9)} # optimization_params = {'r': np.log(dim_cost) / 2} baseline_action = torch.ones(dim_cost) / dim_cost test_results = pd.DataFrame(columns=data_param_name + [ 'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight', 'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline', 'normal_mean_spo_loss']) def _clone_params(num_params): num_params_copy = {} for num_param in num_params: num_params_copy[num_param] = num_params[num_param].detach().clone() return num_params_copy for param_value in itertools.product(*data_param_value, range(n_trails)): if param_value[-1] == 0: print(param_value) param = {} for name, value in zip(data_param_name, param_value): param[name] = value x_test, y_test, model_coef = data_gen_map[data_gen_model]( n_features, test_set_size, dim_cost, param) actions_hindsight, _ = optimization_oracle_tools.entropy_oracle(y_test, optimization_params, False) y_mean = model_coef['c_mean'].detach().clone() acction_y_mean, _ = optimization_oracle_tools.entropy_oracle(y_mean, optimization_params, False) x_input, y_input, _ = 
data_gen_map[data_gen_model]( n_features, n_samples, dim_cost, param, model_coef=model_coef) flag_mean_spo_loss = True for pred_model in pred_model_list: if pred_model == 'linear': initial_params = { 'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')), 'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')) } elif pred_model == 'two_layers': hidden_dim = model_params.get('hidden_dim', 256) initial_params = { 'W1': torch.from_numpy( (np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float32')), 'W2': torch.from_numpy( (np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float32')), 'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float32')), 'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')), } else: raise Exception( 'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model) for j, loss_func in enumerate(loss_list): if pred_model == 'two_layers' and loss_func == 'l2': lr = 0.01 elif pred_model == 'linear': if loss_func == 'spo': lr = 1. else: lr = 0.1 else: lr = 1. 
spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': pred_model_map[pred_model], 'model_params': _clone_params(initial_params), 'predict_model_back': pred_model_back_map[pred_model], 'optimization_oracle': optimization_oracle_tools.entropy_oracle, 'optimization_params': optimization_params, 'optimization_oracle_back': optimization_oracle_tools.entropy_oracle_back, 'loss_func': loss_map[loss_func], 'optimizer': optim_tools.adam, # 'optimizer': optim_tools.sgd_momentum, # Notes: # SPO, teo layers: lr = 1.0 # 'optimizer_config': {'learning_rate': lr, 'momentum': 0.9, 'lr_decay': 0.995}, 'require_grad': True, }) loss = spo_model.update( x_input, y_input, num_iter=20000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight, 'cost_mean': y_mean, 'action_cost_mean': acction_y_mean, }, if_test_ini=if_test_ini and (j == 0), if_mean_spo_loss=flag_mean_spo_loss, ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if flag_mean_spo_loss: loss_mean = loss['loss_mean'] normal_spo_loss_mean = loss_mean / hindsight flag_mean_spo_loss = False if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, normal_spo_loss_mean, ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, normal_spo_loss_mean, ] return test_results def portfolio_argmax_test(model_params, data_params, test_params, 
loss_list, pred_model_list, if_test_ini=False, data_gen_model='portfolio'): n_features = model_params['n_features'] n_samples = model_params['n_samples'] dim_cost = model_params['dim_cost'] hidden_dim = model_params.get('hidden_dim', 128) minmax = model_params.get('min/max', 'max') # deg_list = data_params['deg'] # tau_list = data_params['tau'] # n_factors_list = data_params['n_factors'] data_param_name, data_param_value = [], [] for param_name in data_params: data_param_name.append(param_name) data_param_value.append(data_params[param_name]) test_set_size = test_params['test_size'] n_trails = test_params['n_trails'] loss_map = { 'spop': loss_func_tools.spop_argmax_loss_func, # 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } def pred_model_map(pred_model): if pred_model == 'linear': return nn.Sequential( nn.Linear(in_features=n_features, out_features=dim_cost), ) elif pred_model == 'two_layers': return nn.Sequential( nn.Linear(in_features=n_features, out_features=hidden_dim), nn.ReLU(), nn.Linear(in_features=hidden_dim, out_features=dim_cost), ) else: raise Exception('Prediction Model Type Error!') data_gen_map = { 'portfolio': data_generation_tools.portfolio_data, 'shortest_path': data_generation_tools.shortest_path_data, } baseline_action = torch.ones(dim_cost) / dim_cost optimization_params = {'const': None} test_results = pd.DataFrame(columns=data_param_name + [ 'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight', 'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline']) for param_value in itertools.product(*data_param_value, range(n_trails)): if param_value[-1] == 0: print(param_value) param = {} for name, value in zip(data_param_name, param_value): param[name] = value ################################ # Something new here about neg # ################################ neg = minmax == 'max' x_test, y_test, model_coef = data_gen_map[data_gen_model]( 
n_features, test_set_size, dim_cost, param, neg=neg) actions_hindsight, _ = optimization_oracle_tools.softmax_oracle(y_test, optimization_params, False) x_input, y_input, _ = data_gen_map[data_gen_model]( n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=neg) for pred_model in pred_model_list: for j, loss_func in enumerate(loss_list): predict_model = pred_model_map(pred_model) spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': predict_model, 'optimization_oracle': optimization_oracle_tools.softmax_oracle, 'optimization_params': optimization_params, 'loss_func': loss_map[loss_func], 'optimizer': torch.optim.Adam(predict_model.parameters()), 'require_grad': False, 'minibatch_size': 64, 'if_argmax': True, }) loss = spo_model.update( x_input, y_input, num_iter=10000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight}, if_test_ini=if_test_ini and (j == 0), ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, loss_test, hindsight) normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, ] return test_results def barrier_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False, data_gen_model='portfolio'): n_features = 
model_params['n_features'] n_samples = model_params['n_samples'] dim_cost = model_params['dim_cost'] # deg_list = data_params['deg'] # tau_list = data_params['tau'] # n_factors_list = data_params['n_factors'] data_param_name, data_param_value = [], [] for param_name in data_params: data_param_name.append(param_name) data_param_value.append(data_params[param_name]) test_set_size = test_params['test_size'] n_trails = test_params['n_trails'] loss_map = { 'spop': loss_func_tools.spop_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } pred_model_map = { 'linear': prediction_tools.linear_prediction_model, 'two_layers': prediction_tools.two_layers_model, } pred_model_back_map = { 'linear': prediction_tools.linear_prediction_model_back, 'two_layers': prediction_tools.two_layers_model_back, } data_gen_map = { 'portfolio': data_generation_tools.portfolio_data, 'shortest_path': data_generation_tools.shortest_path_data, 'multi_class': data_generation_tools.multi_class_data, } optimization_params = {'r': 2 * dim_cost * np.log(dim_cost)} # optimization_params = {'r': np.log(dim_cost) / 2} baseline_action = torch.ones(dim_cost, dtype=torch.float64) / dim_cost test_results = pd.DataFrame(columns=data_param_name + [ 'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight', 'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline']) def _clone_params(num_params): num_params_copy = {} for num_param in num_params: num_params_copy[num_param] = num_params[num_param].detach().clone() return num_params_copy for param_value in itertools.product(*data_param_value, range(n_trails)): if param_value[-1] == 0: print(param_value) param = {} for name, value in zip(data_param_name, param_value): param[name] = value x_test, y_test, model_coef = data_gen_map[data_gen_model]( n_features, test_set_size, dim_cost, param, neg=False) actions_hindsight, _ = 
optimization_oracle_tools.barrier_oracle(y_test, optimization_params, False) argmin_hindsight = y_test.argmin(dim=1, keepdim=True) x_input, y_input, _ = data_gen_map[data_gen_model]( n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=False) for pred_model in pred_model_list: if pred_model == 'linear': initial_params = { 'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float64')), 'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float64')) } elif pred_model == 'two_layers': hidden_dim = model_params.get('hidden_dim', 256) initial_params = { 'W1': torch.from_numpy( (np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float64')), 'W2': torch.from_numpy( (np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float64')), 'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float64')), 'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float64')), } else: raise Exception( 'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model) for j, loss_func in enumerate(loss_list): if pred_model == 'two_layers' and loss_func == 'l2': lr = 0.01 elif pred_model == 'linear': if loss_func == 'spo': lr = 1. else: lr = 0.1 else: lr = 1. 
spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': pred_model_map[pred_model], 'model_params': _clone_params(initial_params), 'predict_model_back': pred_model_back_map[pred_model], 'optimization_oracle': optimization_oracle_tools.barrier_oracle, 'optimization_params': optimization_params, 'test_optimization_oracle': optimization_oracle_tools.argmin_test, 'test_optimization_params': {'arg': 'min'}, 'optimization_oracle_back': optimization_oracle_tools.barrier_oracle_back, 'loss_func': loss_map[loss_func], 'optimizer': optim_tools.adam, # 'optimizer': optim_tools.sgd_momentum, # Notes: # SPO, teo layers: lr = 1.0 'optimizer_config': {'learning_rate': 0.1, 'lr_decay': 0.99}, 'require_grad': True, 'if_argmax': True, 'minibatch_size': 8, }) loss = spo_model.update( x_input, y_input, num_iter=3000, if_quiet=False, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight, 'argmin_hindsight': argmin_hindsight, }, if_test_ini=if_test_ini and (j == 0), ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, 'test spo loss:', loss_test, 'best cost in hindsight', hindsight) normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, ] return test_results def shortest_path_test(model_params, data_params, test_params, 
loss_list, pred_model_list, if_test_ini=False, data_gen_model='shortest_path'): n_features = model_params['n_features'] n_samples = model_params['n_samples'] dim_cost = model_params['dim_cost'] hidden_dim = model_params.get('hidden_dim', 128) grid_dim = model_params.get('grid_dim', 4) assert dim_cost == 2 * grid_dim * (grid_dim - 1), 'cost dim doesnot match grid dim!' min_max = model_params.get('min_max', 'min') # deg_list = data_params['deg'] # tau_list = data_params['tau'] # n_factors_list = data_params['n_factors'] data_param_name, data_param_value = [], [] for param_name in data_params: data_param_name.append(param_name) data_param_value.append(data_params[param_name]) test_set_size = test_params['test_size'] n_trails = test_params['n_trails'] loss_map = { 'spop': loss_func_tools.spop_loss_func, # 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } def pred_model_map(_pred_model): if _pred_model == 'linear': return nn.Sequential( nn.Linear(in_features=n_features, out_features=dim_cost), ) elif _pred_model == 'two_layers': return nn.Sequential( nn.Linear(in_features=n_features, out_features=hidden_dim), nn.ReLU(), nn.Linear(in_features=hidden_dim, out_features=dim_cost), ) else: raise Exception('Prediction Model Type Error!') data_gen_map = { 'portfolio': data_generation_tools.portfolio_data, 'shortest_path': data_generation_tools.shortest_path_data, } baseline_action = torch.zeros(dim_cost) # optimization_params = {'const': None} test_results = pd.DataFrame(columns=data_param_name + [ 'i', 'n_samples', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight', 'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline']) def _path_decoding(_grid_dim, path_encoded): loc_x, loc_y = 0, 0 num_edges = _grid_dim * (_grid_dim - 1) path_decoded = np.zeros(2 * num_edges) for direction in path_encoded: if direction: path_decoded[1 * loc_x + (_grid_dim - 1) * loc_y + num_edges] = 1 loc_x += 1 
else: path_decoded[(_grid_dim - 1) * loc_x + 1 * loc_y] = 1 loc_y += 1 return path_decoded def _construct_grid_path(_grid_dim): assert _grid_dim >= 2, 'Grid dim at least 2!' path_0 = [0] * (_grid_dim - 1) + [1] * (_grid_dim - 1) paths_encoded = list(set(permutations(path_0))) paths = [] for path_encoded in paths_encoded: paths.append(_path_decoding(_grid_dim, path_encoded)) paths = np.array(paths, dtype='float32') return torch.from_numpy(paths) optimization_params = { 'paths': _construct_grid_path(grid_dim), 'min_max': min_max, } for param_value in itertools.product(*data_param_value, range(n_trails)): if param_value[-1] == 0: print(param_value) param = {} for name, value in zip(data_param_name, param_value): param[name] = value ################################ # Something new here about neg # ################################ neg = min_max == 'max' x_test, y_test, model_coef = data_gen_map[data_gen_model]( n_features, test_set_size, dim_cost, param, neg=neg) actions_hindsight, _ = optimization_oracle_tools.shortest_path_oracle(y_test, optimization_params, False) x_input, y_input, _ = data_gen_map[data_gen_model]( n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=neg) for pred_model in pred_model_list: print(pred_model) for j, loss_func in enumerate(loss_list): predict_model = pred_model_map(pred_model) spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': predict_model, 'optimization_oracle': optimization_oracle_tools.shortest_path_oracle, 'optimization_params': optimization_params, 'loss_func': loss_map[loss_func], 'optimizer': torch.optim.Adam(predict_model.parameters()), 'require_grad': False, 'minibatch_size': 64, 'if_argmax': True, }) loss = spo_model.update( x_input, y_input, num_iter=10000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight}, if_test_ini=if_test_ini and (j == 0), ) loss_test = 
loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, 'test spo loss:', loss_test, 'best cost in hindsight', hindsight) normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ n_samples, loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, ] return test_results def barrier_vs_argmin_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False, data_gen_model='portfolio'): n_features = model_params['n_features'] n_samples_list = model_params['n_samples'] dim_cost = model_params['dim_cost'] neg = model_params.get('neg', False) # deg_list = data_params['deg'] # tau_list = data_params['tau'] # n_factors_list = data_params['n_factors'] data_param_name, data_param_value = [], [] for param_name in data_params: data_param_name.append(param_name) data_param_value.append(data_params[param_name]) test_set_size = test_params['test_size'] n_trails = test_params['n_trails'] loss_map_barrier = { 'spop': loss_func_tools.spop_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } loss_map_argmin = { 'spop': loss_func_tools.spop_argmax_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } pred_model_map = { 'linear': prediction_tools.linear_prediction_model, 'two_layers': prediction_tools.two_layers_model, } pred_model_back_map = { 'linear': 
prediction_tools.linear_prediction_model_back, 'two_layers': prediction_tools.two_layers_model_back, } data_gen_map = { 'portfolio': data_generation_tools.portfolio_data, 'shortest_path': data_generation_tools.shortest_path_data, 'multi_class': data_generation_tools.multi_class_data, } optimization_params = {'r': 2 * dim_cost * np.log(dim_cost)} # optimization_params = {'r': np.log(dim_cost) / 2} baseline_action = torch.ones(dim_cost) / dim_cost test_results = pd.DataFrame(columns=data_param_name + [ 'n_samples', 'i', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight', 'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline', 'type']) def _clone_params(num_params): num_params_copy = {} for num_param in num_params: num_params_copy[num_param] = num_params[num_param].detach().clone() return num_params_copy def _pred_model_map(_pred_model): if _pred_model == 'linear': return nn.Sequential( nn.Linear(in_features=n_features, out_features=dim_cost), ) elif _pred_model == 'two_layers': return nn.Sequential( nn.Linear(in_features=n_features, out_features=hidden_dim), nn.ReLU(), nn.Linear(in_features=hidden_dim, out_features=dim_cost), ) else: raise Exception('Prediction Model Type Error!') for param_value_tuple in itertools.product(*data_param_value, n_samples_list, range(n_trails)): param_value = list(param_value_tuple) n_samples = param_value[-2] if param_value[-1] == 0: print(param_value) param = {} for name, value in zip(data_param_name, param_value[:-2]): param[name] = value print(param, param_value) x_test, y_test, model_coef = data_gen_map[data_gen_model]( n_features, test_set_size, dim_cost, param, neg=neg) actions_hindsight, _ = optimization_oracle_tools.barrier_oracle(y_test, optimization_params, False) argmin_hindsight = y_test.argmin(dim=1, keepdim=True) x_input, y_input, _ = data_gen_map[data_gen_model]( n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=neg) for pred_model in pred_model_list: if pred_model == 
'linear': initial_params = { 'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')), 'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')) } elif pred_model == 'two_layers': hidden_dim = model_params.get('hidden_dim', 256) initial_params = { 'W1': torch.from_numpy( (np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float32')), 'W2': torch.from_numpy( (np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float32')), 'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float32')), 'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')), } else: raise Exception( 'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model) for j, loss_func in enumerate(loss_list): spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': pred_model_map[pred_model], 'model_params': _clone_params(initial_params), 'predict_model_back': pred_model_back_map[pred_model], 'optimization_oracle': optimization_oracle_tools.barrier_oracle, 'optimization_params': optimization_params, 'test_optimization_oracle': optimization_oracle_tools.argmin_test, 'test_optimization_params': {'arg': 'min'}, 'optimization_oracle_back': optimization_oracle_tools.barrier_oracle_back, 'loss_func': loss_map_barrier[loss_func], 'optimizer': optim_tools.adam, # 'optimizer': optim_tools.sgd_momentum, # Notes: # SPO, teo layers: lr = 1.0 'optimizer_config': {'learning_rate': 0.1, 'lr_decay': 0.999}, 'require_grad': True, 'if_argmax': True, }) loss = spo_model.update( x_input, y_input, num_iter=5000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight, 'argmin_hindsight': argmin_hindsight, }, if_test_ini=if_test_ini and (j == 0), ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, loss_test, 
hindsight, 'barrier') normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, 'barrier', ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, 'barrier', ] print('argmin start.') predict_model = _pred_model_map(pred_model) spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': predict_model, 'optimization_oracle': optimization_oracle_tools.softmax_oracle, 'optimization_params': {'const': None}, 'loss_func': loss_map_argmin[loss_func], 'optimizer': torch.optim.Adam(predict_model.parameters()), 'require_grad': False, 'minibatch_size': min(64, n_samples), 'if_argmax': True, }) loss = spo_model.update( x_input, y_input, num_iter=10000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': argmin_hindsight}, if_test_ini=if_test_ini and (j == 0), ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, loss_test, hindsight, 'argmin') normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, 
train_spo, spo_ini, baseline_spo, 'argmin', ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, 'argmin', ] return test_results def barrier_vs_argmin_excess_risk_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False, data_gen_model='portfolio'): n_features = model_params['n_features'] n_samples_list = model_params['n_samples'] dim_cost = model_params['dim_cost'] neg = model_params.get('neg', False) # deg_list = data_params['deg'] # tau_list = data_params['tau'] # n_factors_list = data_params['n_factors'] data_param_name, data_param_value = [], [] for param_name in data_params: data_param_name.append(param_name) data_param_value.append(data_params[param_name]) test_set_size = test_params['test_size'] n_trails = test_params['n_trails'] loss_map_barrier = { 'spop': loss_func_tools.spop_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } loss_map_argmin = { 'spop': loss_func_tools.spop_argmax_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } pred_model_map = { 'linear': prediction_tools.linear_prediction_model, 'two_layers': prediction_tools.two_layers_model, } pred_model_back_map = { 'linear': prediction_tools.linear_prediction_model_back, 'two_layers': prediction_tools.two_layers_model_back, } data_gen_map = { 'portfolio': data_generation_tools.portfolio_data, 'shortest_path': data_generation_tools.shortest_path_data, 'multi_class': data_generation_tools.multi_class_data, } optimization_params = {'r': 2 * dim_cost * np.log(dim_cost)} # optimization_params = {'r': np.log(dim_cost) / 2} baseline_action = torch.ones(dim_cost) / dim_cost test_results = pd.DataFrame(columns=data_param_name + [ 'n_samples', 'i', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight', 
'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline', 'normal_mean_spo_loss', 'type']) def _clone_params(num_params): num_params_copy = {} for num_param in num_params: num_params_copy[num_param] = num_params[num_param].detach().clone() return num_params_copy def _pred_model_map(_pred_model): if _pred_model == 'linear': return nn.Sequential( nn.Linear(in_features=n_features, out_features=dim_cost), ) elif _pred_model == 'two_layers': return nn.Sequential( nn.Linear(in_features=n_features, out_features=hidden_dim), nn.ReLU(), nn.Linear(in_features=hidden_dim, out_features=dim_cost), ) else: raise Exception('Prediction Model Type Error!') for param_value_tuple in itertools.product(*data_param_value, n_samples_list, range(n_trails)): param_value = list(param_value_tuple) n_samples = param_value[-2] if param_value[-1] == 0: print(param_value) param = {} for name, value in zip(data_param_name, param_value[:-2]): param[name] = value print(param, param_value) x_test, y_test, model_coef = data_gen_map[data_gen_model]( n_features, test_set_size, dim_cost, param, neg=neg) actions_hindsight, _ = optimization_oracle_tools.barrier_oracle(y_test, optimization_params, False) argmin_hindsight = y_test.argmin(dim=1, keepdim=True) x_input, y_input, _ = data_gen_map[data_gen_model]( n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=neg) y_mean = model_coef['c_mean'].detach().clone() action_y_mean, _ = optimization_oracle_tools.barrier_oracle(y_mean, optimization_params, False) argmin_hindsight_ymean = y_mean.argmin(dim=1, keepdim=True) flag_mean_spo_loss = True for pred_model in pred_model_list: if pred_model == 'linear': initial_params = { 'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')), 'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')) } elif pred_model == 'two_layers': hidden_dim = model_params.get('hidden_dim', 256) initial_params = { 'W1': torch.from_numpy( 
(np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float32')), 'W2': torch.from_numpy( (np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float32')), 'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float32')), 'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')), } else: raise Exception( 'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model) for j, loss_func in enumerate(loss_list): # spo_model = spo_framework.SpoTest({ # 'n_features': n_features, # 'dim_cost': dim_cost, # 'baseline_action': baseline_action, # 'predict_model': pred_model_map[pred_model], # 'model_params': _clone_params(initial_params), # 'predict_model_back': pred_model_back_map[pred_model], # 'optimization_oracle': optimization_oracle_tools.barrier_oracle, # 'optimization_params': optimization_params, # 'test_optimization_oracle': optimization_oracle_tools.argmin_test, # 'test_optimization_params': {'arg': 'min'}, # 'optimization_oracle_back': optimization_oracle_tools.barrier_oracle_back, # 'loss_func': loss_map_barrier[loss_func], # 'optimizer': optim_tools.adam, # # 'optimizer': optim_tools.sgd_momentum, # # Notes: # # SPO, teo layers: lr = 1.0 # 'optimizer_config': {'learning_rate': 0.1, 'lr_decay': 0.999}, # 'require_grad': True, # 'if_argmax': True, # }) # # loss = spo_model.update( # x_input, y_input, num_iter=20000, if_quiet=True, # test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight, # 'argmin_hindsight': argmin_hindsight, 'cost_mean': y_mean, # 'action_cost_mean': action_y_mean, 'argmin_hindsight_ymean': argmin_hindsight_ymean, # }, # if_test_ini=if_test_ini and (j == 0), # ) # # loss_test = loss['loss_spo_test'] # hindsight = loss['hindsight'] # print(loss_func, pred_model, loss_test, hindsight, 'barrier') # normal_spo = loss_test / hindsight # train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight # if 
loss['loss_spo_baseline'] is not None: # baseline_spo = loss['loss_spo_baseline'] / hindsight # else: # baseline_spo = None # # if if_test_ini: # if j == 0: # loss_ini = loss['loss_spo_test_ini'] # hind_ini = loss['hindsight_ini'] # spo_ini = loss_ini / hind_ini # test_results.loc[len(test_results.index)] = list(param_value) + [ # loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, # baseline_spo, 'barrier', # ] # else: # test_results.loc[len(test_results.index)] = list(param_value) + [ # loss_func, pred_model, normal_spo, hindsight, train_spo, None, # baseline_spo, 'barrier', # ] print('argmin start.') predict_model = _pred_model_map(pred_model) spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': predict_model, 'optimization_oracle': optimization_oracle_tools.softmax_oracle, 'optimization_params': {'const': None}, 'loss_func': loss_map_argmin[loss_func], 'optimizer': torch.optim.Adam(predict_model.parameters()), 'require_grad': False, 'minibatch_size': min(64, n_samples), 'if_argmax': True, }) loss = spo_model.update( x_input, y_input, num_iter=20000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': argmin_hindsight, 'cost_mean': y_mean, 'action_cost_mean': argmin_hindsight_ymean, }, if_test_ini=if_test_ini and (j == 0), if_mean_spo_loss=flag_mean_spo_loss, ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, loss_test, hindsight, 'argmin') normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None ########## New ############### if flag_mean_spo_loss: loss_mean = loss['loss_mean'] normal_spo_loss_mean = loss_mean / hindsight flag_mean_spo_loss = False ########## New ############### if if_test_ini: if j == 0: loss_ini = 
loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, normal_spo_loss_mean, 'argmin', ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, normal_spo_loss_mean, 'argmin', ] return test_results def entropy_vs_argmin_test(model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False, data_gen_model='portfolio'): n_features = model_params['n_features'] n_samples_list = model_params['n_samples'] dim_cost = model_params['dim_cost'] neg = model_params.get('neg', False) # deg_list = data_params['deg'] # tau_list = data_params['tau'] # n_factors_list = data_params['n_factors'] data_param_name, data_param_value = [], [] for param_name in data_params: data_param_name.append(param_name) data_param_value.append(data_params[param_name]) test_set_size = test_params['test_size'] n_trails = test_params['n_trails'] loss_map_barrier = { 'spop': loss_func_tools.spop_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } loss_map_argmin = { 'spop': loss_func_tools.spop_argmax_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } pred_model_map = { 'linear': prediction_tools.linear_prediction_model, 'two_layers': prediction_tools.two_layers_model, } pred_model_back_map = { 'linear': prediction_tools.linear_prediction_model_back, 'two_layers': prediction_tools.two_layers_model_back, } data_gen_map = { 'portfolio': data_generation_tools.portfolio_data, 'shortest_path': data_generation_tools.shortest_path_data, 'multi_class': data_generation_tools.multi_class_data, } optimization_params = {'r': 2 * dim_cost * np.log(dim_cost)} # optimization_params = 
{'r': np.log(dim_cost) / 2} baseline_action = torch.ones(dim_cost) / dim_cost test_results = pd.DataFrame(columns=data_param_name + [ 'n_samples', 'i', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight', 'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline', 'type']) def _clone_params(num_params): num_params_copy = {} for num_param in num_params: num_params_copy[num_param] = num_params[num_param].detach().clone() return num_params_copy def _pred_model_map(_pred_model): if _pred_model == 'linear': return nn.Sequential( nn.Linear(in_features=n_features, out_features=dim_cost), ) elif _pred_model == 'two_layers': return nn.Sequential( nn.Linear(in_features=n_features, out_features=hidden_dim), nn.ReLU(), nn.Linear(in_features=hidden_dim, out_features=dim_cost), ) else: raise Exception('Prediction Model Type Error!') for param_value_tuple in itertools.product(*data_param_value, n_samples_list, range(n_trails)): param_value = list(param_value_tuple) n_samples = param_value[-2] if param_value[-1] == 0: print(param_value) param = {} for name, value in zip(data_param_name, param_value[:-2]): param[name] = value print(param, param_value) x_test, y_test, model_coef = data_gen_map[data_gen_model]( n_features, test_set_size, dim_cost, param, neg=neg) actions_hindsight, _ = optimization_oracle_tools.barrier_oracle(y_test, optimization_params, False) argmin_hindsight = y_test.argmin(dim=1, keepdim=True) x_input, y_input, _ = data_gen_map[data_gen_model]( n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=neg) for pred_model in pred_model_list: if pred_model == 'linear': initial_params = { 'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')), 'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')) } elif pred_model == 'two_layers': hidden_dim = model_params.get('hidden_dim', 256) initial_params = { 'W1': torch.from_numpy( (np.random.normal(size=(n_features, hidden_dim)) / 
np.sqrt(hidden_dim)).astype('float32')), 'W2': torch.from_numpy( (np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float32')), 'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float32')), 'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')), } else: raise Exception( 'Prediction model can only be "linear" or "two_layers". The input is: ' + pred_model) for j, loss_func in enumerate(loss_list): spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': pred_model_map[pred_model], 'model_params': _clone_params(initial_params), 'predict_model_back': pred_model_back_map[pred_model], 'optimization_oracle': optimization_oracle_tools.barrier_oracle, 'optimization_params': optimization_params, 'test_optimization_oracle': optimization_oracle_tools.argmin_test, 'test_optimization_params': {'arg': 'min'}, 'optimization_oracle_back': optimization_oracle_tools.barrier_oracle_back, 'loss_func': loss_map_barrier[loss_func], 'optimizer': optim_tools.adam, # 'optimizer': optim_tools.sgd_momentum, # Notes: # SPO, teo layers: lr = 1.0 'optimizer_config': {'learning_rate': 0.1, 'lr_decay': 0.999}, 'require_grad': True, 'if_argmax': True, }) loss = spo_model.update( x_input, y_input, num_iter=5000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight, 'argmin_hindsight': argmin_hindsight, }, if_test_ini=if_test_ini and (j == 0), ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, loss_test, hindsight, 'barrier') normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini 
test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, 'barrier', ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, 'barrier', ] print('argmin start.') predict_model = _pred_model_map(pred_model) spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': predict_model, 'optimization_oracle': optimization_oracle_tools.softmax_oracle, 'optimization_params': {'const': None}, 'loss_func': loss_map_argmin[loss_func], 'optimizer': torch.optim.Adam(predict_model.parameters()), 'require_grad': False, 'minibatch_size': min(64, n_samples), 'if_argmax': True, }) loss = spo_model.update( x_input, y_input, num_iter=10000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': argmin_hindsight}, if_test_ini=if_test_ini and (j == 0), ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, loss_test, hindsight, 'argmin') normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, 'argmin', ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, 'argmin', ] return test_results def multi_class_barrier_vs_argmin_test( model_params, data_params, test_params, loss_list, pred_model_list, if_test_ini=False, 
data_gen_model='portfolio', ): n_features = model_params['n_features'] n_samples_list = model_params['n_samples'] dim_cost = model_params['dim_cost'] neg = model_params.get('neg', False) # deg_list = data_params['deg'] # tau_list = data_params['tau'] # n_factors_list = data_params['n_factors'] data_param_name, data_param_value = [], [] for param_name in data_params: data_param_name.append(param_name) data_param_value.append(data_params[param_name]) test_set_size = test_params['test_size'] n_trails = test_params['n_trails'] loss_map_barrier = { 'spop': loss_func_tools.spop_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } loss_map_argmin = { 'spop': loss_func_tools.spop_argmax_loss_func, 'spo': loss_func_tools.spo_loss_func, 'l2': loss_func_tools.mse_loss_func, 'l1': loss_func_tools.abs_loss_func, } pred_model_map = { 'linear': prediction_tools.linear_prediction_model, 'two_layers': prediction_tools.two_layers_model, } pred_model_back_map = { 'linear': prediction_tools.linear_prediction_model_back, 'two_layers': prediction_tools.two_layers_model_back, } data_gen_map = { 'portfolio': data_generation_tools.portfolio_data, 'shortest_path': data_generation_tools.shortest_path_data, 'multi_class': data_generation_tools.multi_class_data, } optimization_params = {'r': 2 * dim_cost * np.log(dim_cost)} # optimization_params = {'r': np.log(dim_cost) / 2} baseline_action = torch.ones(dim_cost) / dim_cost test_results = pd.DataFrame(columns=data_param_name + [ 'n_samples', 'i', 'surrogate_loss_func', 'pred_model', 'normalized_spo_loss', 'hindsight', 'train_normal_spo', 'normalized_spo_ini', 'normal_spo_baseline', 'type']) def _clone_params(num_params): num_params_copy = {} for num_param in num_params: num_params_copy[num_param] = num_params[num_param].detach().clone() return num_params_copy def _pred_model_map(_pred_model): if _pred_model == 'linear': return nn.Sequential( 
nn.Linear(in_features=n_features, out_features=dim_cost), ) elif _pred_model == 'two_layers': return nn.Sequential( nn.Linear(in_features=n_features, out_features=hidden_dim), nn.ReLU(), nn.Linear(in_features=hidden_dim, out_features=dim_cost), ) else: raise Exception('Prediction Model Type Error!') for param_value_tuple in itertools.product(*data_param_value, n_samples_list, range(n_trails)): param_value = list(param_value_tuple) n_samples = param_value[-2] if param_value[-1] == 0: print(param_value) param = {} for name, value in zip(data_param_name, param_value[:-2]): param[name] = value print(param, param_value) x_test, y_test, model_coef = data_gen_map[data_gen_model]( n_features, test_set_size, dim_cost, param, neg=neg) actions_hindsight, _ = optimization_oracle_tools.barrier_oracle(y_test, optimization_params, False) argmin_hindsight = y_test.argmin(dim=1, keepdim=True) x_input, y_input, _ = data_gen_map[data_gen_model]( n_features, n_samples, dim_cost, param, model_coef=model_coef, neg=neg) for pred_model in pred_model_list: if pred_model == 'linear': initial_params = { 'W': torch.from_numpy(np.random.normal(size=(n_features, dim_cost)).astype('float32')), 'b': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')) } elif pred_model == 'two_layers': hidden_dim = model_params.get('hidden_dim', 256) initial_params = { 'W1': torch.from_numpy( (np.random.normal(size=(n_features, hidden_dim)) / np.sqrt(hidden_dim)).astype('float32')), 'W2': torch.from_numpy( (np.random.normal(size=(hidden_dim, dim_cost)) / np.sqrt(dim_cost)).astype('float32')), 'b1': torch.from_numpy(np.random.normal(size=hidden_dim).astype('float32')), 'b2': torch.from_numpy(np.random.normal(size=dim_cost).astype('float32')), } else: raise Exception( 'Prediction model can only be "linear" or "two_layers". 
The input is: ' + pred_model) for j, loss_func in enumerate(loss_list): if loss_func == 'spop': spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': pred_model_map[pred_model], 'model_params': _clone_params(initial_params), 'predict_model_back': pred_model_back_map[pred_model], 'optimization_oracle': optimization_oracle_tools.barrier_oracle, 'optimization_params': optimization_params, 'test_optimization_oracle': optimization_oracle_tools.argmin_test, 'test_optimization_params': {'arg': 'min'}, 'optimization_oracle_back': optimization_oracle_tools.barrier_oracle_back, 'loss_func': loss_map_barrier[loss_func], 'optimizer': optim_tools.adam, # 'optimizer': optim_tools.sgd_momentum, # Notes: # SPO, teo layers: lr = 1.0 'optimizer_config': {'learning_rate': 0.1, 'lr_decay': 0.999}, 'require_grad': True, 'if_argmax': True, }) loss = spo_model.update( x_input, y_input, num_iter=5000, if_quiet=True, test_set={ 'features': x_test, 'cost_real': y_test, 'action_hindsight': actions_hindsight, 'argmin_hindsight': argmin_hindsight, }, if_test_ini=if_test_ini and (j == 0), ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, loss_test, hindsight, 'barrier') normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func + 'barrier', pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, 'barrier', ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func + 'barrier', pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, 'barrier', ] print('argmin 
start.') predict_model = _pred_model_map(pred_model) spo_model = spo_framework.SpoTest({ 'n_features': n_features, 'dim_cost': dim_cost, 'baseline_action': baseline_action, 'predict_model': predict_model, 'optimization_oracle': optimization_oracle_tools.softmax_oracle, 'optimization_params': {'const': None}, 'loss_func': loss_map_argmin[loss_func], 'optimizer': torch.optim.Adam(predict_model.parameters()), 'require_grad': False, 'minibatch_size': min(64, n_samples), 'if_argmax': True, }) loss = spo_model.update( x_input, y_input, num_iter=10000, if_quiet=True, test_set={'features': x_test, 'cost_real': y_test, 'action_hindsight': argmin_hindsight}, if_test_ini=if_test_ini and (j == 0), ) loss_test = loss['loss_spo_test'] hindsight = loss['hindsight'] print(loss_func, pred_model, loss_test, hindsight, 'argmin') normal_spo = loss_test / hindsight train_spo = np.array(loss['loss_spo'][-100:-1]).mean() / hindsight if loss['loss_spo_baseline'] is not None: baseline_spo = loss['loss_spo_baseline'] / hindsight else: baseline_spo = None if if_test_ini: if j == 0: loss_ini = loss['loss_spo_test_ini'] hind_ini = loss['hindsight_ini'] spo_ini = loss_ini / hind_ini test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, spo_ini, baseline_spo, 'argmin', ] else: test_results.loc[len(test_results.index)] = list(param_value) + [ loss_func, pred_model, normal_spo, hindsight, train_spo, None, baseline_spo, 'argmin', ] return test_results
45.481303
116
0.566399
8,575
75,408
4.56898
0.02484
0.039409
0.01825
0.022129
0.973506
0.967661
0.962148
0.961612
0.959111
0.959111
0
0.009346
0.331676
75,408
1,657
117
45.508751
0.768062
0.055962
0
0.856499
0
0
0.112075
0.00474
0
0
0
0
0.001503
1
0.018032
false
0
0.009016
0
0.049587
0.021037
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
b60ecf2f54501e6b74358a678084541de56dae92
3,501
py
Python
PYTHON PROJECT 1/Main Questions/impulse_and_step.py
Pavan1199/Python-Signal-Processing-Project
9f9b71c1e44f858ec3dbec8275079e30b6c0d47a
[ "MIT" ]
1
2019-05-03T17:19:05.000Z
2019-05-03T17:19:05.000Z
PYTHON PROJECT 1/Main Questions/impulse_and_step.py
Pavan1199/Python-Signal-Processing-Project
9f9b71c1e44f858ec3dbec8275079e30b6c0d47a
[ "MIT" ]
null
null
null
PYTHON PROJECT 1/Main Questions/impulse_and_step.py
Pavan1199/Python-Signal-Processing-Project
9f9b71c1e44f858ec3dbec8275079e30b6c0d47a
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt import numpy as np from impulse import impulse from step import step from comb_step_ramp4 import comb_step_ramp4 t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) t = np.arange(-10, 10,0.01) i=[] impulse(t,i) z=[] for j in range(len(t)): z.append(i[j]*x[j]) plt.step(t,z) plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*d(t)') plt.show() t = np.arange(-10, 10,0.01) i=[] impulse(-t,i) t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) z=[] for j in range(len(t)): z.append(i[j]*x[j]) plt.step(t,z) plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*d(-t)') plt.show() t = np.arange(-10, 10,0.01) t[:]=[x+2 for x in t] i=[] impulse(t,i) t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) z=[] for j in range(len(t)): z.append(round(i[j]*x[j],3)) plt.step(t,z) plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*d(t+2)') plt.show() t = np.arange(-10, 10,0.01) t[:]=[x-2 for x in t] i=[] impulse(t,i) t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) z=[] for j in range(len(t)): z.append(round(i[j]*x[j],3)) plt.step(t,z) plt.axhline(0, color='black') plt.axvline(0, color='black') plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*d(t-2)') plt.show() t = np.arange(-10, 10,0.01) i=[] impulse(t,i) t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) z=[] for j in range(len(t)): z.append(round(i[j]*x[j],3)) z[:]=[x*4 for x in z] plt.step(t,z) plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*4*d(t)') plt.show() t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) t = np.arange(-10, 10,0.01) u=[] step(t,u) z=[] for j in range(len(t)): z.append(u[j]*x[j]) plt.step(t,z) plt.axhline(0, color='black') plt.axvline(0, color='black') plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*u(t)') plt.show() t = np.arange(-10, 10,0.01) u=[] step(-t,u) t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) z=[] for j in range(len(t)): z.append(u[j]*x[j]) plt.step(t,z) 
plt.axhline(0, color='black') plt.axvline(0, color='black') plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*u(-t)') plt.show() t = np.arange(-10, 10,0.01) t[:]=[x+2 for x in t] u=[] step(t,u) t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) z=[] for j in range(len(t)): z.append(round(u[j]*x[j],3)) plt.step(t,z) plt.axhline(0, color='black') plt.axvline(0, color='black') plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*u(t+2)') plt.show() t = np.arange(-10, 10,0.01) t[:]=[x-2 for x in t] u=[] step(t,u) print(i) t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) z=[] for j in range(len(t)): z.append(round(u[j]*x[j],3)) plt.step(t,z) plt.axhline(0, color='black') plt.axvline(0, color='black') plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*u(t-2)') plt.show() t = np.arange(-10, 10,0.01) u=[] step(t,u) t = np.arange(-10, 10,0.01) x=[] comb_step_ramp4(t,x) z=[] for j in range(len(t)): z.append(round(u[j]*x[j],3)) z[:]=[x*4 for x in z] plt.step(t,z) plt.axhline(0, color='black') plt.axvline(0, color='black') plt.xlabel('time') plt.ylabel('function value') plt.title('x(t)*4*u(t)') plt.show()
15.355263
44
0.562411
707
3,501
2.751061
0.062235
0.030848
0.092545
0.113111
0.931105
0.931105
0.931105
0.931105
0.931105
0.931105
0
0.064175
0.189946
3,501
227
45
15.422907
0.62165
0
0
0.890244
0
0
0.10507
0
0
0
0
0
0
1
0
false
0
0.030488
0
0.030488
0.006098
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
373dbc66e63bb9b35a12354196f929cafa6dafd1
2,895
py
Python
Spielen_Konachan_net/Picture_Download_Und_Save.py
ChrisVicky/ImageDownloader
d8dd0bc60b08ce7adede16f8a4ad2617b9e0b04e
[ "Apache-2.0" ]
1
2021-01-02T11:02:46.000Z
2021-01-02T11:02:46.000Z
Spielen_Konachan_net/Picture_Download_Und_Save.py
ChrisVicky/CodingHomeWork2020
b8946c1d32c3aaecb3de5cc8247a9e5a4653a778
[ "Apache-2.0" ]
null
null
null
Spielen_Konachan_net/Picture_Download_Und_Save.py
ChrisVicky/CodingHomeWork2020
b8946c1d32c3aaecb3de5cc8247a9e5a4653a778
[ "Apache-2.0" ]
null
null
null
import time import os import requests import FileStuff def SpaceCut(string): for i in range(1, 4): string = string[string.find('%20')+3:] return string def DownloadUndSavePictures_(url, name, Num): try: path = FileStuff.getFolderName() if not os.path.exists(path): os.makedirs(path) time.sleep(0.01) picture = requests.get(url) chunk_size = 1024 size = 0 start = time.time() picture_size = int(picture.headers['content-length']) File_Name = path + '\\' + name + '.jpg' print('[ %s ][第 %d 张][大小 %.2fM ]' % (name, Num, float(float(picture_size)/1024/1024))) print('[本地地址]:%s' % File_Name) print('[下载源]:%s' % url) file = open(File_Name, 'wb') for data in picture.iter_content(chunk_size=chunk_size): file.write(data) size += len(data) print('\r'+'[正在下载]:%s %d%%' % ('#'*int(50*size/picture_size), min(int(100*size/picture_size), 100)), end='') # time.sleep(0.001) end = time.time() print('\n[用时]:%.2fs\n' % (end-start)) except Exception as e: exit(e) FileStuff.BackUp(str(name), str(url)) return def DownloadUndSavePictures(url, name, Num, TotalNum): try: path = FileStuff.getFolderName() if not os.path.exists(path): os.makedirs(path) time.sleep(0.01) picture = requests.get(url) chunk_size = 1024 size = 0 start = time.time() picture_size = int(picture.headers['content-length']) File_Name = path + '\\' + name + '.jpg' print('[ %s ][第 %d 张/共 %d 张][大小 %.2fM ]' % (name, Num, TotalNum, float(float(picture_size)/1024/1024))) print('[本地地址]:%s' % File_Name) print('[下载源]:%s' % url) file = open(File_Name, 'wb') for data in picture.iter_content(chunk_size=chunk_size): file.write(data) size += len(data) print('\r'+'[正在下载]:%s %d%%' % ('#'*int(50*size/picture_size), min(int(100*size/picture_size), 100)), end='') # time.sleep(0.001) end = time.time() print('\n[用时]:%.2fs\n' % (end-start)) except Exception as e: exit(e) FileStuff.BackUp(str(name), str(url)) return # # DownloadPicture( # 
'https://konachan.net/image/b28c76f1c19606871f55bcff9050f4e5/Konachan.com%20-%20221153%20animal%20bird%20blue_eyes%20dress%20ein_eis%20hat%20long_hair%20yahari_ore_no_seishun_love_come_wa_machigatteiru.%20yukinoshita_yukino.jpg', # 'Yukinoshita','\Yukino',1,2 # ) # DownloadPicture( # 'https://konachan.net/image/b28c76f1c19606871f55bcff9050f4e5/Konachan.com%20-%20221153%20animal%20bird%20blue_eyes%20dress%20ein_eis%20hat%20long_hair%20yahari_ore_no_seishun_love_come_wa_machigatteiru.%20yukinoshita_yukino.jpg', # 'Yukinoshita2','\Yukino',2,2 # )
37.115385
235
0.601382
372
2,895
4.55914
0.295699
0.051887
0.023585
0.038915
0.899764
0.857311
0.841981
0.841981
0.841981
0.841981
0
0.075881
0.235233
2,895
78
236
37.115385
0.690154
0.209326
0
0.8
0
0
0.087796
0
0
0
0
0
0
1
0.05
false
0
0.066667
0
0.166667
0.166667
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
374de96317e838e9f7dc7fab31ffa44a824aba20
37,603
py
Python
tempest/api/workloadmgr/regression/test_regression.py
deepanshusagar/tempest-1
2c7609ef72a606e2b6c39d185f98aa28b4d20afa
[ "Apache-2.0" ]
null
null
null
tempest/api/workloadmgr/regression/test_regression.py
deepanshusagar/tempest-1
2c7609ef72a606e2b6c39d185f98aa28b4d20afa
[ "Apache-2.0" ]
null
null
null
tempest/api/workloadmgr/regression/test_regression.py
deepanshusagar/tempest-1
2c7609ef72a606e2b6c39d185f98aa28b4d20afa
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.api.workloadmgr import base from tempest import config from tempest import test from tempest import tvaultconf import json import sys from tempest import api from oslo_log import log as logging from tempest.common import waiters from tempest import tvaultconf from tempest import reporting import time from tempest import command_argument_string from tempest.util import cli_parser from tempest.util import query_data import collections LOG = logging.getLogger(__name__) CONF = config.CONF class WorkloadsTest(base.BaseWorkloadmgrTest): credentials = ['primary'] @classmethod def setup_clients(cls): super(WorkloadsTest, cls).setup_clients() cls.client = cls.os.wlm_client @test.pre_req({'type':'bootfromvol_workload_medium'}) @test.attr(type='smoke') @test.idempotent_id('9fe07175-912e-49a5-a629-5f522eada4c9') def test_1_regression(self): reporting.add_test_script(str(__name__)+"_one_click_restore_bootfromvol") try: if self.exception != "": LOG.debug("pre req failed") reporting.add_test_step(str(self.exception), tvaultconf.FAIL) raise Exception (str(self.exception)) LOG.debug("pre req completed") self.created=False #Delete the original instance self.delete_vms(self.workload_instances) self.delete_key_pair(tvaultconf.key_pair_name) self.delete_security_group(self.security_group_id) self.delete_flavor(self.flavor_id) LOG.debug("Instances deleted successfully") #Create one-click restore using CLI command 
restore_command = command_argument_string.oneclick_restore + " " + self.snapshot_ids[1] rc = cli_parser.cli_returncode(restore_command) if rc != 0: reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.FAIL) raise Exception("Command did not execute correctly") else: reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.PASS) LOG.debug("Command executed correctly") wc = query_data.get_snapshot_restore_status(tvaultconf.restore_name,self.snapshot_ids[1]) LOG.debug("Snapshot restore status: " + str(wc)) while (str(wc) != "available" or str(wc)!= "error"): time.sleep (5) wc = query_data.get_snapshot_restore_status(tvaultconf.restore_name, self.snapshot_ids[1]) LOG.debug("Snapshot restore status: " + str(wc)) if (str(wc) == "available"): LOG.debug("Snapshot Restore successfully completed") reporting.add_test_step("Snapshot one-click restore verification with DB", tvaultconf.PASS) self.created = True break else: if (str(wc) == "error"): break if (self.created == False): reporting.add_test_step("Snapshot one-click restore verification with DB", tvaultconf.FAIL) raise Exception ("Snapshot Restore did not get created") self.restore_id = query_data.get_snapshot_restore_id(self.snapshot_id) LOG.debug("Restore ID: " + str(self.restore_id)) #Fetch instance details after restore self.restored_vm_details_list = [] #restored vms list self.vm_list = self.get_restored_vm_list(self.restore_id) LOG.debug("Restored vms : " + str (self.vm_list)) #restored vms all details list for id in range(len(self.workload_instances)): self.restored_vm_details_list.append(self.get_vm_details(self.vm_list[id])) LOG.debug("Restored vm details list: " + str(self.restored_vm_details_list)) #required details of restored vms self.vms_details_after_restore = self.get_vms_details_list(self.restored_vm_details_list) LOG.debug("VM details after restore: " + str(self.vms_details_after_restore)) #Verify floating ips self.floating_ips_after_restore = [] for i in 
range(len(self.vms_details_after_restore)): self.floating_ips_after_restore.append(self.vms_details_after_restore[i]['floating_ip']) if(self.floating_ips_after_restore.sort() == self.floating_ips_list.sort()): reporting.add_test_step("Floating ip verification", tvaultconf.PASS) else: LOG.error("Floating ips before restore: " + str(self.floating_ips_list.sort())) LOG.error("Floating ips after restore: " + str(self.floating_ips_after_restore.sort())) reporting.add_test_step("Floating ip verification", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) #calculate md5sum after restore tree = lambda: collections.defaultdict(tree) md5_sum_after_oneclick_restore = tree() for floating_ip in self.floating_ips_list: for mount_point in mount_points: ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip)) md5_sum_after_oneclick_restore[str(floating_ip)][str(mount_point)] = self.calculatemmd5checksum(ssh, mount_point) ssh.close() LOG.debug("md5_sum_after_oneclick_restore" + str(md5_sum_after_oneclick_restore)) #md5sum verification if(self.md5sums_dir_before == md5_sum_after_oneclick_restore): reporting.add_test_step("Md5 Verification", tvaultconf.PASS) else: reporting.set_test_script_status(tvaultconf.FAIL) reporting.add_test_step("Md5 Verification", tvaultconf.FAIL) reporting.test_case_to_write() except Exception as e: LOG.error("Exception: " + str(e)) reporting.set_test_script_status(tvaultconf.FAIL) reporting.test_case_to_write() @test.pre_req({'type':'nested_security'}) @test.attr(type='smoke') @test.idempotent_id('9fe07175-912e-49a5-a629-5f522eada4c9') def test_2_regression(self): reporting.add_test_script(str(__name__)+"_nested_security") try: if self.exception != "": LOG.debug("pre req failed") reporting.add_test_step(str(self.exception), tvaultconf.FAIL) raise Exception (str(self.exception)) LOG.debug("pre req completed") except Exception as e: LOG.error("Exception: " + str(e)) reporting.set_test_script_status(tvaultconf.FAIL) 
reporting.test_case_to_write() @test.pre_req({'type':'inplace'}) @test.attr(type='smoke') @test.idempotent_id('9fe07175-912e-49a5-a629-5f52eeada4c9') def test_3_regression(self): reporting.add_test_script(str(__name__)+"_inplace_restore_cli") try: LOG.debug("pre req completed") volumes = tvaultconf.volumes_parts mount_points = ["mount_data_b", "mount_data_c"] #calculate md5 sum before tree = lambda: collections.defaultdict(tree) self.md5sums_dir_before = tree() ssh = self.SshRemoteMachineConnectionWithRSAKey(str(self.floating_ips_list[0])) self.md5sums_dir_before[str(self.floating_ips_list[0])][str(mount_points[0])] = self.calculatemmd5checksum(ssh, mount_points[0]) ssh.close() ssh = self.SshRemoteMachineConnectionWithRSAKey(str(self.floating_ips_list[1])) self.md5sums_dir_before[str(self.floating_ips_list[1])][str(mount_points[0])] = self.calculatemmd5checksum(ssh, mount_points[0]) self.md5sums_dir_before[str(self.floating_ips_list[1])][str(mount_points[1])] = self.calculatemmd5checksum(ssh, mount_points[1]) ssh.close() LOG.debug("md5sums_dir_before" + str(self.md5sums_dir_before)) #Fill some data on each of the volumes attached ssh = self.SshRemoteMachineConnectionWithRSAKey(str(self.floating_ips_list[0])) self.addCustomSizedfilesOnLinux(ssh, mount_points[0], 2) ssh.close() ssh = self.SshRemoteMachineConnectionWithRSAKey(str(self.floating_ips_list[1])) self.addCustomSizedfilesOnLinux(ssh, mount_points[0], 2) self.addCustomSizedfilesOnLinux(ssh, mount_points[1], 2) ssh.close() #Create in-place restore with CLI command restore_command = command_argument_string.inplace_restore + str(tvaultconf.restore_filename) + " " + str(self.incr_snapshot_id) LOG.debug("inplace restore cli command: " + str(restore_command) ) #Restore.json with only volume 2 excluded restore_json = json.dumps({ 'openstack': { 'instances': [{ 'restore_boot_disk': True, 'include': True, 'id': self.workload_instances[0], 'vdisks': [{ 'restore_cinder_volume': True, 'id': self.volumes_list[0], 
'new_volume_type':CONF.volume.volume_type }] }, { 'restore_boot_disk': True, 'include': True, 'id': self.workload_instances[1], 'vdisks': [{ 'restore_cinder_volme': True, 'id': self.volumes_list[1], 'new_volume_type':CONF.volume.volume_type }] }], 'networks_mapping': { 'networks': [] } }, 'restore_type': 'inplace', 'type': 'openstack' }) LOG.debug("restore.json for inplace restore: " + str(restore_json)) #Create Restore.json with open(tvaultconf.restore_filename, 'w') as f: f.write(str(json.loads(restore_json))) rc = cli_parser.cli_returncode(restore_command) if rc != 0: reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.FAIL) raise Exception("Command did not execute correctly") else: reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.PASS) LOG.debug("Command executed correctly") #get restore id from database self.restore_id = query_data.get_snapshot_restore_id(self.incr_snapshot_id) self.wait_for_snapshot_tobe_available(self.workload_id, self.incr_snapshot_id) #get in-place restore status if(self.getRestoreStatus(self.workload_id, self.incr_snapshot_id, self.restore_id) == "available"): reporting.add_test_step("In-place restore", tvaultconf.PASS) else: reporting.add_test_step("In-place restore", tvaultconf.FAIL) raise Exception("In-place restore failed") # mount volumes after restore ssh = self.SshRemoteMachineConnectionWithRSAKey(str(self.floating_ips_list[0])) self.execute_command_disk_mount(ssh, str(self.floating_ips_list[0]),[volumes[0]],[mount_points[0]]) ssh.close() ssh = self.SshRemoteMachineConnectionWithRSAKey(str(self.floating_ips_list[1])) self.execute_command_disk_mount(ssh, str(self.floating_ips_list[1]),volumes,mount_points) ssh.close() # calculate md5 after inplace restore tree = lambda: collections.defaultdict(tree) md5_sum_after_in_place_restore = tree() ssh = self.SshRemoteMachineConnectionWithRSAKey(str(self.floating_ips_list[0])) 
md5_sum_after_in_place_restore[str(self.floating_ips_list[0])][str(mount_points[0])] = self.calculatemmd5checksum(ssh, mount_points[0]) ssh.close() ssh = self.SshRemoteMachineConnectionWithRSAKey(str(self.floating_ips_list[1])) md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(mount_points[0])] = self.calculatemmd5checksum(ssh, mount_points[0]) md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(mount_points[1])] = self.calculatemmd5checksum(ssh, mount_points[1]) ssh.close() LOG.debug("md5_sum_after_in_place_restore" + str(md5_sum_after_in_place_restore)) #md5 sum verification if self.md5sums_dir_before[str(self.floating_ips_list[0])][str(mount_points[0])]==md5_sum_after_in_place_restore[str(self.floating_ips_list[0])][str(mount_points[0])]: reporting.add_test_step("Md5 Verification for volume 1", tvaultconf.PASS) else: reporting.add_test_step("Md5 Verification for volume 1", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) if self.md5sums_dir_before[str(self.floating_ips_list[1])][str(mount_points[0])]==md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(mount_points[0])]: reporting.add_test_step("Md5 Verification for volume 2", tvaultconf.PASS) else: reporting.add_test_step("Md5 Verification for volume 2", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) if self.md5sums_dir_before[str(self.floating_ips_list[1])][str(mount_points[1])]!=md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(mount_points[1])]: reporting.add_test_step("Md5 Verification for volume 3", tvaultconf.PASS) else: reporting.add_test_step("Md5 Verification for volume 3", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) reporting.test_case_to_write() except Exception as e: LOG.error("Exception: " + str(e)) reporting.set_test_script_status(tvaultconf.FAIL) reporting.test_case_to_write() finally: #Delete restore for snapshot self.restored_volumes = 
self.get_restored_volume_list(self.restore_id) if tvaultconf.cleanup==True: self.restore_delete(self.workload_id, self.incr_snapshot_id, self.restore_id) LOG.debug("Snapshot Restore deleted successfully") #Delete restored volumes and volume snapshots self.delete_volumes(self.restored_volumes) @test.pre_req({'type':'bootfrom_image_with_floating_ips'}) @test.attr(type='smoke') @test.idempotent_id('9fe07175-912e-49a5-a629-5f52eeada4c9') def test_4_regression(self): reporting.add_test_script(str(__name__)+"_selective_restore_default_values") try: if self.exception != "": LOG.debug("pre req failed") reporting.add_test_step(str(self.exception), tvaultconf.FAIL) raise Exception (str(self.exception)) LOG.debug("pre req completed") volumes = tvaultconf.volumes_parts mount_points = ["mount_data_b", "mount_data_c"] int_net_1_name = self.get_net_name(CONF.network.internal_network_id) LOG.debug("int_net_1_name" + str(int_net_1_name)) int_net_1_subnets = self.get_subnet_id(CONF.network.internal_network_id) LOG.debug("int_net_1_subnet" + str(int_net_1_subnets)) #Create instance details for restore.json for i in range(len(self.workload_instances)): vm_name = "tempest_test_vm_"+str(i+1)+"_restored" temp_instance_data = { 'id': self.workload_instances[i], 'include': True, 'restore_boot_disk': True, 'name': vm_name, 'vdisks':[] } self.instance_details.append(temp_instance_data) LOG.debug("Instance details for restore: " + str(self.instance_details)) #Create network details for restore.json snapshot_network = { 'name': int_net_1_name, 'id': CONF.network.internal_network_id, 'subnet': { 'id': int_net_1_subnets } } target_network = { 'name': int_net_1_name, 'id': CONF.network.internal_network_id, 'subnet': { 'id': int_net_1_subnets } } self.network_details = [ { 'snapshot_network': snapshot_network, 'target_network': target_network } ] LOG.debug("Network details for restore: " + str(self.network_details)) #Fill some more data on each volume attached tree = lambda: 
collections.defaultdict(tree) self.md5sums_dir_before = tree() for floating_ip in self.floating_ips_list: for mount_point in mount_points: ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip)) self.addCustomSizedfilesOnLinux(ssh, mount_point, 5) ssh.close() for mount_point in mount_points: ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip)) self.md5sums_dir_before[str(floating_ip)][str(mount_point)] = self.calculatemmd5checksum(ssh, mount_point) ssh.close() LOG.debug("md5sums_dir_before" + str(self.md5sums_dir_before)) #Trigger selective restore self.restore_id=self.snapshot_selective_restore(self.workload_id, self.snapshot_id,restore_name=tvaultconf.restore_name, instance_details=self.instance_details, network_details=self.network_details) self.wait_for_snapshot_tobe_available(self.workload_id, self.snapshot_id) if(self.getRestoreStatus(self.workload_id, self.snapshot_id, self.restore_id) == "available"): reporting.add_test_step("Selective restore", tvaultconf.PASS) else: reporting.add_test_step("Selective restore", tvaultconf.FAIL) raise Exception("Selective restore failed") #Fetch instance details after restore self.restored_vm_details_list = [] self.vm_list = self.get_restored_vm_list(self.restore_id) LOG.debug("Restored vms : " + str (self.vm_list)) for id in range(len(self.vm_list)): self.restored_vm_details_list.append(self.get_vm_details(self.vm_list[id])) LOG.debug("Restored vm details list: " + str(self.restored_vm_details_list)) self.vms_details_after_restore = self.get_vms_details_list(self.restored_vm_details_list) LOG.debug("VM details after restore: " + str(self.vms_details_after_restore)) #Compare the data before and after restore for i in range(len(self.vms_details_after_restore)): if(self.vms_details_after_restore[i]['network_name'] == int_net_1_name): reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.PASS) else: LOG.error("Expected network: " + str(int_net_1_name)) LOG.error("Restored 
network: " + str(self.vms_details_after_restore[i]['network_name'])) reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) if(self.get_key_pair_details(self.vms_details_after_restore[i]['keypair']) == self.original_fingerprint): reporting.add_test_step("Keypair verification for instance-" + str(i+1), tvaultconf.PASS) else: LOG.error("Original keypair details: " + str(self.original_fingerprint)) LOG.error("Restored keypair details: " + str(self.get_key_pair_details(self.vms_details_after_restore[i]['keypair']))) reporting.add_test_step("Keypair verification for instance-" + str(i+1), tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) if(self.get_flavor_details(self.vms_details_after_restore[i]['flavor_id']) == self.original_flavor_conf): reporting.add_test_step("Flavor verification for instance-" + str(i+1), tvaultconf.PASS) else: LOG.error("Original flavor details: " + str(self.original_flavor_conf)) LOG.error("Restored flavor details: " + str(self.get_flavor_details(self.vms_details_after_restore[i]['flavor_id']))) reporting.add_test_step("Flavor verification for instance-" + str(i+1), tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) #Verify floating ips self.floating_ips_after_restore = [] for i in range(len(self.vms_details_after_restore)): self.floating_ips_after_restore.append(self.vms_details_after_restore[i]['floating_ip']) if(self.floating_ips_after_restore.sort() == self.floating_ips_list.sort()): reporting.add_test_step("Floating ip verification", tvaultconf.PASS) else: LOG.error("Floating ips before restore: " + str(self.floating_ips_list.sort())) LOG.error("Floating ips after restore: " + str(self.floating_ips_after_restore.sort())) reporting.add_test_step("Floating ip verification", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) #calculate md5sum after restore tree = lambda: collections.defaultdict(tree) 
md5_sum_after_selective_restore = tree() for floating_ip in self.floating_ips_list: for mount_point in mount_points: ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip)) md5_sum_after_selective_restore[str(floating_ip)][str(mount_point)] = self.calculatemmd5checksum(ssh, mount_point) ssh.close() LOG.debug("md5_sum_after_selective_restore" + str(md5_sum_after_selective_restore)) #md5sum verification if(self.md5sums_dir_before == md5_sum_after_selective_restore): reporting.add_test_step("Md5 Verification", tvaultconf.PASS) else: reporting.set_test_script_status(tvaultconf.FAIL) reporting.add_test_step("Md5 Verification", tvaultconf.FAIL) reporting.test_case_to_write() except Exception as e: LOG.error("Exception: " + str(e)) reporting.set_test_script_status(tvaultconf.FAIL) reporting.test_case_to_write() @test.pre_req({'type':'bootfromvol_workload_medium'}) @test.attr(type='smoke') @test.idempotent_id('9fe07175-912e-49a5-a629-5f522eada4c9') def test_5_regression(self): reporting.add_test_script(str(__name__)+"_selective_restore_bootfromvol") try: if self.exception != "": LOG.debug("pre req failed") reporting.add_test_step(str(self.exception), tvaultconf.FAIL) raise Exception (str(self.exception)) LOG.debug("pre req completed") self.created=False volumes = tvaultconf.volumes_parts mount_points = ["mount_data_b", "mount_data_c"] int_net_1_name = self.get_net_name(CONF.network.internal_network_id) LOG.debug("int_net_1_name" + str(int_net_1_name)) int_net_1_subnets = self.get_subnet_id(CONF.network.internal_network_id) LOG.debug("int_net_1_subnet" + str(int_net_1_subnets)) #Create instance details for restore.json for i in range(len(self.workload_instances)): vm_name = "tempest_test_vm_"+str(i+1)+"_restored" temp_instance_data = { 'id': self.workload_instances[i], 'include': True, 'restore_boot_disk': True, 'name': vm_name, 'vdisks':[] } self.instance_details.append(temp_instance_data) LOG.debug("Instance details for restore: " + str(self.instance_details)) 
#Create network details for restore.json snapshot_network = { 'name': int_net_1_name, 'id': CONF.network.internal_network_id, 'subnet': { 'id': int_net_1_subnets } } target_network = { 'name': int_net_1_name, 'id': CONF.network.internal_network_id, 'subnet': { 'id': int_net_1_subnets } } self.network_details = [ { 'snapshot_network': snapshot_network, 'target_network': target_network } ] LOG.debug("Network details for restore: " + str(self.network_details)) #Fill some more data on each volume attached tree = lambda: collections.defaultdict(tree) self.md5sums_dir_before = tree() for floating_ip in self.floating_ips_list: for mount_point in mount_points: ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip)) self.addCustomSizedfilesOnLinux(ssh, mount_point, 5) ssh.close() for mount_point in mount_points: ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip)) self.md5sums_dir_before[str(floating_ip)][str(mount_point)] = self.calculatemmd5checksum(ssh, mount_point) ssh.close() LOG.debug("md5sums_dir_before" + str(self.md5sums_dir_before)) #Trigger selective restore self.restore_id=self.snapshot_selective_restore(self.workload_id, self.snapshot_id,restore_name=tvaultconf.restore_name, instance_details=self.instance_details, network_details=self.network_details) self.wait_for_snapshot_tobe_available(self.workload_id, self.snapshot_id) if(self.getRestoreStatus(self.workload_id, self.snapshot_id, self.restore_id) == "available"): reporting.add_test_step("Selective restore", tvaultconf.PASS) else: reporting.add_test_step("Selective restore", tvaultconf.FAIL) raise Exception("Selective restore failed") #Fetch instance details after restore self.restored_vm_details_list = [] self.vm_list = self.get_restored_vm_list(self.restore_id) LOG.debug("Restored vms : " + str (self.vm_list)) for id in range(len(self.vm_list)): self.restored_vm_details_list.append(self.get_vm_details(self.vm_list[id])) LOG.debug("Restored vm details list: " + 
str(self.restored_vm_details_list)) self.vms_details_after_restore = self.get_vms_details_list(self.restored_vm_details_list) LOG.debug("VM details after restore: " + str(self.vms_details_after_restore)) #Compare the data before and after restore for i in range(len(self.vms_details_after_restore)): if(self.vms_details_after_restore[i]['network_name'] == int_net_1_name): reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.PASS) else: LOG.error("Expected network: " + str(int_net_1_name)) LOG.error("Restored network: " + str(self.vms_details_after_restore[i]['network_name'])) reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) if(self.get_key_pair_details(self.vms_details_after_restore[i]['keypair']) == self.original_fingerprint): reporting.add_test_step("Keypair verification for instance-" + str(i+1), tvaultconf.PASS) else: LOG.error("Original keypair details: " + str(self.original_fingerprint)) LOG.error("Restored keypair details: " + str(self.get_key_pair_details(self.vms_details_after_restore[i]['keypair']))) reporting.add_test_step("Keypair verification for instance-" + str(i+1), tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) if(self.get_flavor_details(self.vms_details_after_restore[i]['flavor_id']) == self.original_flavor_conf): reporting.add_test_step("Flavor verification for instance-" + str(i+1), tvaultconf.PASS) else: LOG.error("Original flavor details: " + str(self.original_flavor_conf)) LOG.error("Restored flavor details: " + str(self.get_flavor_details(self.vms_details_after_restore[i]['flavor_id']))) reporting.add_test_step("Flavor verification for instance-" + str(i+1), tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) #Verify floating ips self.floating_ips_after_restore = [] for i in range(len(self.vms_details_after_restore)): 
self.floating_ips_after_restore.append(self.vms_details_after_restore[i]['floating_ip']) if(self.floating_ips_after_restore.sort() == self.floating_ips_list.sort()): reporting.add_test_step("Floating ip verification", tvaultconf.PASS) else: LOG.error("Floating ips before restore: " + str(self.floating_ips_list.sort())) LOG.error("Floating ips after restore: " + str(self.floating_ips_after_restore.sort())) reporting.add_test_step("Floating ip verification", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) #calculate md5sum after restore tree = lambda: collections.defaultdict(tree) md5_sum_after_selective_restore = tree() for floating_ip in self.floating_ips_list: for mount_point in mount_points: ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip)) md5_sum_after_selective_restore[str(floating_ip)][str(mount_point)] = self.calculatemmd5checksum(ssh, mount_point) ssh.close() LOG.debug("md5_sum_after_selective_restore" + str(md5_sum_after_selective_restore)) #md5sum verification if(self.md5sums_dir_before == md5_sum_after_selective_restore): reporting.add_test_step("Md5 Verification", tvaultconf.PASS) else: reporting.set_test_script_status(tvaultconf.FAIL) reporting.add_test_step("Md5 Verification", tvaultconf.FAIL) reporting.test_case_to_write() except Exception as e: LOG.error("Exception: " + str(e)) reporting.set_test_script_status(tvaultconf.FAIL) reporting.test_case_to_write() @test.pre_req({'type':'bootfrom_image_with_floating_ips'}) @test.attr(type='smoke') @test.idempotent_id('9fe07175-912e-49a5-a629-5f522eada4c9') def test_6_regression(self): reporting.add_test_script(str(__name__)+"_one_click_restore_bootfrom_image") try: if self.exception != "": LOG.debug("pre req failed") reporting.add_test_step(str(self.exception), tvaultconf.FAIL) raise Exception (str(self.exception)) LOG.debug("pre req completed") self.created=False #Delete the original instance self.delete_vms(self.workload_instances) 
self.delete_key_pair(tvaultconf.key_pair_name) self.delete_security_group(self.security_group_id) self.delete_flavor(self.flavor_id) LOG.debug("Instances deleted successfully") #Create one-click restore using CLI command restore_command = command_argument_string.oneclick_restore + " " + self.snapshot_ids[1] rc = cli_parser.cli_returncode(restore_command) if rc != 0: reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.FAIL) raise Exception("Command did not execute correctly") else: reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.PASS) LOG.debug("Command executed correctly") wc = query_data.get_snapshot_restore_status(tvaultconf.restore_name,self.snapshot_ids[1]) LOG.debug("Snapshot restore status: " + str(wc)) while (str(wc) != "available" or str(wc)!= "error"): time.sleep (5) wc = query_data.get_snapshot_restore_status(tvaultconf.restore_name, self.snapshot_ids[1]) LOG.debug("Snapshot restore status: " + str(wc)) if (str(wc) == "available"): LOG.debug("Snapshot Restore successfully completed") reporting.add_test_step("Snapshot one-click restore verification with DB", tvaultconf.PASS) self.created = True break else: if (str(wc) == "error"): break if (self.created == False): reporting.add_test_step("Snapshot one-click restore verification with DB", tvaultconf.FAIL) raise Exception ("Snapshot Restore did not get created") self.restore_id = query_data.get_snapshot_restore_id(self.snapshot_id) LOG.debug("Restore ID: " + str(self.restore_id)) #Fetch instance details after restore self.restored_vm_details_list = [] #restored vms list self.vm_list = self.get_restored_vm_list(self.restore_id) LOG.debug("Restored vms : " + str (self.vm_list)) #restored vms all details list for id in range(len(self.workload_instances)): self.restored_vm_details_list.append(self.get_vm_details(self.vm_list[id])) LOG.debug("Restored vm details list: " + str(self.restored_vm_details_list)) #required details of restored vms 
self.vms_details_after_restore = self.get_vms_details_list(self.restored_vm_details_list) LOG.debug("VM details after restore: " + str(self.vms_details_after_restore)) #Verify floating ips self.floating_ips_after_restore = [] for i in range(len(self.vms_details_after_restore)): self.floating_ips_after_restore.append(self.vms_details_after_restore[i]['floating_ip']) if(self.floating_ips_after_restore.sort() == self.floating_ips_list.sort()): reporting.add_test_step("Floating ip verification", tvaultconf.PASS) else: LOG.error("Floating ips before restore: " + str(self.floating_ips_list.sort())) LOG.error("Floating ips after restore: " + str(self.floating_ips_after_restore.sort())) reporting.add_test_step("Floating ip verification", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) #calculate md5sum after restore tree = lambda: collections.defaultdict(tree) md5_sum_after_oneclick_restore = tree() for floating_ip in self.floating_ips_list: for mount_point in mount_points: ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip)) md5_sum_after_oneclick_restore[str(floating_ip)][str(mount_point)] = self.calculatemmd5checksum(ssh, mount_point) ssh.close() LOG.debug("md5_sum_after_oneclick_restore" + str(md5_sum_after_oneclick_restore)) #md5sum verification if(self.md5sums_dir_before == md5_sum_after_oneclick_restore): reporting.add_test_step("Md5 Verification", tvaultconf.PASS) else: reporting.set_test_script_status(tvaultconf.FAIL) reporting.add_test_step("Md5 Verification", tvaultconf.FAIL) reporting.test_case_to_write() except Exception as e: LOG.error("Exception: " + str(e)) reporting.set_test_script_status(tvaultconf.FAIL) reporting.test_case_to_write()
52.813202
179
0.632396
4,351
37,603
5.175132
0.064353
0.022383
0.043345
0.048852
0.9114
0.906426
0.903451
0.89035
0.883155
0.880579
0
0.012246
0.272531
37,603
711
180
52.887482
0.810894
0.054278
0
0.819013
0
0
0.136366
0.020648
0
0
0
0
0
0
null
null
0.045704
0.02925
null
null
0.007313
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
8
808ca05b1b185dc1f7778b012b14c05c747a3b71
10,005
py
Python
tests/test_udp.py
benoitc/pyuv
51a2f8687e3b6cd54af5ce81aabfc00b7fe40a18
[ "MIT" ]
1
2020-01-21T11:10:38.000Z
2020-01-21T11:10:38.000Z
tests/test_udp.py
benoitc/pyuv
51a2f8687e3b6cd54af5ce81aabfc00b7fe40a18
[ "MIT" ]
null
null
null
tests/test_udp.py
benoitc/pyuv
51a2f8687e3b6cd54af5ce81aabfc00b7fe40a18
[ "MIT" ]
null
null
null
from common import unittest2, platform_skip import common import pyuv import socket TEST_PORT = 12345 TEST_PORT2 = 12346 MULTICAST_ADDRESS = "239.255.0.1" class UDPTest(unittest2.TestCase): def setUp(self): self.loop = pyuv.Loop.default_loop() self.server = None self.client = None def on_close(self, handle): self.on_close_called += 1 def on_server_recv(self, handle, ip_port, data, error): ip, port = ip_port data = data.strip() self.assertEquals(data, b"PING") self.server.send((ip, port), b"PONG"+common.linesep) def on_client_recv(self, handle, ip_port, data, error): ip, port = ip_port data = data.strip() self.assertEquals(data, b"PONG") self.client.close(self.on_close) self.server.close(self.on_close) def timer_cb(self, timer): self.client.send(("127.0.0.1", TEST_PORT), b"PING"+common.linesep) timer.close(self.on_close) def test_udp_pingpong(self): self.on_close_called = 0 self.server = pyuv.UDP(self.loop) self.server.bind(("0.0.0.0", TEST_PORT)) self.server.set_broadcast(True) # for coverage try: self.server.set_ttl(10) # for coverage except pyuv.error.UDPError: # This function is not implemented on Windows pass self.server.start_recv(self.on_server_recv) self.client = pyuv.UDP(self.loop) self.client.bind(("0.0.0.0", TEST_PORT2)) self.client.start_recv(self.on_client_recv) timer = pyuv.Timer(self.loop) timer.start(self.timer_cb, 0.1, 0) self.loop.run() self.assertEqual(self.on_close_called, 3) class UDPTestNull(unittest2.TestCase): def setUp(self): self.loop = pyuv.Loop.default_loop() self.server = None self.client = None def on_close(self, handle): self.on_close_called += 1 def on_server_recv(self, handle, ip_port, data, error): ip, port = ip_port data = data.strip() self.assertEquals(data, b"PIN\x00G") self.server.send((ip, port), b"PONG"+common.linesep) def on_client_recv(self, handle, ip_port, data, error): ip, port = ip_port data = data.strip() self.assertEquals(data, b"PONG") self.client.close(self.on_close) self.server.close(self.on_close) def 
timer_cb(self, timer): self.client.send(("127.0.0.1", TEST_PORT), b"PIN\x00G"+common.linesep) timer.close(self.on_close) def test_udp_pingpong_null(self): self.on_close_called = 0 self.server = pyuv.UDP(self.loop) self.server.bind(("0.0.0.0", TEST_PORT)) self.server.start_recv(self.on_server_recv) self.client = pyuv.UDP(self.loop) self.client.bind(("0.0.0.0", TEST_PORT2)) self.client.start_recv(self.on_client_recv) timer = pyuv.Timer(self.loop) timer.start(self.timer_cb, 0.1, 0) self.loop.run() self.assertEqual(self.on_close_called, 3) class UDPTestList(unittest2.TestCase): def setUp(self): self.loop = pyuv.Loop.default_loop() self.server = None self.client = None def on_close(self, handle): self.on_close_called += 1 def on_server_recv(self, handle, ip_port, data, error): ip, port = ip_port data = data.strip() self.assertEquals(data, b"PING") self.server.sendlines((ip, port), [b"PONG", common.linesep]) def on_client_recv(self, handle, ip_port, data, error): ip, port = ip_port data = data.strip() self.assertEquals(data, b"PONG") self.client.close(self.on_close) self.server.close(self.on_close) def timer_cb(self, timer): self.client.sendlines(("127.0.0.1", TEST_PORT), [b"PING", common.linesep]) timer.close(self.on_close) def test_udp_pingpong_list(self): self.on_close_called = 0 self.server = pyuv.UDP(self.loop) self.server.bind(("0.0.0.0", TEST_PORT)) self.server.start_recv(self.on_server_recv) self.client = pyuv.UDP(self.loop) self.client.bind(("0.0.0.0", TEST_PORT2)) self.client.start_recv(self.on_client_recv) timer = pyuv.Timer(self.loop) timer.start(self.timer_cb, 0.1, 0) self.loop.run() self.assertEqual(self.on_close_called, 3) class UDPTestListNull(unittest2.TestCase): def setUp(self): self.loop = pyuv.Loop.default_loop() self.server = None self.client = None def on_close(self, handle): self.on_close_called += 1 def on_server_recv(self, handle, ip_port, data, error): ip, port = ip_port data = data.strip() self.assertEquals(data, b"PIN\x00G") 
self.server.sendlines((ip, port), [b"PONG", common.linesep]) def on_client_recv(self, handle, ip_port, data, error): ip, port = ip_port data = data.strip() self.assertEquals(data, b"PONG") self.client.close(self.on_close) self.server.close(self.on_close) def timer_cb(self, timer): self.client.sendlines(("127.0.0.1", TEST_PORT), [b"PIN\x00G", common.linesep]) timer.close(self.on_close) def test_udp_pingpong_list_null(self): self.on_close_called = 0 self.server = pyuv.UDP(self.loop) self.server.bind(("0.0.0.0", TEST_PORT)) self.server.start_recv(self.on_server_recv) self.client = pyuv.UDP(self.loop) self.client.bind(("0.0.0.0", TEST_PORT2)) self.client.start_recv(self.on_client_recv) timer = pyuv.Timer(self.loop) timer.start(self.timer_cb, 0.1, 0) self.loop.run() self.assertEqual(self.on_close_called, 3) class UDPTestInvalidData(unittest2.TestCase): def setUp(self): self.loop = pyuv.Loop.default_loop() self.server = None self.client = None def on_close(self, handle): self.on_close_called += 1 def on_server_recv(self, handle, ip_port, data, error): ip, port = ip_port self.client.close(self.on_close) self.server.close(self.on_close) self.fail("Expected send to fail.") def timer_cb(self, timer): self.assertRaises(TypeError, self.client.send, ("127.0.0.1", TEST_PORT), object()) self.assertRaises(TypeError, self.client.send, ("127.0.0.1", TEST_PORT), 1) self.assertRaises(TypeError, self.client.sendlines, ("127.0.0.1", TEST_PORT), object()) self.assertRaises(TypeError, self.client.sendlines, ("127.0.0.1", TEST_PORT), 1) self.client.close(self.on_close) self.server.close(self.on_close) timer.close(self.on_close) def test_udp_invalid_data(self): self.on_close_called = 0 self.server = pyuv.UDP(self.loop) self.server.bind(("0.0.0.0", TEST_PORT)) self.server.start_recv(self.on_server_recv) self.client = pyuv.UDP(self.loop) self.client.bind(("0.0.0.0", TEST_PORT2)) timer = pyuv.Timer(self.loop) timer.start(self.timer_cb, 0.1, 0) self.loop.run() 
self.assertEqual(self.on_close_called, 3) @platform_skip(["win32"]) class UDPTestMulticast(unittest2.TestCase): def setUp(self): self.loop = pyuv.Loop.default_loop() self.server = None self.client = None self.received_data = None def on_close(self, handle): self.on_close_called += 1 def on_client_recv(self, handle, ip_port, data, error): ip, port = ip_port self.received_data = data.strip() self.client.set_membership(MULTICAST_ADDRESS, pyuv.UV_LEAVE_GROUP) self.client.close(self.on_close) def on_server_send(self, handle, error): handle.close(self.on_close) def test_udp_multicast(self): self.on_close_called = 0 self.server = pyuv.UDP(self.loop) self.client = pyuv.UDP(self.loop) self.client.bind((MULTICAST_ADDRESS, TEST_PORT)) self.client.set_membership(MULTICAST_ADDRESS, pyuv.UV_JOIN_GROUP) self.client.set_multicast_ttl(10) self.client.start_recv(self.on_client_recv) self.server.send((MULTICAST_ADDRESS, TEST_PORT), b"PING", self.on_server_send) self.loop.run() self.assertEqual(self.on_close_called, 2) self.assertEquals(self.received_data, b"PING") @platform_skip(["darwin"]) def test_udp_multicast_loop(self): self.on_close_called = 0 self.client = pyuv.UDP(self.loop) self.client.bind((MULTICAST_ADDRESS, TEST_PORT)) self.client.set_membership(MULTICAST_ADDRESS, pyuv.UV_JOIN_GROUP) self.client.set_multicast_loop(True) self.client.start_recv(self.on_client_recv) self.client.send((MULTICAST_ADDRESS, TEST_PORT), b"PING") self.loop.run() self.assertEqual(self.on_close_called, 1) self.assertEquals(self.received_data, b"PING") class UDPTestBigDatagram(unittest2.TestCase): def setUp(self): self.loop = pyuv.Loop.default_loop() def send_cb(self, handle, error): self.handle.close() self.errorno = error def test_udp_big_datagram(self): self.errorno = None self.handle = pyuv.UDP(self.loop) data = b"X"*65536 self.handle.send(("127.0.0.1", TEST_PORT), data, self.send_cb) self.loop.run() self.assertEqual(self.errorno, pyuv.errno.UV_EMSGSIZE) class UDPTestOpen(unittest2.TestCase): def 
test_udp_open(self): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) loop = pyuv.Loop.default_loop() handle = pyuv.UDP(loop) handle.open(sock.fileno()) try: handle.bind(("1.2.3.4", TEST_PORT)) except pyuv.error.UDPError as e: self.assertEqual(e.args[0], pyuv.errno.UV_EADDRNOTAVAIL) loop.run() if __name__ == '__main__': unittest2.main(verbosity=2)
32.911184
95
0.635882
1,425
10,005
4.285614
0.085614
0.050106
0.070247
0.055674
0.832651
0.827575
0.815294
0.792369
0.768135
0.741281
0
0.024824
0.234983
10,005
303
96
33.019802
0.77306
0.006897
0
0.717842
0
0
0.030916
0
0
0
0
0
0.095436
1
0.161826
false
0.004149
0.016598
0
0.211618
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
8094785e88f1d4a25a8be34399c79fe9f3e9e221
18,149
py
Python
sdk/python/pulumi_azure/avs/cluster.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
109
2018-06-18T00:19:44.000Z
2022-02-20T05:32:57.000Z
sdk/python/pulumi_azure/avs/cluster.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
663
2018-06-18T21:08:46.000Z
2022-03-31T20:10:11.000Z
sdk/python/pulumi_azure/avs/cluster.py
henriktao/pulumi-azure
f1cbcf100b42b916da36d8fe28be3a159abaf022
[ "ECL-2.0", "Apache-2.0" ]
41
2018-07-19T22:37:38.000Z
2022-03-14T10:56:26.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['ClusterArgs', 'Cluster'] @pulumi.input_type class ClusterArgs: def __init__(__self__, *, cluster_node_count: pulumi.Input[int], sku_name: pulumi.Input[str], vmware_cloud_id: pulumi.Input[str], name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Cluster resource. :param pulumi.Input[int] cluster_node_count: The count of the Vmware Cluster nodes. :param pulumi.Input[str] sku_name: The cluster sku to use. Possible values are `av20`, `av36`, and `av36t`. Changing this forces a new Vmware Cluster to be created. :param pulumi.Input[str] vmware_cloud_id: The ID of the Vmware Private Cloud in which to create this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. :param pulumi.Input[str] name: The name which should be used for this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. """ pulumi.set(__self__, "cluster_node_count", cluster_node_count) pulumi.set(__self__, "sku_name", sku_name) pulumi.set(__self__, "vmware_cloud_id", vmware_cloud_id) if name is not None: pulumi.set(__self__, "name", name) @property @pulumi.getter(name="clusterNodeCount") def cluster_node_count(self) -> pulumi.Input[int]: """ The count of the Vmware Cluster nodes. """ return pulumi.get(self, "cluster_node_count") @cluster_node_count.setter def cluster_node_count(self, value: pulumi.Input[int]): pulumi.set(self, "cluster_node_count", value) @property @pulumi.getter(name="skuName") def sku_name(self) -> pulumi.Input[str]: """ The cluster sku to use. Possible values are `av20`, `av36`, and `av36t`. Changing this forces a new Vmware Cluster to be created. 
""" return pulumi.get(self, "sku_name") @sku_name.setter def sku_name(self, value: pulumi.Input[str]): pulumi.set(self, "sku_name", value) @property @pulumi.getter(name="vmwareCloudId") def vmware_cloud_id(self) -> pulumi.Input[str]: """ The ID of the Vmware Private Cloud in which to create this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. """ return pulumi.get(self, "vmware_cloud_id") @vmware_cloud_id.setter def vmware_cloud_id(self, value: pulumi.Input[str]): pulumi.set(self, "vmware_cloud_id", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name which should be used for this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @pulumi.input_type class _ClusterState: def __init__(__self__, *, cluster_node_count: Optional[pulumi.Input[int]] = None, cluster_number: Optional[pulumi.Input[int]] = None, hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, sku_name: Optional[pulumi.Input[str]] = None, vmware_cloud_id: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Cluster resources. :param pulumi.Input[int] cluster_node_count: The count of the Vmware Cluster nodes. :param pulumi.Input[int] cluster_number: A number that identifies this Vmware Cluster in its Vmware Private Cloud. :param pulumi.Input[Sequence[pulumi.Input[str]]] hosts: A list of host of the Vmware Cluster. :param pulumi.Input[str] name: The name which should be used for this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. :param pulumi.Input[str] sku_name: The cluster sku to use. Possible values are `av20`, `av36`, and `av36t`. Changing this forces a new Vmware Cluster to be created. 
:param pulumi.Input[str] vmware_cloud_id: The ID of the Vmware Private Cloud in which to create this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. """ if cluster_node_count is not None: pulumi.set(__self__, "cluster_node_count", cluster_node_count) if cluster_number is not None: pulumi.set(__self__, "cluster_number", cluster_number) if hosts is not None: pulumi.set(__self__, "hosts", hosts) if name is not None: pulumi.set(__self__, "name", name) if sku_name is not None: pulumi.set(__self__, "sku_name", sku_name) if vmware_cloud_id is not None: pulumi.set(__self__, "vmware_cloud_id", vmware_cloud_id) @property @pulumi.getter(name="clusterNodeCount") def cluster_node_count(self) -> Optional[pulumi.Input[int]]: """ The count of the Vmware Cluster nodes. """ return pulumi.get(self, "cluster_node_count") @cluster_node_count.setter def cluster_node_count(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "cluster_node_count", value) @property @pulumi.getter(name="clusterNumber") def cluster_number(self) -> Optional[pulumi.Input[int]]: """ A number that identifies this Vmware Cluster in its Vmware Private Cloud. """ return pulumi.get(self, "cluster_number") @cluster_number.setter def cluster_number(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "cluster_number", value) @property @pulumi.getter def hosts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ A list of host of the Vmware Cluster. """ return pulumi.get(self, "hosts") @hosts.setter def hosts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "hosts", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name which should be used for this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. 
""" return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="skuName") def sku_name(self) -> Optional[pulumi.Input[str]]: """ The cluster sku to use. Possible values are `av20`, `av36`, and `av36t`. Changing this forces a new Vmware Cluster to be created. """ return pulumi.get(self, "sku_name") @sku_name.setter def sku_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sku_name", value) @property @pulumi.getter(name="vmwareCloudId") def vmware_cloud_id(self) -> Optional[pulumi.Input[str]]: """ The ID of the Vmware Private Cloud in which to create this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. """ return pulumi.get(self, "vmware_cloud_id") @vmware_cloud_id.setter def vmware_cloud_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "vmware_cloud_id", value) class Cluster(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, cluster_node_count: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, sku_name: Optional[pulumi.Input[str]] = None, vmware_cloud_id: Optional[pulumi.Input[str]] = None, __props__=None): """ Manages a Vmware Cluster. 
## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_private_cloud = azure.avs.PrivateCloud("examplePrivateCloud", resource_group_name=example_resource_group.name, location=example_resource_group.location, sku_name="av36", management_cluster=azure.avs.PrivateCloudManagementClusterArgs( size=3, ), network_subnet_cidr="192.168.48.0/22", internet_connection_enabled=False, nsxt_password="QazWsx13$Edc", vcenter_password="WsxEdc23$Rfv") example_cluster = azure.avs.Cluster("exampleCluster", vmware_cloud_id=example_private_cloud.id, cluster_node_count=3, sku_name="av36") ``` ## Import Vmware Clusters can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:avs/cluster:Cluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.AVS/privateClouds/privateCloud1/clusters/cluster1 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] cluster_node_count: The count of the Vmware Cluster nodes. :param pulumi.Input[str] name: The name which should be used for this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. :param pulumi.Input[str] sku_name: The cluster sku to use. Possible values are `av20`, `av36`, and `av36t`. Changing this forces a new Vmware Cluster to be created. :param pulumi.Input[str] vmware_cloud_id: The ID of the Vmware Private Cloud in which to create this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. """ ... @overload def __init__(__self__, resource_name: str, args: ClusterArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Manages a Vmware Cluster. 
## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_private_cloud = azure.avs.PrivateCloud("examplePrivateCloud", resource_group_name=example_resource_group.name, location=example_resource_group.location, sku_name="av36", management_cluster=azure.avs.PrivateCloudManagementClusterArgs( size=3, ), network_subnet_cidr="192.168.48.0/22", internet_connection_enabled=False, nsxt_password="QazWsx13$Edc", vcenter_password="WsxEdc23$Rfv") example_cluster = azure.avs.Cluster("exampleCluster", vmware_cloud_id=example_private_cloud.id, cluster_node_count=3, sku_name="av36") ``` ## Import Vmware Clusters can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:avs/cluster:Cluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.AVS/privateClouds/privateCloud1/clusters/cluster1 ``` :param str resource_name: The name of the resource. :param ClusterArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(ClusterArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, cluster_node_count: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, sku_name: Optional[pulumi.Input[str]] = None, vmware_cloud_id: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = ClusterArgs.__new__(ClusterArgs) if cluster_node_count is None and not opts.urn: raise TypeError("Missing required property 'cluster_node_count'") __props__.__dict__["cluster_node_count"] = cluster_node_count __props__.__dict__["name"] = name if sku_name is None and not opts.urn: raise TypeError("Missing required property 'sku_name'") __props__.__dict__["sku_name"] = sku_name if vmware_cloud_id is None and not opts.urn: raise TypeError("Missing required property 'vmware_cloud_id'") __props__.__dict__["vmware_cloud_id"] = vmware_cloud_id __props__.__dict__["cluster_number"] = None __props__.__dict__["hosts"] = None super(Cluster, __self__).__init__( 'azure:avs/cluster:Cluster', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, cluster_node_count: Optional[pulumi.Input[int]] = None, cluster_number: Optional[pulumi.Input[int]] = None, 
hosts: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, name: Optional[pulumi.Input[str]] = None, sku_name: Optional[pulumi.Input[str]] = None, vmware_cloud_id: Optional[pulumi.Input[str]] = None) -> 'Cluster': """ Get an existing Cluster resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] cluster_node_count: The count of the Vmware Cluster nodes. :param pulumi.Input[int] cluster_number: A number that identifies this Vmware Cluster in its Vmware Private Cloud. :param pulumi.Input[Sequence[pulumi.Input[str]]] hosts: A list of host of the Vmware Cluster. :param pulumi.Input[str] name: The name which should be used for this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. :param pulumi.Input[str] sku_name: The cluster sku to use. Possible values are `av20`, `av36`, and `av36t`. Changing this forces a new Vmware Cluster to be created. :param pulumi.Input[str] vmware_cloud_id: The ID of the Vmware Private Cloud in which to create this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _ClusterState.__new__(_ClusterState) __props__.__dict__["cluster_node_count"] = cluster_node_count __props__.__dict__["cluster_number"] = cluster_number __props__.__dict__["hosts"] = hosts __props__.__dict__["name"] = name __props__.__dict__["sku_name"] = sku_name __props__.__dict__["vmware_cloud_id"] = vmware_cloud_id return Cluster(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="clusterNodeCount") def cluster_node_count(self) -> pulumi.Output[int]: """ The count of the Vmware Cluster nodes. 
""" return pulumi.get(self, "cluster_node_count") @property @pulumi.getter(name="clusterNumber") def cluster_number(self) -> pulumi.Output[int]: """ A number that identifies this Vmware Cluster in its Vmware Private Cloud. """ return pulumi.get(self, "cluster_number") @property @pulumi.getter def hosts(self) -> pulumi.Output[Sequence[str]]: """ A list of host of the Vmware Cluster. """ return pulumi.get(self, "hosts") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name which should be used for this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. """ return pulumi.get(self, "name") @property @pulumi.getter(name="skuName") def sku_name(self) -> pulumi.Output[str]: """ The cluster sku to use. Possible values are `av20`, `av36`, and `av36t`. Changing this forces a new Vmware Cluster to be created. """ return pulumi.get(self, "sku_name") @property @pulumi.getter(name="vmwareCloudId") def vmware_cloud_id(self) -> pulumi.Output[str]: """ The ID of the Vmware Private Cloud in which to create this Vmware Cluster. Changing this forces a new Vmware Cluster to be created. """ return pulumi.get(self, "vmware_cloud_id")
44.050971
204
0.649788
2,250
18,149
5.013778
0.093333
0.072157
0.058328
0.040954
0.847265
0.824661
0.797802
0.771474
0.760748
0.73185
0
0.011441
0.253513
18,149
411
205
44.158151
0.821228
0.398369
0
0.580952
1
0
0.100675
0.002555
0
0
0
0
0
1
0.157143
false
0.004762
0.02381
0
0.27619
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
809cf5849514e5c68228614b32967eb5a602bf1d
75
py
Python
test_bot.py
peleccom/rpi_telegram_bot
942a0110f3a37343a8ca1736c8eec49d852adf6c
[ "MIT" ]
1
2017-10-18T16:20:22.000Z
2017-10-18T16:20:22.000Z
test_bot.py
peleccom/rpi_telegram_bot
942a0110f3a37343a8ca1736c8eec49d852adf6c
[ "MIT" ]
null
null
null
test_bot.py
peleccom/rpi_telegram_bot
942a0110f3a37343a8ca1736c8eec49d852adf6c
[ "MIT" ]
null
null
null
import telegram_bot def test_answer(): assert telegram_bot.f(4) == 4
12.5
33
0.706667
12
75
4.166667
0.75
0.44
0
0
0
0
0
0
0
0
0
0.032787
0.186667
75
5
34
15
0.786885
0
0
0
0
0
0
0
0
0
0
0
0.333333
1
0.333333
true
0
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
0
1
0
0
7
03e3b85baa245eace3d334bcbd1fdd98a75dea0c
22,198
py
Python
dfirtrack_artifacts/tests/artifact/test_artifact_forms.py
blackhatethicalhacking/dfirtrack
9c2e13015291f2981d14d63c9683e7c447e91f3a
[ "MIT" ]
4
2020-03-06T17:37:09.000Z
2020-03-17T07:50:55.000Z
dfirtrack_artifacts/tests/artifact/test_artifact_forms.py
blackhatethicalhacking/dfirtrack
9c2e13015291f2981d14d63c9683e7c447e91f3a
[ "MIT" ]
null
null
null
dfirtrack_artifacts/tests/artifact/test_artifact_forms.py
blackhatethicalhacking/dfirtrack
9c2e13015291f2981d14d63c9683e7c447e91f3a
[ "MIT" ]
1
2020-03-06T20:54:52.000Z
2020-03-06T20:54:52.000Z
from django.contrib.auth.models import User from django.test import TestCase from django.utils import timezone from dfirtrack_artifacts.forms import ArtifactForm from dfirtrack_artifacts.models import Artifactstatus, Artifacttype from dfirtrack_main.models import Case, System, Systemstatus class ArtifactFormTestCase(TestCase): """ artifact form tests """ @classmethod def setUpTestData(cls): # create user test_user = User.objects.create_user(username='testuser_artifact', password='zpdfNMmo3vYrkHrrL6EU') # create object Artifactstatus.objects.create(artifactstatus_name='artifactstatus_1') # create object Artifacttype.objects.create(artifacttype_name='artifacttype_1') # create object Case.objects.create( case_name = 'case_1', case_is_incident = True, case_created_by_user_id = test_user, ) # create object systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1') # create object System.objects.create( system_name='system_1', systemstatus = systemstatus_1, system_modify_time = timezone.now(), system_created_by_user_id = test_user, system_modified_by_user_id = test_user, ) def test_artifact_name_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifact_name'].label, 'Artifact name (*)') def test_artifact_artifactstatus_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifactstatus'].label, 'Artifactstatus (*)') def test_artifact_artifacttype_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifacttype'].label, 'Artifacttype (*)') def test_artifact_source_path_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifact_source_path'].label, 'Artifact source path') def test_artifact_system_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare 
self.assertEquals(form.fields['system'].label, 'System (*)') def test_artifact_case_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['case'].label, 'Case') def test_artifact_requested_time_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifact_requested_time'].label, 'Artifact requested time (YYYY-MM-DD HH:MM:SS)') def test_artifact_acquisition_time_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifact_acquisition_time'].label, 'Artifact acquisition time (YYYY-MM-DD HH:MM:SS)') def test_artifact_md5_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifact_md5'].label, 'MD5') def test_artifact_sha1_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifact_sha1'].label, 'SHA1') def test_artifact_sha256_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifact_sha256'].label, 'SHA256') def test_artifact_note_form_label(self): """ test form label """ # get object form = ArtifactForm() # compare self.assertEquals(form.fields['artifact_note'].label, 'Artifact note') def test_artifact_form_empty(self): """ test minimum form requirements / INVALID """ # get object form = ArtifactForm(data = {}) # compare self.assertFalse(form.is_valid()) def test_artifact_name_form_filled(self): """ test minimum form requirements / INVALID """ # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', }) # compare self.assertFalse(form.is_valid()) def test_artifact_artifactstatus_form_filled(self): """ test minimum form requirements / INVALID """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id 
# get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, }) # compare self.assertFalse(form.is_valid()) def test_artifact_artifacttype_form_filled(self): """ test minimum form requirements / INVALID """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, }) # compare self.assertFalse(form.is_valid()) def test_artifact_system_form_filled(self): """ test minimum form requirements / VALID """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, }) # compare self.assertTrue(form.is_valid()) def test_artifact_source_path_form_filled(self): """ test additional form content """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_source_path': 'C:\Windows\foo\bar', }) # compare self.assertTrue(form.is_valid()) def test_artifact_case_form_filled(self): """ test additional form content 
""" # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object case_id = Case.objects.get(case_name='case_1').case_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'case': case_id, }) # compare self.assertTrue(form.is_valid()) def test_artifact_requested_time_form_filled(self): """ test additional form content """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_requested_time': timezone.now(), }) # compare self.assertTrue(form.is_valid()) def test_artifact_acquisiton_time_form_filled(self): """ test additional form content """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_acquisiton_time': timezone.now(), }) # compare self.assertTrue(form.is_valid()) def test_artifact_md5_form_filled(self): """ test additional form content """ # get object 
artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_md5': 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm', }) # compare self.assertTrue(form.is_valid()) def test_artifact_sha1_form_filled(self): """ test additional form content """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_sha1': 'ssssssssssssssssssssssssssssssssssssssss', }) # compare self.assertTrue(form.is_valid()) def test_artifact_sha256_form_filled(self): """ test additional form content """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_sha256': 'ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss', }) # compare self.assertTrue(form.is_valid()) def test_artifact_note_form_filled(self): """ test additional form content """ # get object artifactstatus_id 
= Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_note': 'lorem ipsum', }) # compare self.assertTrue(form.is_valid()) """ the length of the following attributes is not tested at the moment due to their enormous numbers * artifact_name * artifact_source_path * artifact_storage_path """ def test_artifact_md5_proper_chars(self): """ test for max length """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_md5': 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm', }) # compare self.assertTrue(form.is_valid()) def test_artifact_md5_too_many_chars(self): """ test for max length """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_md5': 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm', }) # compare self.assertFalse(form.is_valid()) def 
test_artifact_md5_too_less_chars(self): """ test for min length """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_md5': 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm', }) # compare self.assertFalse(form.is_valid()) def test_artifact_sha1_proper_chars(self): """ test for max length """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_sha1': 'ssssssssssssssssssssssssssssssssssssssss', }) # compare self.assertTrue(form.is_valid()) def test_artifact_sha1_too_many_chars(self): """ test for max length """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_sha1': 'sssssssssssssssssssssssssssssssssssssssss', }) # compare self.assertFalse(form.is_valid()) def test_artifact_sha1_too_less_chars(self): """ test for min length 
""" # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_sha1': 'sssssssssssssssssssssssssssssssssssssss', }) # compare self.assertFalse(form.is_valid()) def test_artifact_sha256_proper_chars(self): """ test for max length """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_sha256': 'ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss', }) # compare self.assertTrue(form.is_valid()) def test_artifact_sha256_too_many_chars(self): """ test for max length """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_sha256': 'sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss', }) # compare self.assertFalse(form.is_valid()) def test_artifact_sha256_too_less_chars(self): """ test for min 
length """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_sha256': 'sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss', }) # compare self.assertFalse(form.is_valid()) def test_artifact_requested_time_formatcheck(self): """ test input format """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_requested_time': 'wrong format', }) # compare self.assertFalse(form.is_valid()) def test_artifact_acquisiton_time_formatcheck(self): """ test input format """ # get object artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name='artifactstatus_1').artifactstatus_id # get object artifacttype_id = Artifacttype.objects.get(artifacttype_name='artifacttype_1').artifacttype_id # get object system_id = System.objects.get(system_name='system_1').system_id # get object form = ArtifactForm(data = { 'artifact_name': 'artifact_1', 'artifactstatus': artifactstatus_id, 'artifacttype': artifacttype_id, 'system': system_id, 'artifact_acquisition_time': 'wrong format', }) # compare self.assertFalse(form.is_valid())
37.496622
124
0.642265
2,186
22,198
6.237877
0.05581
0.066002
0.051628
0.066002
0.880243
0.86037
0.850323
0.847316
0.844309
0.817395
0
0.009206
0.261105
22,198
591
125
37.560068
0.822156
0.106766
0
0.729231
0
0
0.175235
0.035638
0
0
0
0
0.110769
1
0.113846
false
0.003077
0.018462
0
0.135385
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
205d9c31bfeb82563d1c3cdc39f11ee14c9c347c
2,357
py
Python
tests/unit/test_negotiation.py
drvinceknight/coord_coop
3c3bd9943a8d350d61fae3823ee24ff4d13c2e71
[ "MIT" ]
2
2018-12-24T18:42:49.000Z
2022-03-11T05:16:16.000Z
tests/unit/test_negotiation.py
drvinceknight/coord_coop
3c3bd9943a8d350d61fae3823ee24ff4d13c2e71
[ "MIT" ]
3
2018-10-17T12:10:08.000Z
2018-10-17T22:07:57.000Z
tests/unit/test_negotiation.py
drvinceknight/coord_coop
3c3bd9943a8d350d61fae3823ee24ff4d13c2e71
[ "MIT" ]
null
null
null
import random import coord_coop as cc from coord_coop.actions import C, D from coord_coop.negotiation import get_actions, negotiate, update_random_player def test_update_random_player_and_get_actions(): players = ( cc.strategies.SingleBoundaryStrategy( boundary=1, number_of_players=3, p=0 ), cc.strategies.SingleBoundaryStrategy( boundary=1, number_of_players=3, p=1 ), cc.strategies.SingleBoundaryStrategy( boundary=2, number_of_players=3, p=1 ), ) assert get_actions(players) == (D, C, C) random.seed(0) assert update_random_player(players) is True assert get_actions(players) == (C, C, C) assert update_random_player(players) is False players = ( cc.strategies.SingleBoundaryStrategy( boundary=1, number_of_players=3, p=0 ), cc.strategies.SingleBoundaryStrategy( boundary=1, number_of_players=3, p=1 ), cc.strategies.SingleBoundaryStrategy( boundary=2, number_of_players=3, p=1 ), ) assert get_actions(players) == (D, C, C) random.seed(1) assert update_random_player(players) is True assert get_actions(players) == (D, C, D) assert update_random_player(players) is True assert get_actions(players) == (C, C, D) assert update_random_player(players) is True assert get_actions(players) == (C, C, C) assert update_random_player(players) is False def test_negotiate(): players = ( cc.strategies.SingleBoundaryStrategy( boundary=1, number_of_players=3, p=0 ), cc.strategies.SingleBoundaryStrategy( boundary=1, number_of_players=3, p=1 ), cc.strategies.SingleBoundaryStrategy( boundary=2, number_of_players=3, p=1 ), ) random.seed(0) assert negotiate(players) == [(D, C, C), (C, C, C)] players = ( cc.strategies.SingleBoundaryStrategy( boundary=1, number_of_players=3, p=0 ), cc.strategies.SingleBoundaryStrategy( boundary=1, number_of_players=3, p=1 ), cc.strategies.SingleBoundaryStrategy( boundary=2, number_of_players=3, p=1 ), ) random.seed(2) assert negotiate(players) == [(D, C, C), (D, C, D), (D, D, D)]
31.013158
79
0.627068
289
2,357
4.923875
0.117647
0.101195
0.286718
0.354181
0.864371
0.864371
0.828531
0.828531
0.828531
0.828531
0
0.023148
0.266865
2,357
75
80
31.426667
0.800347
0
0
0.764706
0
0
0
0
0
0
0
0
0.205882
1
0.029412
false
0
0.058824
0
0.088235
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
9
458a53d32ff0de663a7ae49361d01363d9917848
3,123
py
Python
tests/data/test_bo_blacklist_user.py
c17r/TagTrain
5aa1ca36439cc5e81d0c691f905a4bb879b78399
[ "MIT" ]
null
null
null
tests/data/test_bo_blacklist_user.py
c17r/TagTrain
5aa1ca36439cc5e81d0c691f905a4bb879b78399
[ "MIT" ]
7
2020-03-24T17:54:31.000Z
2021-09-21T12:34:34.000Z
tests/data/test_bo_blacklist_user.py
c17r/TagTrain
5aa1ca36439cc5e81d0c691f905a4bb879b78399
[ "MIT" ]
null
null
null
import pytest from . import db from .db import database from tagtrain import data def test_unknown_user(database): with pytest.raises(data.Group.DoesNotExist): data.by_owner.blacklist_user('non-existent', 'blockee', 'permalink', db.GROUP_NAME) def test_unknown_group(database): with pytest.raises(data.Group.DoesNotExist): data.by_owner.blacklist_user(db.OWNER_NAME, 'blockee', 'permalink', 'non-existent') def test_existing_blanket(database): with pytest.raises(data.by_owner.BlanketBlackList): data.by_owner.blacklist_user('user2', 'blockee', 'permalink', 'group2') def test_existing_blacklist(database): PERMALINK = '123' blacklist, created = data.by_owner.blacklist_user(db.OWNER_NAME, 'blockee', PERMALINK, db.GROUP_NAME) assert created is False assert blacklist.perma_proof != PERMALINK def test_good_blanket(database): OWNER_NAME = db.OWNER_NAME MEMBER_NAME = 'four' PERMALINK = 'my123' bls = list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 0 groups = list(data.by_member.find_groups(MEMBER_NAME)) assert len(groups) == 4 bl, created = data.by_owner.blacklist_user(OWNER_NAME, MEMBER_NAME, PERMALINK) assert created is True assert bl.owner_reddit_name == OWNER_NAME assert bl.blocked_reddit_name == MEMBER_NAME assert bl.group is None assert bl.perma_proof == PERMALINK bls = list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 1 groups = list(data.by_member.find_groups(MEMBER_NAME)) assert len(groups) == 1 def test_good_group1(database): OWNER_NAME = db.OWNER_NAME MEMBER_NAME = 'blockee' GROUP_NAME = 'group3' PERMALINK = 'my123' bls = list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 2 bl, created = data.by_owner.blacklist_user(OWNER_NAME, MEMBER_NAME, PERMALINK, GROUP_NAME) assert created is True assert bl.owner_reddit_name == OWNER_NAME assert bl.blocked_reddit_name == MEMBER_NAME assert bl.group is not None assert bl.group.name == GROUP_NAME assert bl.perma_proof == PERMALINK bls = 
list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 3 def test_good_group_delete(database): OWNER_NAME = db.OWNER_NAME MEMBER_NAME = 'four' GROUP_NAME = 'group3' PERMALINK = 'my123' bls = list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 0 groups = list(data.by_member.find_groups(MEMBER_NAME)) assert len(groups) == 4 bl, created = data.by_owner.blacklist_user(OWNER_NAME, MEMBER_NAME, PERMALINK, GROUP_NAME) assert created is True assert bl.owner_reddit_name == OWNER_NAME assert bl.blocked_reddit_name == MEMBER_NAME assert bl.group is not None assert bl.group.name == GROUP_NAME assert bl.perma_proof == PERMALINK bls = list(data.by_owner.find_blacklists(OWNER_NAME, MEMBER_NAME)) assert len(bls) == 1 groups = list(data.by_member.find_groups(MEMBER_NAME)) assert len(groups) == 3
30.320388
105
0.723343
443
3,123
4.844244
0.133183
0.097856
0.097856
0.106244
0.83178
0.782852
0.77959
0.77959
0.761883
0.72274
0
0.01049
0.175793
3,123
102
106
30.617647
0.823232
0
0
0.638889
0
0
0.043228
0
0
0
0
0
0.402778
1
0.097222
false
0
0.055556
0
0.152778
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
7
b367905d658a97baf57cbff7e98553572d5470a5
32,182
py
Python
domainbed/algorithms copy.py
bismex/DomainBed
27335e6ba24a946fedd2c52b13e39df132a89008
[ "MIT" ]
null
null
null
domainbed/algorithms copy.py
bismex/DomainBed
27335e6ba24a946fedd2c52b13e39df132a89008
[ "MIT" ]
null
null
null
domainbed/algorithms copy.py
bismex/DomainBed
27335e6ba24a946fedd2c52b13e39df132a89008
[ "MIT" ]
1
2022-03-11T11:09:12.000Z
2022-03-11T11:09:12.000Z
# if self.combine_list == 'none': # # classification loss of original images [CE_ori1] # p1_src, z1_src = self.classifier(self.featurizer(all_x.detach()), mode='train') # src_cls_loss = self.cls_criterion(p1_src, all_y) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p1_src, all_y, None, 'ori') # self.analysis_statistics(all_x, all_y, 'ori', False) # # classification loss of generated images [CE_gen1] # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # p_tgt, z_tgt = self.classifier(self.featurizer(x_tgt.detach()), mode='train') # tgt_cls_loss = self.cls_criterion(p_tgt, all_y) # loss += self.CE_gen1*tgt_cls_loss # self.analysis_logit(p_tgt, all_y, None, 'gen1_from_ori') # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', True) # elif self.combine_list == 'randconv_mix': # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # alpha = np.random.random() # x_mix = (alpha*x_tgt + (1-alpha)*all_x) # p_tgt, z_tgt = self.classifier(self.featurizer(x_mix.detach()), mode='train') # tgt_cls_loss = self.cls_criterion(p_tgt, all_y) # loss += self.CE_gen1*tgt_cls_loss # self.analysis_logit(p_tgt, all_y, None, 'gen1_mix_ori') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_mix_ori' + '_' + name, False) # self.analysis_statistics(x_mix, all_y, 'gen1_mix_ori', True) # elif self.combine_list == 'randconv_identity': # # classification loss of original images [CE_ori1] # if np.random.random() < 0.5: # p1_src, z1_src = self.classifier(self.featurizer(all_x.detach()), mode='train') # src_cls_loss = self.cls_criterion(p1_src, all_y) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p1_src, 
all_y, None, 'ori') # else: # # classification loss of generated images [CE_gen1] # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # p_tgt, z_tgt = self.classifier(self.featurizer(x_tgt.detach()), mode='train') # tgt_cls_loss = self.cls_criterion(p_tgt, all_y) # loss += self.CE_gen1*tgt_cls_loss # self.analysis_logit(p_tgt, all_y, None, 'gen1_from_ori') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_mix_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', True) # elif self.combine_list == 'combine': # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # x_combined = torch.cat([all_x, x_tgt]) # y_combined = torch.cat([all_y, all_y]) # p_combined, z_combined = self.classifier(self.featurizer(x_combined.detach()), mode='train') # src_cls_loss = self.cls_criterion(p_combined, y_combined) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p_combined, y_combined, torch.arange(0*len(all_y), 1*len(all_y)), 'ori') # self.analysis_logit(p_combined, y_combined, torch.arange(1*len(all_y), 2*len(all_y)), 'gen1_from_ori') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', True) # elif self.combine_list == 'ori_mix': # # classification loss of original images [CE_ori1] # p1_src, z1_src = self.classifier(self.featurizer(all_x.detach()), mode='train') # src_cls_loss = self.cls_criterion(p1_src, all_y) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p1_src, all_y, None, 'ori') # # classification loss of generated images [CE_gen1] # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, 
debug=self.debug) # p_tgt, z_tgt = self.classifier(self.featurizer(x_tgt.detach()), mode='train') # tgt_cls_loss = self.cls_criterion(p_tgt, all_y) # loss += self.CE_gen1*tgt_cls_loss # self.analysis_logit(p_tgt, all_y, None, 'gen1_from_ori') # # classification loss of mixed images [CE_gen1] # rand = torch.rand(len(all_x), 1, 1, 1).cuda() # x_mix = rand*all_x + (1-rand)*x_tgt # p_mix, z_mix = self.classifier(self.featurizer(x_mix.detach()), mode='train') # mix_cls_loss = self.cls_criterion(p_mix, all_y) # loss += self.CE_gen1*mix_cls_loss # self.analysis_logit(p_mix, all_y, None, 'gen1_mix_ori') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # self.analysis_statistics(x_mix, all_y, 'gen1_mix_ori', True) # elif self.combine_list == 'ori_mix_combine': # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # rand = torch.rand(len(all_x), 1, 1, 1).cuda() # x_mix = rand*all_x + (1-rand)*x_tgt # x_combined = torch.cat([all_x, x_tgt, x_mix]) # y_combined = torch.cat([all_y, all_y, all_y]) # p_combined, z_combined = self.classifier(self.featurizer(x_combined.detach()), mode='train') # src_cls_loss = self.cls_criterion(p_combined, y_combined) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p_combined, y_combined, torch.arange(0*len(all_y), 1*len(all_y)), 'ori') # self.analysis_logit(p_combined, y_combined, torch.arange(1*len(all_y), 2*len(all_y)), 'gen1_from_ori') # self.analysis_logit(p_combined, y_combined, torch.arange(2*len(all_y), 3*len(all_y)), 'gen1_mix_ori') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # 
self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # self.analysis_statistics(x_mix, all_y, 'gen1_mix_ori', True) # elif self.combine_list == 'gen_mix': # # classification loss of original images [CE_ori1] # p1_src, z1_src = self.classifier(self.featurizer(all_x.detach()), mode='train') # src_cls_loss = self.cls_criterion(p1_src, all_y) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p1_src, all_y, None, 'ori') # # classification loss of generated images [CE_gen1] # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # p_tgt, z_tgt = self.classifier(self.featurizer(x_tgt.detach()), mode='train') # tgt_cls_loss = self.cls_criterion(p_tgt, all_y) # loss += self.CE_gen1*tgt_cls_loss # self.analysis_logit(p_tgt, all_y, None, 'gen1_from_ori') # # classification loss of mixed images [CE_gen1] # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g1_net(all_x, rand=True, debug=self.debug) # rand = torch.rand(len(all_x), 1, 1, 1).cuda() # x_mix = rand*x_tgt2 + (1-rand)*x_tgt # p_mix, z_mix = self.classifier(self.featurizer(x_mix.detach()), mode='train') # mix_cls_loss = self.cls_criterion(p_mix, all_y) # loss += self.CE_gen1*mix_cls_loss # self.analysis_logit(p_mix, all_y, None, 'gen1_from_ori_2') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen1_from_ori_2' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen1_from_ori_2', False) # self.analysis_statistics(x_mix, all_y, 'mix_from_gen1', True) # elif self.combine_list == 'gen_mix_combine': # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # x_tgt2, x_tgt2_feat, 
x_tgt2_name = self.g1_net(all_x, rand=True, debug=self.debug) # rand = torch.rand(len(all_x), 1, 1, 1).cuda() # x_mix = rand*x_tgt2 + (1-rand)*x_tgt # x_combined = torch.cat([all_x, x_tgt, x_mix]) # y_combined = torch.cat([all_y, all_y, all_y]) # p_combined, z_combined = self.classifier(self.featurizer(x_combined.detach()), mode='train') # src_cls_loss = self.cls_criterion(p_combined, y_combined) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p_combined, y_combined, torch.arange(0*len(all_y), 1*len(all_y)), 'ori') # self.analysis_logit(p_combined, y_combined, torch.arange(1*len(all_y), 2*len(all_y)), 'gen1_from_ori') # self.analysis_logit(p_combined, y_combined, torch.arange(2*len(all_y), 3*len(all_y)), 'gen1_from_ori_2') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen1_from_ori_2' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen1_from_ori_2', False) # self.analysis_statistics(x_mix, all_y, 'mix_from_gen1', True) # elif self.combine_list == 'bootstrap1': # # classification loss of original images [CE_ori1] # p1_src, z1_src = self.classifier(self.featurizer(all_x.detach()), mode='train') # src_cls_loss = self.cls_criterion(p1_src, all_y) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p1_src, all_y, None, 'ori') # # classification loss of generated images [CE_gen1] # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # p_tgt, z_tgt = self.classifier(self.featurizer(x_tgt.detach()), mode='train') # tgt_cls_loss = self.cls_criterion(p_tgt, all_y) # loss += self.CE_gen1*tgt_cls_loss # self.analysis_logit(p_tgt, all_y, None, 
'gen1_from_ori') # # classification loss of bootstraped images [CE_gen1] # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g1_net(x_tgt, rand=True, debug=self.debug) # p_tgt2, z_tgt2 = self.classifier(self.featurizer(x_tgt2.detach()), mode='train') # tgt_cls_loss2 = self.cls_criterion(p_tgt2, all_y) # loss += self.CE_gen1*tgt_cls_loss2 # self.analysis_logit(p_tgt2, all_y, None, 'gen1_from_gen1') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen1_from_gen1' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen1_from_gen1', True) # elif self.combine_list == 'bootstrap1_combine': # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g1_net(x_tgt, rand=True, debug=self.debug) # x_combined = torch.cat([all_x, x_tgt, x_tgt2]) # y_combined = torch.cat([all_y, all_y, all_y]) # p_combined, z_combined = self.classifier(self.featurizer(x_combined.detach()), mode='train') # src_cls_loss = self.cls_criterion(p_combined, y_combined) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p_combined, y_combined, torch.arange(0*len(all_y), 1*len(all_y)), 'ori') # self.analysis_logit(p_combined, y_combined, torch.arange(1*len(all_y), 2*len(all_y)), 'gen1_from_ori') # self.analysis_logit(p_combined, y_combined, torch.arange(2*len(all_y), 3*len(all_y)), 'gen1_from_gen1') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 
'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen1_from_gen1' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen1_from_gen1', True) # elif self.combine_list == 'bootstrap2': # # classification loss of original images [CE_ori1] # p1_src, z1_src = self.classifier(self.featurizer(all_x.detach()), mode='train') # src_cls_loss = self.cls_criterion(p1_src, all_y) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p1_src, all_y, None, 'ori') # # classification loss of generated images [CE_gen1] # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # p_tgt, z_tgt = self.classifier(self.featurizer(x_tgt.detach()), mode='train') # tgt_cls_loss = self.cls_criterion(p_tgt, all_y) # loss += self.CE_gen1*tgt_cls_loss # self.analysis_logit(p_tgt, all_y, None, 'gen1_from_ori') # # classification loss of bootstraped images [CE_gen1] # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g2_net(x_tgt, rand=True, debug=self.debug) # p_tgt2, z_tgt2 = self.classifier(self.featurizer(x_tgt2.detach()), mode='train') # tgt_cls_loss2 = self.cls_criterion(p_tgt2, all_y) # loss += self.CE_gen1*tgt_cls_loss2 # self.analysis_logit(p_tgt2, all_y, None, 'gen2_from_gen1') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen2_from_gen1' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen2_from_gen1', True) # elif self.combine_list == 'bootstrap2_combine': # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # x_tgt2, x_tgt2_feat, 
x_tgt2_name = self.g2_net(x_tgt, rand=True, debug=self.debug) # x_combined = torch.cat([all_x, x_tgt, x_tgt2]) # y_combined = torch.cat([all_y, all_y, all_y]) # p_combined, z_combined = self.classifier(self.featurizer(x_combined.detach()), mode='train') # src_cls_loss = self.cls_criterion(p_combined, y_combined) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p_combined, y_combined, torch.arange(0*len(all_y), 1*len(all_y)), 'ori') # self.analysis_logit(p_combined, y_combined, torch.arange(1*len(all_y), 2*len(all_y)), 'gen1_from_ori') # self.analysis_logit(p_combined, y_combined, torch.arange(2*len(all_y), 3*len(all_y)), 'gen2_from_gen1') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen2_from_gen1' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen2_from_gen1', True) # elif self.combine_list == 'bootstrap3_combine': # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g2_net(x_tgt, rand=True, debug=self.debug) # x_tgt3, x_tgt3_feat, x_tgt3_name = self.g3_net(x_tgt2, rand=True, debug=self.debug) # x_combined = torch.cat([all_x, x_tgt, x_tgt2, x_tgt3]) # y_combined = torch.cat([all_y, all_y, all_y, all_y]) # p_combined, z_combined = self.classifier(self.featurizer(x_combined.detach()), mode='train') # src_cls_loss = self.cls_criterion(p_combined, y_combined) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p_combined, y_combined, torch.arange(0*len(all_y), 1*len(all_y)), 'ori') # self.analysis_logit(p_combined, y_combined, torch.arange(1*len(all_y), 2*len(all_y)), 'gen1_from_ori') # 
self.analysis_logit(p_combined, y_combined, torch.arange(2*len(all_y), 3*len(all_y)), 'gen2_from_gen1') # self.analysis_logit(p_combined, y_combined, torch.arange(3*len(all_y), 4*len(all_y)), 'gen3_from_gen2') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen2_from_gen1' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen2_from_gen1', False) # for cnt, (val, name) in enumerate(zip(x_tgt3_feat, x_tgt3_name)): # self.analysis_statistics(val, all_y, '2' + str(cnt).zfill(2) + '_gen3_from_gen2' + '_' + name, False) # self.analysis_statistics(x_tgt3, all_y, 'gen3_from_gen2', True) # elif self.combine_list == 'bootstrap4_combine': # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g2_net(x_tgt, rand=True, debug=self.debug) # x_tgt3, x_tgt3_feat, x_tgt3_name = self.g3_net(x_tgt2, rand=True, debug=self.debug) # x_tgt4, x_tgt4_feat, x_tgt4_name = self.g4_net(x_tgt3, rand=True, debug=self.debug) # x_combined = torch.cat([all_x, x_tgt, x_tgt2, x_tgt3, x_tgt4]) # y_combined = torch.cat([all_y, all_y, all_y, all_y, all_y]) # p_combined, z_combined = self.classifier(self.featurizer(x_combined.detach()), mode='train') # src_cls_loss = self.cls_criterion(p_combined, y_combined) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p_combined, y_combined, torch.arange(0*len(all_y), 1*len(all_y)), 'ori') # self.analysis_logit(p_combined, y_combined, torch.arange(1*len(all_y), 2*len(all_y)), 'gen1_from_ori') # self.analysis_logit(p_combined, y_combined, torch.arange(2*len(all_y), 3*len(all_y)), 'gen2_from_gen1') # 
self.analysis_logit(p_combined, y_combined, torch.arange(3*len(all_y), 4*len(all_y)), 'gen3_from_gen2') # self.analysis_logit(p_combined, y_combined, torch.arange(4*len(all_y), 5*len(all_y)), 'gen4_from_gen3') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen2_from_gen1' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen2_from_gen1', False) # for cnt, (val, name) in enumerate(zip(x_tgt3_feat, x_tgt3_name)): # self.analysis_statistics(val, all_y, '2' + str(cnt).zfill(2) + '_gen3_from_gen2' + '_' + name, False) # self.analysis_statistics(x_tgt3, all_y, 'gen3_from_gen2', False) # for cnt, (val, name) in enumerate(zip(x_tgt4_feat, x_tgt4_name)): # self.analysis_statistics(val, all_y, '3' + str(cnt).zfill(2) + '_gen4_from_gen3' + '_' + name, False) # self.analysis_statistics(x_tgt4, all_y, 'gen4_from_gen3', True) # elif self.combine_list == 'gen2': # # classification loss of original images [CE_ori1] # p1_src, z1_src = self.classifier(self.featurizer(all_x.detach()), mode='train') # src_cls_loss = self.cls_criterion(p1_src, all_y) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p1_src, all_y, None, 'ori') # # classification loss of generated images [CE_gen1] # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # p_tgt, z_tgt = self.classifier(self.featurizer(x_tgt.detach()), mode='train') # tgt_cls_loss = self.cls_criterion(p_tgt, all_y) # loss += self.CE_gen1*tgt_cls_loss # self.analysis_logit(p_tgt, all_y, None, 'gen1_from_ori') # # classification loss of bootstraped images [CE_gen1] # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g2_net(all_x, 
rand=True, debug=self.debug) # p_tgt2, z_tgt2 = self.classifier(self.featurizer(x_tgt2.detach()), mode='train') # tgt_cls_loss2 = self.cls_criterion(p_tgt2, all_y) # loss += self.CE_gen1*tgt_cls_loss2 # self.analysis_logit(p_tgt2, all_y, None, 'gen2_from_ori') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen2_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen2_from_ori', True) # elif self.combine_list == 'gen2_combine': # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g2_net(all_x, rand=True, debug=self.debug) # x_combined = torch.cat([all_x, x_tgt, x_tgt2]) # y_combined = torch.cat([all_y, all_y, all_y]) # p_combined, z_combined = self.classifier(self.featurizer(x_combined.detach()), mode='train') # src_cls_loss = self.cls_criterion(p_combined, y_combined) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p_combined, y_combined, torch.arange(0*len(all_y), 1*len(all_y)), 'ori') # self.analysis_logit(p_combined, y_combined, torch.arange(1*len(all_y), 2*len(all_y)), 'gen1_from_ori') # self.analysis_logit(p_combined, y_combined, torch.arange(2*len(all_y), 3*len(all_y)), 'gen2_from_ori') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + 
str(cnt).zfill(2) + '_gen2_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen2_from_ori', True) # elif self.combine_list == 'gen2_mix': # # classification loss of original images [CE_ori1] # p1_src, z1_src = self.classifier(self.featurizer(all_x.detach()), mode='train') # src_cls_loss = self.cls_criterion(p1_src, all_y) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p1_src, all_y, None, 'ori') # # classification loss of generated images [CE_gen1] # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # p_tgt, z_tgt = self.classifier(self.featurizer(x_tgt.detach()), mode='train') # tgt_cls_loss = self.cls_criterion(p_tgt, all_y) # loss += self.CE_gen1*tgt_cls_loss # self.analysis_logit(p_tgt, all_y, None, 'gen1_from_ori') # # classification loss of bootstraped images [CE_gen1] # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g2_net(all_x, rand=True, debug=self.debug) # p_tgt2, z_tgt2 = self.classifier(self.featurizer(x_tgt2.detach()), mode='train') # tgt_cls_loss2 = self.cls_criterion(p_tgt2, all_y) # loss += self.CE_gen1*tgt_cls_loss2 # self.analysis_logit(p_tgt2, all_y, None, 'gen2_from_ori') # # classification loss of mixed images [CE_gen1] # rand = torch.rand(len(all_x), 1, 1, 1).cuda() # x_mix = rand*x_tgt + (1-rand)*x_tgt2 # p_mix, z_mix = self.classifier(self.featurizer(x_mix.detach()), mode='train') # mix_cls_loss = self.cls_criterion(p_mix, all_y) # loss += self.CE_gen1*mix_cls_loss # self.analysis_logit(p_mix, all_y, None, 'gen2_mix_gen1') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen2_from_ori' + '_' + name, False) # 
self.analysis_statistics(x_tgt2, all_y, 'gen2_from_ori', False) # self.analysis_statistics(x_mix, all_y, 'gen2_mix_gen1', True) # elif self.combine_list == 'gen2_mix_combine': # # classification loss of generated images [CE_gen1] # x_tgt, x_tgt_feat, x_tgt_name = self.g1_net(all_x, rand=True, debug=self.debug) # x_tgt2, x_tgt2_feat, x_tgt2_name = self.g2_net(all_x, rand=True, debug=self.debug) # rand = torch.rand(len(all_x), 1, 1, 1).cuda() # x_mix = rand*x_tgt + (1-rand)*x_tgt2 # x_combined = torch.cat([all_x, x_tgt, x_tgt2, x_mix]) # y_combined = torch.cat([all_y, all_y, all_y, all_y]) # p_combined, z_combined = self.classifier(self.featurizer(x_combined.detach()), mode='train') # src_cls_loss = self.cls_criterion(p_combined, y_combined) # loss += self.CE_ori1*src_cls_loss # self.analysis_logit(p_combined, y_combined, torch.arange(0*len(all_y), 1*len(all_y)), 'ori') # self.analysis_logit(p_combined, y_combined, torch.arange(1*len(all_y), 2*len(all_y)), 'gen1_from_ori') # self.analysis_logit(p_combined, y_combined, torch.arange(2*len(all_y), 3*len(all_y)), 'gen2_from_ori') # self.analysis_logit(p_combined, y_combined, torch.arange(3*len(all_y), 4*len(all_y)), 'gen2_mix_gen1') # self.analysis_statistics(all_x, all_y, 'ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt_feat, x_tgt_name)): # self.analysis_statistics(val, all_y, '0' + str(cnt).zfill(2) + '_gen1_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt, all_y, 'gen1_from_ori', False) # for cnt, (val, name) in enumerate(zip(x_tgt2_feat, x_tgt2_name)): # self.analysis_statistics(val, all_y, '1' + str(cnt).zfill(2) + '_gen2_from_ori' + '_' + name, False) # self.analysis_statistics(x_tgt2, all_y, 'gen2_from_ori', False) # self.analysis_statistics(x_mix, all_y, 'gen2_mix_gen1', True)
71.834821
125
0.588745
4,568
32,182
3.789186
0.019264
0.052689
0.114391
0.047836
0.989427
0.98625
0.982841
0.973829
0.970189
0.964007
0
0.024391
0.280219
32,182
448
126
71.834821
0.722846
0.826704
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
8
2ff8c3e32f1b987de63aee68a84a78c4bd8f84f5
3,814
py
Python
tests/visual/test_demo_forms.py
willist/django-material
73e50eb0105a67dde1c3f6846f868f10bda1f4ea
[ "BSD-3-Clause" ]
2,703
2015-02-05T00:55:14.000Z
2022-03-16T19:58:23.000Z
tests/visual/test_demo_forms.py
willist/django-material
73e50eb0105a67dde1c3f6846f868f10bda1f4ea
[ "BSD-3-Clause" ]
495
2015-04-03T14:20:23.000Z
2022-03-01T13:05:51.000Z
tests/visual/test_demo_forms.py
willist/django-material
73e50eb0105a67dde1c3f6846f868f10bda1f4ea
[ "BSD-3-Clause" ]
552
2015-04-04T12:09:36.000Z
2022-03-04T13:59:19.000Z
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.test.utils import override_settings

from . import VisualTest


class _DemoFormTests(object):
    """Shared visual-regression tests for one demo form page.

    Concrete subclasses (which also inherit ``VisualTest``) set
    ``form_name`` to the demo URL slug, e.g. ``'login'``.  The page is
    expected at ``/demo/<form_name>/`` and screenshots are compared
    against the ``form_<form_name>_*`` baselines, exactly as in the
    previous per-class copies of these tests.
    """

    # URL slug of the demo form under test; set by each concrete subclass.
    form_name = None

    def _open_form(self):
        # Navigate the shared Selenium driver to the form's demo page.
        self.driver.get('%s/demo/%s/' % (self.live_server_url, self.form_name))

    def test_default_usecase(self):
        """The freshly rendered form matches its baseline screenshot."""
        self._open_form()
        self.assertScreenshot('.card', 'form_%s_default_usecase' % self.form_name)

    def test_invalid_data(self):
        """Submitting the empty form shows validation errors matching the baseline."""
        self._open_form()
        # NOTE(review): find_element_by_css_selector was removed in Selenium 4;
        # this assumes the project pins an older selenium — confirm before upgrading.
        self.driver.find_element_by_css_selector("button[type=submit]").click()
        self.assertScreenshot('.card', 'form_%s_invalid_data' % self.form_name)


@override_settings(ROOT_URLCONF='demo.tests.test_forms_login')
class TestLoginForm(_DemoFormTests, VisualTest):
    form_name = 'login'


@override_settings(ROOT_URLCONF='demo.tests.test_forms_registration')
class TestRegistrationForm(_DemoFormTests, VisualTest):
    form_name = 'registration'


@override_settings(ROOT_URLCONF='demo.tests.test_forms_contact')
class TestContactForm(_DemoFormTests, VisualTest):
    form_name = 'contact'


@override_settings(ROOT_URLCONF='demo.tests.test_forms_order')
class TestOrderForm(_DemoFormTests, VisualTest):
    form_name = 'order'


@override_settings(ROOT_URLCONF='demo.tests.test_forms_checkout')
class TestCheckoutForm(_DemoFormTests, VisualTest):
    form_name = 'checkout'


@override_settings(ROOT_URLCONF='demo.tests.test_forms_comment')
class TestCommentForm(_DemoFormTests, VisualTest):
    form_name = 'comment'


@override_settings(ROOT_URLCONF='demo.tests.test_forms_bank')
class TestBankForm(_DemoFormTests, VisualTest):
    form_name = 'bank'
39.319588
79
0.733351
487
3,814
5.414784
0.119097
0.079636
0.074327
0.090254
0.886234
0.849829
0.849829
0.849829
0.769056
0.769056
0
0.000302
0.133194
3,814
96
80
39.729167
0.797338
0.005506
0
0.530303
0
0
0.262728
0.161699
0
0
0
0
0.212121
1
0.212121
false
0
0.045455
0
0.363636
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
7
ff2d512c10b7fa8749e258f20fc8514482329eba
97
py
Python
froide_campaign/models/__init__.py
krmax44/froide-campaign
7f3a318fbd194dfd7b3560bb877a64e8c1dfd814
[ "MIT" ]
5
2016-01-27T19:00:50.000Z
2021-11-15T12:23:24.000Z
froide_campaign/models/__init__.py
krmax44/froide-campaign
7f3a318fbd194dfd7b3560bb877a64e8c1dfd814
[ "MIT" ]
2
2020-11-02T11:48:44.000Z
2020-11-03T15:39:46.000Z
froide_campaign/models/__init__.py
krmax44/froide-campaign
7f3a318fbd194dfd7b3560bb877a64e8c1dfd814
[ "MIT" ]
1
2020-10-30T09:20:53.000Z
2020-10-30T09:20:53.000Z
from .campaign import * # NOQA from .cms_plugins import * # NOQA from .report import * # NOQA
24.25
34
0.690722
13
97
5.076923
0.538462
0.454545
0.424242
0
0
0
0
0
0
0
0
0
0.216495
97
3
35
32.333333
0.868421
0.14433
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
ff3015c769881561fc5164ca813b1efdfb2571ea
79,651
py
Python
sdk/python/pulumi_ns1/team.py
pulumi/pulumi-ns1
7200ab674c814fd18f8b59a90ee130574df4eafc
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
sdk/python/pulumi_ns1/team.py
pulumi/pulumi-ns1
7200ab674c814fd18f8b59a90ee130574df4eafc
[ "ECL-2.0", "Apache-2.0" ]
43
2020-06-24T11:18:00.000Z
2022-03-31T15:37:47.000Z
sdk/python/pulumi_ns1/team.py
pulumi/pulumi-ns1
7200ab674c814fd18f8b59a90ee130574df4eafc
[ "ECL-2.0", "Apache-2.0" ]
1
2021-01-12T23:15:35.000Z
2021-01-12T23:15:35.000Z
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities from . import outputs from ._inputs import * __all__ = ['TeamArgs', 'Team'] @pulumi.input_type class TeamArgs: def __init__(__self__, *, account_manage_account_settings: Optional[pulumi.Input[bool]] = None, account_manage_apikeys: Optional[pulumi.Input[bool]] = None, account_manage_ip_whitelist: Optional[pulumi.Input[bool]] = None, account_manage_payment_methods: Optional[pulumi.Input[bool]] = None, account_manage_plan: Optional[pulumi.Input[bool]] = None, account_manage_teams: Optional[pulumi.Input[bool]] = None, account_manage_users: Optional[pulumi.Input[bool]] = None, account_view_activity_log: Optional[pulumi.Input[bool]] = None, account_view_invoices: Optional[pulumi.Input[bool]] = None, data_manage_datafeeds: Optional[pulumi.Input[bool]] = None, data_manage_datasources: Optional[pulumi.Input[bool]] = None, data_push_to_datafeeds: Optional[pulumi.Input[bool]] = None, dhcp_manage_dhcp: Optional[pulumi.Input[bool]] = None, dhcp_view_dhcp: Optional[pulumi.Input[bool]] = None, dns_manage_zones: Optional[pulumi.Input[bool]] = None, dns_records_allows: Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsAllowArgs']]]] = None, dns_records_denies: Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsDenyArgs']]]] = None, dns_view_zones: Optional[pulumi.Input[bool]] = None, dns_zones_allow_by_default: Optional[pulumi.Input[bool]] = None, dns_zones_allows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dns_zones_denies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input['TeamIpWhitelistArgs']]]] = None, ipam_manage_ipam: Optional[pulumi.Input[bool]] = 
None, ipam_view_ipam: Optional[pulumi.Input[bool]] = None, monitoring_manage_jobs: Optional[pulumi.Input[bool]] = None, monitoring_manage_lists: Optional[pulumi.Input[bool]] = None, monitoring_view_jobs: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, security_manage_active_directory: Optional[pulumi.Input[bool]] = None, security_manage_global2fa: Optional[pulumi.Input[bool]] = None): """ The set of arguments for constructing a Team resource. :param pulumi.Input[bool] account_manage_account_settings: Whether the team can modify account settings. :param pulumi.Input[bool] account_manage_apikeys: Whether the team can modify account apikeys. :param pulumi.Input[bool] account_manage_ip_whitelist: Whether the team can manage ip whitelist. :param pulumi.Input[bool] account_manage_payment_methods: Whether the team can modify account payment methods. :param pulumi.Input[bool] account_manage_plan: Whether the team can modify the account plan. :param pulumi.Input[bool] account_manage_teams: Whether the team can modify other teams in the account. :param pulumi.Input[bool] account_manage_users: Whether the team can modify account users. :param pulumi.Input[bool] account_view_activity_log: Whether the team can view activity logs. :param pulumi.Input[bool] account_view_invoices: Whether the team can view invoices. :param pulumi.Input[bool] data_manage_datafeeds: Whether the team can modify data feeds. :param pulumi.Input[bool] data_manage_datasources: Whether the team can modify data sources. :param pulumi.Input[bool] data_push_to_datafeeds: Whether the team can publish to data feeds. :param pulumi.Input[bool] dhcp_manage_dhcp: Whether the team can manage DHCP. Only relevant for the DDI product. :param pulumi.Input[bool] dhcp_view_dhcp: Whether the team can view DHCP. Only relevant for the DDI product. :param pulumi.Input[bool] dns_manage_zones: Whether the team can modify the accounts zones. 
:param pulumi.Input[bool] dns_view_zones: Whether the team can view the accounts zones. :param pulumi.Input[bool] dns_zones_allow_by_default: If true, enable the `dns_zones_allow` list, otherwise enable the `dns_zones_deny` list. :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_zones_allows: List of zones that the team may access. :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_zones_denies: List of zones that the team may not access. :param pulumi.Input[Sequence[pulumi.Input['TeamIpWhitelistArgs']]] ip_whitelists: Array of IP addresses objects to chich to grant the team access. Each object includes a **name** (string), and **values** (array of strings) associated to each "allow" list. :param pulumi.Input[bool] ipam_manage_ipam: Whether the team can manage IPAM. Only relevant for the DDI product. :param pulumi.Input[bool] ipam_view_ipam: Whether the team can view IPAM. Only relevant for the DDI product. :param pulumi.Input[bool] monitoring_manage_jobs: Whether the team can modify monitoring jobs. :param pulumi.Input[bool] monitoring_manage_lists: Whether the team can modify notification lists. :param pulumi.Input[bool] monitoring_view_jobs: Whether the team can view monitoring jobs. :param pulumi.Input[str] name: The free form name of the team. :param pulumi.Input[bool] security_manage_active_directory: Whether the team can manage global active directory. Only relevant for the DDI product. :param pulumi.Input[bool] security_manage_global2fa: Whether the team can manage global two factor authentication. 
""" if account_manage_account_settings is not None: pulumi.set(__self__, "account_manage_account_settings", account_manage_account_settings) if account_manage_apikeys is not None: pulumi.set(__self__, "account_manage_apikeys", account_manage_apikeys) if account_manage_ip_whitelist is not None: pulumi.set(__self__, "account_manage_ip_whitelist", account_manage_ip_whitelist) if account_manage_payment_methods is not None: pulumi.set(__self__, "account_manage_payment_methods", account_manage_payment_methods) if account_manage_plan is not None: warnings.warn("""obsolete, should no longer be used""", DeprecationWarning) pulumi.log.warn("""account_manage_plan is deprecated: obsolete, should no longer be used""") if account_manage_plan is not None: pulumi.set(__self__, "account_manage_plan", account_manage_plan) if account_manage_teams is not None: pulumi.set(__self__, "account_manage_teams", account_manage_teams) if account_manage_users is not None: pulumi.set(__self__, "account_manage_users", account_manage_users) if account_view_activity_log is not None: pulumi.set(__self__, "account_view_activity_log", account_view_activity_log) if account_view_invoices is not None: pulumi.set(__self__, "account_view_invoices", account_view_invoices) if data_manage_datafeeds is not None: pulumi.set(__self__, "data_manage_datafeeds", data_manage_datafeeds) if data_manage_datasources is not None: pulumi.set(__self__, "data_manage_datasources", data_manage_datasources) if data_push_to_datafeeds is not None: pulumi.set(__self__, "data_push_to_datafeeds", data_push_to_datafeeds) if dhcp_manage_dhcp is not None: pulumi.set(__self__, "dhcp_manage_dhcp", dhcp_manage_dhcp) if dhcp_view_dhcp is not None: pulumi.set(__self__, "dhcp_view_dhcp", dhcp_view_dhcp) if dns_manage_zones is not None: pulumi.set(__self__, "dns_manage_zones", dns_manage_zones) if dns_records_allows is not None: pulumi.set(__self__, "dns_records_allows", dns_records_allows) if dns_records_denies is not None: 
pulumi.set(__self__, "dns_records_denies", dns_records_denies) if dns_view_zones is not None: pulumi.set(__self__, "dns_view_zones", dns_view_zones) if dns_zones_allow_by_default is not None: pulumi.set(__self__, "dns_zones_allow_by_default", dns_zones_allow_by_default) if dns_zones_allows is not None: pulumi.set(__self__, "dns_zones_allows", dns_zones_allows) if dns_zones_denies is not None: pulumi.set(__self__, "dns_zones_denies", dns_zones_denies) if ip_whitelists is not None: pulumi.set(__self__, "ip_whitelists", ip_whitelists) if ipam_manage_ipam is not None: pulumi.set(__self__, "ipam_manage_ipam", ipam_manage_ipam) if ipam_view_ipam is not None: pulumi.set(__self__, "ipam_view_ipam", ipam_view_ipam) if monitoring_manage_jobs is not None: pulumi.set(__self__, "monitoring_manage_jobs", monitoring_manage_jobs) if monitoring_manage_lists is not None: pulumi.set(__self__, "monitoring_manage_lists", monitoring_manage_lists) if monitoring_view_jobs is not None: pulumi.set(__self__, "monitoring_view_jobs", monitoring_view_jobs) if name is not None: pulumi.set(__self__, "name", name) if security_manage_active_directory is not None: pulumi.set(__self__, "security_manage_active_directory", security_manage_active_directory) if security_manage_global2fa is not None: pulumi.set(__self__, "security_manage_global2fa", security_manage_global2fa) @property @pulumi.getter(name="accountManageAccountSettings") def account_manage_account_settings(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify account settings. """ return pulumi.get(self, "account_manage_account_settings") @account_manage_account_settings.setter def account_manage_account_settings(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "account_manage_account_settings", value) @property @pulumi.getter(name="accountManageApikeys") def account_manage_apikeys(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify account apikeys. 
""" return pulumi.get(self, "account_manage_apikeys") @account_manage_apikeys.setter def account_manage_apikeys(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "account_manage_apikeys", value) @property @pulumi.getter(name="accountManageIpWhitelist") def account_manage_ip_whitelist(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can manage ip whitelist. """ return pulumi.get(self, "account_manage_ip_whitelist") @account_manage_ip_whitelist.setter def account_manage_ip_whitelist(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "account_manage_ip_whitelist", value) @property @pulumi.getter(name="accountManagePaymentMethods") def account_manage_payment_methods(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify account payment methods. """ return pulumi.get(self, "account_manage_payment_methods") @account_manage_payment_methods.setter def account_manage_payment_methods(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "account_manage_payment_methods", value) @property @pulumi.getter(name="accountManagePlan") def account_manage_plan(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify the account plan. """ return pulumi.get(self, "account_manage_plan") @account_manage_plan.setter def account_manage_plan(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "account_manage_plan", value) @property @pulumi.getter(name="accountManageTeams") def account_manage_teams(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify other teams in the account. """ return pulumi.get(self, "account_manage_teams") @account_manage_teams.setter def account_manage_teams(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "account_manage_teams", value) @property @pulumi.getter(name="accountManageUsers") def account_manage_users(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify account users. 
""" return pulumi.get(self, "account_manage_users") @account_manage_users.setter def account_manage_users(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "account_manage_users", value) @property @pulumi.getter(name="accountViewActivityLog") def account_view_activity_log(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can view activity logs. """ return pulumi.get(self, "account_view_activity_log") @account_view_activity_log.setter def account_view_activity_log(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "account_view_activity_log", value) @property @pulumi.getter(name="accountViewInvoices") def account_view_invoices(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can view invoices. """ return pulumi.get(self, "account_view_invoices") @account_view_invoices.setter def account_view_invoices(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "account_view_invoices", value) @property @pulumi.getter(name="dataManageDatafeeds") def data_manage_datafeeds(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify data feeds. """ return pulumi.get(self, "data_manage_datafeeds") @data_manage_datafeeds.setter def data_manage_datafeeds(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "data_manage_datafeeds", value) @property @pulumi.getter(name="dataManageDatasources") def data_manage_datasources(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify data sources. """ return pulumi.get(self, "data_manage_datasources") @data_manage_datasources.setter def data_manage_datasources(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "data_manage_datasources", value) @property @pulumi.getter(name="dataPushToDatafeeds") def data_push_to_datafeeds(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can publish to data feeds. 
""" return pulumi.get(self, "data_push_to_datafeeds") @data_push_to_datafeeds.setter def data_push_to_datafeeds(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "data_push_to_datafeeds", value) @property @pulumi.getter(name="dhcpManageDhcp") def dhcp_manage_dhcp(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can manage DHCP. Only relevant for the DDI product. """ return pulumi.get(self, "dhcp_manage_dhcp") @dhcp_manage_dhcp.setter def dhcp_manage_dhcp(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dhcp_manage_dhcp", value) @property @pulumi.getter(name="dhcpViewDhcp") def dhcp_view_dhcp(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can view DHCP. Only relevant for the DDI product. """ return pulumi.get(self, "dhcp_view_dhcp") @dhcp_view_dhcp.setter def dhcp_view_dhcp(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dhcp_view_dhcp", value) @property @pulumi.getter(name="dnsManageZones") def dns_manage_zones(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify the accounts zones. 
""" return pulumi.get(self, "dns_manage_zones") @dns_manage_zones.setter def dns_manage_zones(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dns_manage_zones", value) @property @pulumi.getter(name="dnsRecordsAllows") def dns_records_allows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsAllowArgs']]]]: return pulumi.get(self, "dns_records_allows") @dns_records_allows.setter def dns_records_allows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsAllowArgs']]]]): pulumi.set(self, "dns_records_allows", value) @property @pulumi.getter(name="dnsRecordsDenies") def dns_records_denies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsDenyArgs']]]]: return pulumi.get(self, "dns_records_denies") @dns_records_denies.setter def dns_records_denies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsDenyArgs']]]]): pulumi.set(self, "dns_records_denies", value) @property @pulumi.getter(name="dnsViewZones") def dns_view_zones(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can view the accounts zones. """ return pulumi.get(self, "dns_view_zones") @dns_view_zones.setter def dns_view_zones(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dns_view_zones", value) @property @pulumi.getter(name="dnsZonesAllowByDefault") def dns_zones_allow_by_default(self) -> Optional[pulumi.Input[bool]]: """ If true, enable the `dns_zones_allow` list, otherwise enable the `dns_zones_deny` list. """ return pulumi.get(self, "dns_zones_allow_by_default") @dns_zones_allow_by_default.setter def dns_zones_allow_by_default(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dns_zones_allow_by_default", value) @property @pulumi.getter(name="dnsZonesAllows") def dns_zones_allows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of zones that the team may access. 
""" return pulumi.get(self, "dns_zones_allows") @dns_zones_allows.setter def dns_zones_allows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dns_zones_allows", value) @property @pulumi.getter(name="dnsZonesDenies") def dns_zones_denies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of zones that the team may not access. """ return pulumi.get(self, "dns_zones_denies") @dns_zones_denies.setter def dns_zones_denies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "dns_zones_denies", value) @property @pulumi.getter(name="ipWhitelists") def ip_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TeamIpWhitelistArgs']]]]: """ Array of IP addresses objects to chich to grant the team access. Each object includes a **name** (string), and **values** (array of strings) associated to each "allow" list. """ return pulumi.get(self, "ip_whitelists") @ip_whitelists.setter def ip_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TeamIpWhitelistArgs']]]]): pulumi.set(self, "ip_whitelists", value) @property @pulumi.getter(name="ipamManageIpam") def ipam_manage_ipam(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can manage IPAM. Only relevant for the DDI product. """ return pulumi.get(self, "ipam_manage_ipam") @ipam_manage_ipam.setter def ipam_manage_ipam(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "ipam_manage_ipam", value) @property @pulumi.getter(name="ipamViewIpam") def ipam_view_ipam(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can view IPAM. Only relevant for the DDI product. 
""" return pulumi.get(self, "ipam_view_ipam") @ipam_view_ipam.setter def ipam_view_ipam(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "ipam_view_ipam", value) @property @pulumi.getter(name="monitoringManageJobs") def monitoring_manage_jobs(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify monitoring jobs. """ return pulumi.get(self, "monitoring_manage_jobs") @monitoring_manage_jobs.setter def monitoring_manage_jobs(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "monitoring_manage_jobs", value) @property @pulumi.getter(name="monitoringManageLists") def monitoring_manage_lists(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can modify notification lists. """ return pulumi.get(self, "monitoring_manage_lists") @monitoring_manage_lists.setter def monitoring_manage_lists(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "monitoring_manage_lists", value) @property @pulumi.getter(name="monitoringViewJobs") def monitoring_view_jobs(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can view monitoring jobs. """ return pulumi.get(self, "monitoring_view_jobs") @monitoring_view_jobs.setter def monitoring_view_jobs(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "monitoring_view_jobs", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The free form name of the team. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="securityManageActiveDirectory") def security_manage_active_directory(self) -> Optional[pulumi.Input[bool]]: """ Whether the team can manage global active directory. Only relevant for the DDI product. 
        """
        return pulumi.get(self, "security_manage_active_directory")

    @security_manage_active_directory.setter
    def security_manage_active_directory(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "security_manage_active_directory", value)

    @property
    @pulumi.getter(name="securityManageGlobal2fa")
    def security_manage_global2fa(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can manage global two factor authentication.
        """
        return pulumi.get(self, "security_manage_global2fa")

    @security_manage_global2fa.setter
    def security_manage_global2fa(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "security_manage_global2fa", value)


@pulumi.input_type
class _TeamState:
    def __init__(__self__, *,
                 account_manage_account_settings: Optional[pulumi.Input[bool]] = None,
                 account_manage_apikeys: Optional[pulumi.Input[bool]] = None,
                 account_manage_ip_whitelist: Optional[pulumi.Input[bool]] = None,
                 account_manage_payment_methods: Optional[pulumi.Input[bool]] = None,
                 account_manage_plan: Optional[pulumi.Input[bool]] = None,
                 account_manage_teams: Optional[pulumi.Input[bool]] = None,
                 account_manage_users: Optional[pulumi.Input[bool]] = None,
                 account_view_activity_log: Optional[pulumi.Input[bool]] = None,
                 account_view_invoices: Optional[pulumi.Input[bool]] = None,
                 data_manage_datafeeds: Optional[pulumi.Input[bool]] = None,
                 data_manage_datasources: Optional[pulumi.Input[bool]] = None,
                 data_push_to_datafeeds: Optional[pulumi.Input[bool]] = None,
                 dhcp_manage_dhcp: Optional[pulumi.Input[bool]] = None,
                 dhcp_view_dhcp: Optional[pulumi.Input[bool]] = None,
                 dns_manage_zones: Optional[pulumi.Input[bool]] = None,
                 dns_records_allows: Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsAllowArgs']]]] = None,
                 dns_records_denies: Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsDenyArgs']]]] = None,
                 dns_view_zones: Optional[pulumi.Input[bool]] = None,
                 dns_zones_allow_by_default: Optional[pulumi.Input[bool]] = None,
                 dns_zones_allows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 dns_zones_denies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input['TeamIpWhitelistArgs']]]] = None,
                 ipam_manage_ipam: Optional[pulumi.Input[bool]] = None,
                 ipam_view_ipam: Optional[pulumi.Input[bool]] = None,
                 monitoring_manage_jobs: Optional[pulumi.Input[bool]] = None,
                 monitoring_manage_lists: Optional[pulumi.Input[bool]] = None,
                 monitoring_view_jobs: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 security_manage_active_directory: Optional[pulumi.Input[bool]] = None,
                 security_manage_global2fa: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering Team resources.
        :param pulumi.Input[bool] account_manage_account_settings: Whether the team can modify account settings.
        :param pulumi.Input[bool] account_manage_apikeys: Whether the team can modify account apikeys.
        :param pulumi.Input[bool] account_manage_ip_whitelist: Whether the team can manage ip whitelist.
        :param pulumi.Input[bool] account_manage_payment_methods: Whether the team can modify account payment methods.
        :param pulumi.Input[bool] account_manage_plan: Whether the team can modify the account plan.
        :param pulumi.Input[bool] account_manage_teams: Whether the team can modify other teams in the account.
        :param pulumi.Input[bool] account_manage_users: Whether the team can modify account users.
        :param pulumi.Input[bool] account_view_activity_log: Whether the team can view activity logs.
        :param pulumi.Input[bool] account_view_invoices: Whether the team can view invoices.
        :param pulumi.Input[bool] data_manage_datafeeds: Whether the team can modify data feeds.
        :param pulumi.Input[bool] data_manage_datasources: Whether the team can modify data sources.
        :param pulumi.Input[bool] data_push_to_datafeeds: Whether the team can publish to data feeds.
        :param pulumi.Input[bool] dhcp_manage_dhcp: Whether the team can manage DHCP.
               Only relevant for the DDI product.
        :param pulumi.Input[bool] dhcp_view_dhcp: Whether the team can view DHCP.
               Only relevant for the DDI product.
        :param pulumi.Input[bool] dns_manage_zones: Whether the team can modify the accounts zones.
        :param pulumi.Input[bool] dns_view_zones: Whether the team can view the accounts zones.
        :param pulumi.Input[bool] dns_zones_allow_by_default: If true, enable the `dns_zones_allow` list, otherwise enable the `dns_zones_deny` list.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_zones_allows: List of zones that the team may access.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_zones_denies: List of zones that the team may not access.
        :param pulumi.Input[Sequence[pulumi.Input['TeamIpWhitelistArgs']]] ip_whitelists: Array of IP address objects to which to grant the team access. Each object includes a **name** (string), and **values** (array of strings) associated to each "allow" list.
        :param pulumi.Input[bool] ipam_manage_ipam: Whether the team can manage IPAM.
               Only relevant for the DDI product.
        :param pulumi.Input[bool] ipam_view_ipam: Whether the team can view IPAM.
               Only relevant for the DDI product.
        :param pulumi.Input[bool] monitoring_manage_jobs: Whether the team can modify monitoring jobs.
        :param pulumi.Input[bool] monitoring_manage_lists: Whether the team can modify notification lists.
        :param pulumi.Input[bool] monitoring_view_jobs: Whether the team can view monitoring jobs.
        :param pulumi.Input[str] name: The free form name of the team.
        :param pulumi.Input[bool] security_manage_active_directory: Whether the team can manage global active directory.
               Only relevant for the DDI product.
        :param pulumi.Input[bool] security_manage_global2fa: Whether the team can manage global two factor authentication.
        """
        if account_manage_account_settings is not None:
            pulumi.set(__self__, "account_manage_account_settings", account_manage_account_settings)
        if account_manage_apikeys is not None:
            pulumi.set(__self__, "account_manage_apikeys", account_manage_apikeys)
        if account_manage_ip_whitelist is not None:
            pulumi.set(__self__, "account_manage_ip_whitelist", account_manage_ip_whitelist)
        if account_manage_payment_methods is not None:
            pulumi.set(__self__, "account_manage_payment_methods", account_manage_payment_methods)
        # NOTE: deliberate codegen pattern — the first `if` only emits the
        # deprecation warnings, the second actually stores the value.
        if account_manage_plan is not None:
            warnings.warn("""obsolete, should no longer be used""", DeprecationWarning)
            pulumi.log.warn("""account_manage_plan is deprecated: obsolete, should no longer be used""")
        if account_manage_plan is not None:
            pulumi.set(__self__, "account_manage_plan", account_manage_plan)
        if account_manage_teams is not None:
            pulumi.set(__self__, "account_manage_teams", account_manage_teams)
        if account_manage_users is not None:
            pulumi.set(__self__, "account_manage_users", account_manage_users)
        if account_view_activity_log is not None:
            pulumi.set(__self__, "account_view_activity_log", account_view_activity_log)
        if account_view_invoices is not None:
            pulumi.set(__self__, "account_view_invoices", account_view_invoices)
        if data_manage_datafeeds is not None:
            pulumi.set(__self__, "data_manage_datafeeds", data_manage_datafeeds)
        if data_manage_datasources is not None:
            pulumi.set(__self__, "data_manage_datasources", data_manage_datasources)
        if data_push_to_datafeeds is not None:
            pulumi.set(__self__, "data_push_to_datafeeds", data_push_to_datafeeds)
        if dhcp_manage_dhcp is not None:
            pulumi.set(__self__, "dhcp_manage_dhcp", dhcp_manage_dhcp)
        if dhcp_view_dhcp is not None:
            pulumi.set(__self__, "dhcp_view_dhcp", dhcp_view_dhcp)
        if dns_manage_zones is not None:
            pulumi.set(__self__, "dns_manage_zones", dns_manage_zones)
        if dns_records_allows is not None:
            pulumi.set(__self__, "dns_records_allows", dns_records_allows)
        if dns_records_denies is not None:
            pulumi.set(__self__, "dns_records_denies", dns_records_denies)
        if dns_view_zones is not None:
            pulumi.set(__self__, "dns_view_zones", dns_view_zones)
        if dns_zones_allow_by_default is not None:
            pulumi.set(__self__, "dns_zones_allow_by_default", dns_zones_allow_by_default)
        if dns_zones_allows is not None:
            pulumi.set(__self__, "dns_zones_allows", dns_zones_allows)
        if dns_zones_denies is not None:
            pulumi.set(__self__, "dns_zones_denies", dns_zones_denies)
        if ip_whitelists is not None:
            pulumi.set(__self__, "ip_whitelists", ip_whitelists)
        if ipam_manage_ipam is not None:
            pulumi.set(__self__, "ipam_manage_ipam", ipam_manage_ipam)
        if ipam_view_ipam is not None:
            pulumi.set(__self__, "ipam_view_ipam", ipam_view_ipam)
        if monitoring_manage_jobs is not None:
            pulumi.set(__self__, "monitoring_manage_jobs", monitoring_manage_jobs)
        if monitoring_manage_lists is not None:
            pulumi.set(__self__, "monitoring_manage_lists", monitoring_manage_lists)
        if monitoring_view_jobs is not None:
            pulumi.set(__self__, "monitoring_view_jobs", monitoring_view_jobs)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if security_manage_active_directory is not None:
            pulumi.set(__self__, "security_manage_active_directory", security_manage_active_directory)
        if security_manage_global2fa is not None:
            pulumi.set(__self__, "security_manage_global2fa", security_manage_global2fa)

    @property
    @pulumi.getter(name="accountManageAccountSettings")
    def account_manage_account_settings(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify account settings.
        """
        return pulumi.get(self, "account_manage_account_settings")

    @account_manage_account_settings.setter
    def account_manage_account_settings(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "account_manage_account_settings", value)

    @property
    @pulumi.getter(name="accountManageApikeys")
    def account_manage_apikeys(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify account apikeys.
        """
        return pulumi.get(self, "account_manage_apikeys")

    @account_manage_apikeys.setter
    def account_manage_apikeys(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "account_manage_apikeys", value)

    @property
    @pulumi.getter(name="accountManageIpWhitelist")
    def account_manage_ip_whitelist(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can manage ip whitelist.
        """
        return pulumi.get(self, "account_manage_ip_whitelist")

    @account_manage_ip_whitelist.setter
    def account_manage_ip_whitelist(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "account_manage_ip_whitelist", value)

    @property
    @pulumi.getter(name="accountManagePaymentMethods")
    def account_manage_payment_methods(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify account payment methods.
        """
        return pulumi.get(self, "account_manage_payment_methods")

    @account_manage_payment_methods.setter
    def account_manage_payment_methods(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "account_manage_payment_methods", value)

    @property
    @pulumi.getter(name="accountManagePlan")
    def account_manage_plan(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify the account plan.
        """
        return pulumi.get(self, "account_manage_plan")

    @account_manage_plan.setter
    def account_manage_plan(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "account_manage_plan", value)

    @property
    @pulumi.getter(name="accountManageTeams")
    def account_manage_teams(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify other teams in the account.
        """
        return pulumi.get(self, "account_manage_teams")

    @account_manage_teams.setter
    def account_manage_teams(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "account_manage_teams", value)

    @property
    @pulumi.getter(name="accountManageUsers")
    def account_manage_users(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify account users.
        """
        return pulumi.get(self, "account_manage_users")

    @account_manage_users.setter
    def account_manage_users(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "account_manage_users", value)

    @property
    @pulumi.getter(name="accountViewActivityLog")
    def account_view_activity_log(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can view activity logs.
        """
        return pulumi.get(self, "account_view_activity_log")

    @account_view_activity_log.setter
    def account_view_activity_log(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "account_view_activity_log", value)

    @property
    @pulumi.getter(name="accountViewInvoices")
    def account_view_invoices(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can view invoices.
        """
        return pulumi.get(self, "account_view_invoices")

    @account_view_invoices.setter
    def account_view_invoices(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "account_view_invoices", value)

    @property
    @pulumi.getter(name="dataManageDatafeeds")
    def data_manage_datafeeds(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify data feeds.
        """
        return pulumi.get(self, "data_manage_datafeeds")

    @data_manage_datafeeds.setter
    def data_manage_datafeeds(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "data_manage_datafeeds", value)

    @property
    @pulumi.getter(name="dataManageDatasources")
    def data_manage_datasources(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify data sources.
        """
        return pulumi.get(self, "data_manage_datasources")

    @data_manage_datasources.setter
    def data_manage_datasources(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "data_manage_datasources", value)

    @property
    @pulumi.getter(name="dataPushToDatafeeds")
    def data_push_to_datafeeds(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can publish to data feeds.
        """
        return pulumi.get(self, "data_push_to_datafeeds")

    @data_push_to_datafeeds.setter
    def data_push_to_datafeeds(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "data_push_to_datafeeds", value)

    @property
    @pulumi.getter(name="dhcpManageDhcp")
    def dhcp_manage_dhcp(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can manage DHCP.
        Only relevant for the DDI product.
        """
        return pulumi.get(self, "dhcp_manage_dhcp")

    @dhcp_manage_dhcp.setter
    def dhcp_manage_dhcp(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "dhcp_manage_dhcp", value)

    @property
    @pulumi.getter(name="dhcpViewDhcp")
    def dhcp_view_dhcp(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can view DHCP.
        Only relevant for the DDI product.
        """
        return pulumi.get(self, "dhcp_view_dhcp")

    @dhcp_view_dhcp.setter
    def dhcp_view_dhcp(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "dhcp_view_dhcp", value)

    @property
    @pulumi.getter(name="dnsManageZones")
    def dns_manage_zones(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify the accounts zones.
        """
        return pulumi.get(self, "dns_manage_zones")

    @dns_manage_zones.setter
    def dns_manage_zones(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "dns_manage_zones", value)

    @property
    @pulumi.getter(name="dnsRecordsAllows")
    def dns_records_allows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsAllowArgs']]]]:
        return pulumi.get(self, "dns_records_allows")

    @dns_records_allows.setter
    def dns_records_allows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsAllowArgs']]]]):
        pulumi.set(self, "dns_records_allows", value)

    @property
    @pulumi.getter(name="dnsRecordsDenies")
    def dns_records_denies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsDenyArgs']]]]:
        return pulumi.get(self, "dns_records_denies")

    @dns_records_denies.setter
    def dns_records_denies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TeamDnsRecordsDenyArgs']]]]):
        pulumi.set(self, "dns_records_denies", value)

    @property
    @pulumi.getter(name="dnsViewZones")
    def dns_view_zones(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can view the accounts zones.
        """
        return pulumi.get(self, "dns_view_zones")

    @dns_view_zones.setter
    def dns_view_zones(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "dns_view_zones", value)

    @property
    @pulumi.getter(name="dnsZonesAllowByDefault")
    def dns_zones_allow_by_default(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, enable the `dns_zones_allow` list, otherwise enable the `dns_zones_deny` list.
        """
        return pulumi.get(self, "dns_zones_allow_by_default")

    @dns_zones_allow_by_default.setter
    def dns_zones_allow_by_default(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "dns_zones_allow_by_default", value)

    @property
    @pulumi.getter(name="dnsZonesAllows")
    def dns_zones_allows(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of zones that the team may access.
        """
        return pulumi.get(self, "dns_zones_allows")

    @dns_zones_allows.setter
    def dns_zones_allows(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "dns_zones_allows", value)

    @property
    @pulumi.getter(name="dnsZonesDenies")
    def dns_zones_denies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of zones that the team may not access.
        """
        return pulumi.get(self, "dns_zones_denies")

    @dns_zones_denies.setter
    def dns_zones_denies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "dns_zones_denies", value)

    @property
    @pulumi.getter(name="ipWhitelists")
    def ip_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TeamIpWhitelistArgs']]]]:
        """
        Array of IP address objects to which to grant the team access. Each object includes a **name** (string), and **values** (array of strings) associated to each "allow" list.
        """
        return pulumi.get(self, "ip_whitelists")

    @ip_whitelists.setter
    def ip_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TeamIpWhitelistArgs']]]]):
        pulumi.set(self, "ip_whitelists", value)

    @property
    @pulumi.getter(name="ipamManageIpam")
    def ipam_manage_ipam(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can manage IPAM.
        Only relevant for the DDI product.
        """
        return pulumi.get(self, "ipam_manage_ipam")

    @ipam_manage_ipam.setter
    def ipam_manage_ipam(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ipam_manage_ipam", value)

    @property
    @pulumi.getter(name="ipamViewIpam")
    def ipam_view_ipam(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can view IPAM.
        Only relevant for the DDI product.
        """
        return pulumi.get(self, "ipam_view_ipam")

    @ipam_view_ipam.setter
    def ipam_view_ipam(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "ipam_view_ipam", value)

    @property
    @pulumi.getter(name="monitoringManageJobs")
    def monitoring_manage_jobs(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify monitoring jobs.
        """
        return pulumi.get(self, "monitoring_manage_jobs")

    @monitoring_manage_jobs.setter
    def monitoring_manage_jobs(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "monitoring_manage_jobs", value)

    @property
    @pulumi.getter(name="monitoringManageLists")
    def monitoring_manage_lists(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can modify notification lists.
        """
        return pulumi.get(self, "monitoring_manage_lists")

    @monitoring_manage_lists.setter
    def monitoring_manage_lists(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "monitoring_manage_lists", value)

    @property
    @pulumi.getter(name="monitoringViewJobs")
    def monitoring_view_jobs(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can view monitoring jobs.
        """
        return pulumi.get(self, "monitoring_view_jobs")

    @monitoring_view_jobs.setter
    def monitoring_view_jobs(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "monitoring_view_jobs", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The free form name of the team.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="securityManageActiveDirectory")
    def security_manage_active_directory(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can manage global active directory.
        Only relevant for the DDI product.
        """
        return pulumi.get(self, "security_manage_active_directory")

    @security_manage_active_directory.setter
    def security_manage_active_directory(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "security_manage_active_directory", value)

    @property
    @pulumi.getter(name="securityManageGlobal2fa")
    def security_manage_global2fa(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the team can manage global two factor authentication.
        """
        return pulumi.get(self, "security_manage_global2fa")

    @security_manage_global2fa.setter
    def security_manage_global2fa(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "security_manage_global2fa", value)


class Team(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_manage_account_settings: Optional[pulumi.Input[bool]] = None,
                 account_manage_apikeys: Optional[pulumi.Input[bool]] = None,
                 account_manage_ip_whitelist: Optional[pulumi.Input[bool]] = None,
                 account_manage_payment_methods: Optional[pulumi.Input[bool]] = None,
                 account_manage_plan: Optional[pulumi.Input[bool]] = None,
                 account_manage_teams: Optional[pulumi.Input[bool]] = None,
                 account_manage_users: Optional[pulumi.Input[bool]] = None,
                 account_view_activity_log: Optional[pulumi.Input[bool]] = None,
                 account_view_invoices: Optional[pulumi.Input[bool]] = None,
                 data_manage_datafeeds: Optional[pulumi.Input[bool]] = None,
                 data_manage_datasources: Optional[pulumi.Input[bool]] = None,
                 data_push_to_datafeeds: Optional[pulumi.Input[bool]] = None,
                 dhcp_manage_dhcp: Optional[pulumi.Input[bool]] = None,
                 dhcp_view_dhcp: Optional[pulumi.Input[bool]] = None,
                 dns_manage_zones: Optional[pulumi.Input[bool]] = None,
                 dns_records_allows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamDnsRecordsAllowArgs']]]]] = None,
                 dns_records_denies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamDnsRecordsDenyArgs']]]]] = None,
                 dns_view_zones: Optional[pulumi.Input[bool]] = None,
dns_zones_allow_by_default: Optional[pulumi.Input[bool]] = None, dns_zones_allows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dns_zones_denies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamIpWhitelistArgs']]]]] = None, ipam_manage_ipam: Optional[pulumi.Input[bool]] = None, ipam_view_ipam: Optional[pulumi.Input[bool]] = None, monitoring_manage_jobs: Optional[pulumi.Input[bool]] = None, monitoring_manage_lists: Optional[pulumi.Input[bool]] = None, monitoring_view_jobs: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, security_manage_active_directory: Optional[pulumi.Input[bool]] = None, security_manage_global2fa: Optional[pulumi.Input[bool]] = None, __props__=None): """ Provides a NS1 Team resource. This can be used to create, modify, and delete teams. The credentials used must have the `manage_teams` permission set. ## Example Usage ```python import pulumi import pulumi_ns1 as ns1 # Create a new NS1 Team example = ns1.Team("example", account_manage_users=False, dns_view_zones=False, ip_whitelists=[ ns1.TeamIpWhitelistArgs( name="whitelist-1", values=[ "1.1.1.1", "2.2.2.2", ], ), ns1.TeamIpWhitelistArgs( name="whitelist-2", values=[ "3.3.3.3", "4.4.4.4", ], ), ]) # Another team example2 = ns1.Team("example2", data_manage_datasources=True, dns_records_allows=[ns1.TeamDnsRecordsAllowArgs( domain="terraform.example.io", include_subdomains=False, type="A", zone="example.io", )], dns_view_zones=True, dns_zones_allows=["mytest.zone"], dns_zones_allow_by_default=True, dns_zones_denies=["myother.zone"]) ``` ## NS1 Documentation [Team Api Docs](https://ns1.com/api#team) :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[bool] account_manage_account_settings: Whether the team can modify account settings. 
:param pulumi.Input[bool] account_manage_apikeys: Whether the team can modify account apikeys. :param pulumi.Input[bool] account_manage_ip_whitelist: Whether the team can manage ip whitelist. :param pulumi.Input[bool] account_manage_payment_methods: Whether the team can modify account payment methods. :param pulumi.Input[bool] account_manage_plan: Whether the team can modify the account plan. :param pulumi.Input[bool] account_manage_teams: Whether the team can modify other teams in the account. :param pulumi.Input[bool] account_manage_users: Whether the team can modify account users. :param pulumi.Input[bool] account_view_activity_log: Whether the team can view activity logs. :param pulumi.Input[bool] account_view_invoices: Whether the team can view invoices. :param pulumi.Input[bool] data_manage_datafeeds: Whether the team can modify data feeds. :param pulumi.Input[bool] data_manage_datasources: Whether the team can modify data sources. :param pulumi.Input[bool] data_push_to_datafeeds: Whether the team can publish to data feeds. :param pulumi.Input[bool] dhcp_manage_dhcp: Whether the team can manage DHCP. Only relevant for the DDI product. :param pulumi.Input[bool] dhcp_view_dhcp: Whether the team can view DHCP. Only relevant for the DDI product. :param pulumi.Input[bool] dns_manage_zones: Whether the team can modify the accounts zones. :param pulumi.Input[bool] dns_view_zones: Whether the team can view the accounts zones. :param pulumi.Input[bool] dns_zones_allow_by_default: If true, enable the `dns_zones_allow` list, otherwise enable the `dns_zones_deny` list. :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_zones_allows: List of zones that the team may access. :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_zones_denies: List of zones that the team may not access. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamIpWhitelistArgs']]]] ip_whitelists: Array of IP addresses objects to which to grant the team access. 
Each object includes a **name** (string), and **values** (array of strings) associated to each "allow" list. :param pulumi.Input[bool] ipam_manage_ipam: Whether the team can manage IPAM. Only relevant for the DDI product. :param pulumi.Input[bool] ipam_view_ipam: Whether the team can view IPAM. Only relevant for the DDI product. :param pulumi.Input[bool] monitoring_manage_jobs: Whether the team can modify monitoring jobs. :param pulumi.Input[bool] monitoring_manage_lists: Whether the team can modify notification lists. :param pulumi.Input[bool] monitoring_view_jobs: Whether the team can view monitoring jobs. :param pulumi.Input[str] name: The free form name of the team. :param pulumi.Input[bool] security_manage_active_directory: Whether the team can manage global active directory. Only relevant for the DDI product. :param pulumi.Input[bool] security_manage_global2fa: Whether the team can manage global two factor authentication. """ ... @overload def __init__(__self__, resource_name: str, args: Optional[TeamArgs] = None, opts: Optional[pulumi.ResourceOptions] = None): """ Provides a NS1 Team resource. This can be used to create, modify, and delete teams. The credentials used must have the `manage_teams` permission set. 
## Example Usage ```python import pulumi import pulumi_ns1 as ns1 # Create a new NS1 Team example = ns1.Team("example", account_manage_users=False, dns_view_zones=False, ip_whitelists=[ ns1.TeamIpWhitelistArgs( name="whitelist-1", values=[ "1.1.1.1", "2.2.2.2", ], ), ns1.TeamIpWhitelistArgs( name="whitelist-2", values=[ "3.3.3.3", "4.4.4.4", ], ), ]) # Another team example2 = ns1.Team("example2", data_manage_datasources=True, dns_records_allows=[ns1.TeamDnsRecordsAllowArgs( domain="terraform.example.io", include_subdomains=False, type="A", zone="example.io", )], dns_view_zones=True, dns_zones_allows=["mytest.zone"], dns_zones_allow_by_default=True, dns_zones_denies=["myother.zone"]) ``` ## NS1 Documentation [Team Api Docs](https://ns1.com/api#team) :param str resource_name: The name of the resource. :param TeamArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(TeamArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, account_manage_account_settings: Optional[pulumi.Input[bool]] = None, account_manage_apikeys: Optional[pulumi.Input[bool]] = None, account_manage_ip_whitelist: Optional[pulumi.Input[bool]] = None, account_manage_payment_methods: Optional[pulumi.Input[bool]] = None, account_manage_plan: Optional[pulumi.Input[bool]] = None, account_manage_teams: Optional[pulumi.Input[bool]] = None, account_manage_users: Optional[pulumi.Input[bool]] = None, account_view_activity_log: Optional[pulumi.Input[bool]] = None, account_view_invoices: Optional[pulumi.Input[bool]] = None, data_manage_datafeeds: 
Optional[pulumi.Input[bool]] = None, data_manage_datasources: Optional[pulumi.Input[bool]] = None, data_push_to_datafeeds: Optional[pulumi.Input[bool]] = None, dhcp_manage_dhcp: Optional[pulumi.Input[bool]] = None, dhcp_view_dhcp: Optional[pulumi.Input[bool]] = None, dns_manage_zones: Optional[pulumi.Input[bool]] = None, dns_records_allows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamDnsRecordsAllowArgs']]]]] = None, dns_records_denies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamDnsRecordsDenyArgs']]]]] = None, dns_view_zones: Optional[pulumi.Input[bool]] = None, dns_zones_allow_by_default: Optional[pulumi.Input[bool]] = None, dns_zones_allows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dns_zones_denies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamIpWhitelistArgs']]]]] = None, ipam_manage_ipam: Optional[pulumi.Input[bool]] = None, ipam_view_ipam: Optional[pulumi.Input[bool]] = None, monitoring_manage_jobs: Optional[pulumi.Input[bool]] = None, monitoring_manage_lists: Optional[pulumi.Input[bool]] = None, monitoring_view_jobs: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, security_manage_active_directory: Optional[pulumi.Input[bool]] = None, security_manage_global2fa: Optional[pulumi.Input[bool]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = TeamArgs.__new__(TeamArgs) __props__.__dict__["account_manage_account_settings"] = account_manage_account_settings 
__props__.__dict__["account_manage_apikeys"] = account_manage_apikeys __props__.__dict__["account_manage_ip_whitelist"] = account_manage_ip_whitelist __props__.__dict__["account_manage_payment_methods"] = account_manage_payment_methods if account_manage_plan is not None and not opts.urn: warnings.warn("""obsolete, should no longer be used""", DeprecationWarning) pulumi.log.warn("""account_manage_plan is deprecated: obsolete, should no longer be used""") __props__.__dict__["account_manage_plan"] = account_manage_plan __props__.__dict__["account_manage_teams"] = account_manage_teams __props__.__dict__["account_manage_users"] = account_manage_users __props__.__dict__["account_view_activity_log"] = account_view_activity_log __props__.__dict__["account_view_invoices"] = account_view_invoices __props__.__dict__["data_manage_datafeeds"] = data_manage_datafeeds __props__.__dict__["data_manage_datasources"] = data_manage_datasources __props__.__dict__["data_push_to_datafeeds"] = data_push_to_datafeeds __props__.__dict__["dhcp_manage_dhcp"] = dhcp_manage_dhcp __props__.__dict__["dhcp_view_dhcp"] = dhcp_view_dhcp __props__.__dict__["dns_manage_zones"] = dns_manage_zones __props__.__dict__["dns_records_allows"] = dns_records_allows __props__.__dict__["dns_records_denies"] = dns_records_denies __props__.__dict__["dns_view_zones"] = dns_view_zones __props__.__dict__["dns_zones_allow_by_default"] = dns_zones_allow_by_default __props__.__dict__["dns_zones_allows"] = dns_zones_allows __props__.__dict__["dns_zones_denies"] = dns_zones_denies __props__.__dict__["ip_whitelists"] = ip_whitelists __props__.__dict__["ipam_manage_ipam"] = ipam_manage_ipam __props__.__dict__["ipam_view_ipam"] = ipam_view_ipam __props__.__dict__["monitoring_manage_jobs"] = monitoring_manage_jobs __props__.__dict__["monitoring_manage_lists"] = monitoring_manage_lists __props__.__dict__["monitoring_view_jobs"] = monitoring_view_jobs __props__.__dict__["name"] = name 
__props__.__dict__["security_manage_active_directory"] = security_manage_active_directory __props__.__dict__["security_manage_global2fa"] = security_manage_global2fa super(Team, __self__).__init__( 'ns1:index/team:Team', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, account_manage_account_settings: Optional[pulumi.Input[bool]] = None, account_manage_apikeys: Optional[pulumi.Input[bool]] = None, account_manage_ip_whitelist: Optional[pulumi.Input[bool]] = None, account_manage_payment_methods: Optional[pulumi.Input[bool]] = None, account_manage_plan: Optional[pulumi.Input[bool]] = None, account_manage_teams: Optional[pulumi.Input[bool]] = None, account_manage_users: Optional[pulumi.Input[bool]] = None, account_view_activity_log: Optional[pulumi.Input[bool]] = None, account_view_invoices: Optional[pulumi.Input[bool]] = None, data_manage_datafeeds: Optional[pulumi.Input[bool]] = None, data_manage_datasources: Optional[pulumi.Input[bool]] = None, data_push_to_datafeeds: Optional[pulumi.Input[bool]] = None, dhcp_manage_dhcp: Optional[pulumi.Input[bool]] = None, dhcp_view_dhcp: Optional[pulumi.Input[bool]] = None, dns_manage_zones: Optional[pulumi.Input[bool]] = None, dns_records_allows: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamDnsRecordsAllowArgs']]]]] = None, dns_records_denies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamDnsRecordsDenyArgs']]]]] = None, dns_view_zones: Optional[pulumi.Input[bool]] = None, dns_zones_allow_by_default: Optional[pulumi.Input[bool]] = None, dns_zones_allows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, dns_zones_denies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamIpWhitelistArgs']]]]] = None, ipam_manage_ipam: Optional[pulumi.Input[bool]] = None, ipam_view_ipam: 
Optional[pulumi.Input[bool]] = None, monitoring_manage_jobs: Optional[pulumi.Input[bool]] = None, monitoring_manage_lists: Optional[pulumi.Input[bool]] = None, monitoring_view_jobs: Optional[pulumi.Input[bool]] = None, name: Optional[pulumi.Input[str]] = None, security_manage_active_directory: Optional[pulumi.Input[bool]] = None, security_manage_global2fa: Optional[pulumi.Input[bool]] = None) -> 'Team': """ Get an existing Team resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[bool] account_manage_account_settings: Whether the team can modify account settings. :param pulumi.Input[bool] account_manage_apikeys: Whether the team can modify account apikeys. :param pulumi.Input[bool] account_manage_ip_whitelist: Whether the team can manage ip whitelist. :param pulumi.Input[bool] account_manage_payment_methods: Whether the team can modify account payment methods. :param pulumi.Input[bool] account_manage_plan: Whether the team can modify the account plan. :param pulumi.Input[bool] account_manage_teams: Whether the team can modify other teams in the account. :param pulumi.Input[bool] account_manage_users: Whether the team can modify account users. :param pulumi.Input[bool] account_view_activity_log: Whether the team can view activity logs. :param pulumi.Input[bool] account_view_invoices: Whether the team can view invoices. :param pulumi.Input[bool] data_manage_datafeeds: Whether the team can modify data feeds. :param pulumi.Input[bool] data_manage_datasources: Whether the team can modify data sources. :param pulumi.Input[bool] data_push_to_datafeeds: Whether the team can publish to data feeds. :param pulumi.Input[bool] dhcp_manage_dhcp: Whether the team can manage DHCP. 
Only relevant for the DDI product. :param pulumi.Input[bool] dhcp_view_dhcp: Whether the team can view DHCP. Only relevant for the DDI product. :param pulumi.Input[bool] dns_manage_zones: Whether the team can modify the accounts zones. :param pulumi.Input[bool] dns_view_zones: Whether the team can view the accounts zones. :param pulumi.Input[bool] dns_zones_allow_by_default: If true, enable the `dns_zones_allow` list, otherwise enable the `dns_zones_deny` list. :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_zones_allows: List of zones that the team may access. :param pulumi.Input[Sequence[pulumi.Input[str]]] dns_zones_denies: List of zones that the team may not access. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TeamIpWhitelistArgs']]]] ip_whitelists: Array of IP addresses objects to chich to grant the team access. Each object includes a **name** (string), and **values** (array of strings) associated to each "allow" list. :param pulumi.Input[bool] ipam_manage_ipam: Whether the team can manage IPAM. Only relevant for the DDI product. :param pulumi.Input[bool] ipam_view_ipam: Whether the team can view IPAM. Only relevant for the DDI product. :param pulumi.Input[bool] monitoring_manage_jobs: Whether the team can modify monitoring jobs. :param pulumi.Input[bool] monitoring_manage_lists: Whether the team can modify notification lists. :param pulumi.Input[bool] monitoring_view_jobs: Whether the team can view monitoring jobs. :param pulumi.Input[str] name: The free form name of the team. :param pulumi.Input[bool] security_manage_active_directory: Whether the team can manage global active directory. Only relevant for the DDI product. :param pulumi.Input[bool] security_manage_global2fa: Whether the team can manage global two factor authentication. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _TeamState.__new__(_TeamState) __props__.__dict__["account_manage_account_settings"] = account_manage_account_settings __props__.__dict__["account_manage_apikeys"] = account_manage_apikeys __props__.__dict__["account_manage_ip_whitelist"] = account_manage_ip_whitelist __props__.__dict__["account_manage_payment_methods"] = account_manage_payment_methods __props__.__dict__["account_manage_plan"] = account_manage_plan __props__.__dict__["account_manage_teams"] = account_manage_teams __props__.__dict__["account_manage_users"] = account_manage_users __props__.__dict__["account_view_activity_log"] = account_view_activity_log __props__.__dict__["account_view_invoices"] = account_view_invoices __props__.__dict__["data_manage_datafeeds"] = data_manage_datafeeds __props__.__dict__["data_manage_datasources"] = data_manage_datasources __props__.__dict__["data_push_to_datafeeds"] = data_push_to_datafeeds __props__.__dict__["dhcp_manage_dhcp"] = dhcp_manage_dhcp __props__.__dict__["dhcp_view_dhcp"] = dhcp_view_dhcp __props__.__dict__["dns_manage_zones"] = dns_manage_zones __props__.__dict__["dns_records_allows"] = dns_records_allows __props__.__dict__["dns_records_denies"] = dns_records_denies __props__.__dict__["dns_view_zones"] = dns_view_zones __props__.__dict__["dns_zones_allow_by_default"] = dns_zones_allow_by_default __props__.__dict__["dns_zones_allows"] = dns_zones_allows __props__.__dict__["dns_zones_denies"] = dns_zones_denies __props__.__dict__["ip_whitelists"] = ip_whitelists __props__.__dict__["ipam_manage_ipam"] = ipam_manage_ipam __props__.__dict__["ipam_view_ipam"] = ipam_view_ipam __props__.__dict__["monitoring_manage_jobs"] = monitoring_manage_jobs __props__.__dict__["monitoring_manage_lists"] = monitoring_manage_lists __props__.__dict__["monitoring_view_jobs"] = monitoring_view_jobs __props__.__dict__["name"] = name __props__.__dict__["security_manage_active_directory"] = 
security_manage_active_directory __props__.__dict__["security_manage_global2fa"] = security_manage_global2fa return Team(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="accountManageAccountSettings") def account_manage_account_settings(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify account settings. """ return pulumi.get(self, "account_manage_account_settings") @property @pulumi.getter(name="accountManageApikeys") def account_manage_apikeys(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify account apikeys. """ return pulumi.get(self, "account_manage_apikeys") @property @pulumi.getter(name="accountManageIpWhitelist") def account_manage_ip_whitelist(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can manage ip whitelist. """ return pulumi.get(self, "account_manage_ip_whitelist") @property @pulumi.getter(name="accountManagePaymentMethods") def account_manage_payment_methods(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify account payment methods. """ return pulumi.get(self, "account_manage_payment_methods") @property @pulumi.getter(name="accountManagePlan") def account_manage_plan(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify the account plan. """ return pulumi.get(self, "account_manage_plan") @property @pulumi.getter(name="accountManageTeams") def account_manage_teams(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify other teams in the account. """ return pulumi.get(self, "account_manage_teams") @property @pulumi.getter(name="accountManageUsers") def account_manage_users(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify account users. """ return pulumi.get(self, "account_manage_users") @property @pulumi.getter(name="accountViewActivityLog") def account_view_activity_log(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can view activity logs. 
""" return pulumi.get(self, "account_view_activity_log") @property @pulumi.getter(name="accountViewInvoices") def account_view_invoices(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can view invoices. """ return pulumi.get(self, "account_view_invoices") @property @pulumi.getter(name="dataManageDatafeeds") def data_manage_datafeeds(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify data feeds. """ return pulumi.get(self, "data_manage_datafeeds") @property @pulumi.getter(name="dataManageDatasources") def data_manage_datasources(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify data sources. """ return pulumi.get(self, "data_manage_datasources") @property @pulumi.getter(name="dataPushToDatafeeds") def data_push_to_datafeeds(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can publish to data feeds. """ return pulumi.get(self, "data_push_to_datafeeds") @property @pulumi.getter(name="dhcpManageDhcp") def dhcp_manage_dhcp(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can manage DHCP. Only relevant for the DDI product. """ return pulumi.get(self, "dhcp_manage_dhcp") @property @pulumi.getter(name="dhcpViewDhcp") def dhcp_view_dhcp(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can view DHCP. Only relevant for the DDI product. """ return pulumi.get(self, "dhcp_view_dhcp") @property @pulumi.getter(name="dnsManageZones") def dns_manage_zones(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify the accounts zones. 
""" return pulumi.get(self, "dns_manage_zones") @property @pulumi.getter(name="dnsRecordsAllows") def dns_records_allows(self) -> pulumi.Output[Optional[Sequence['outputs.TeamDnsRecordsAllow']]]: return pulumi.get(self, "dns_records_allows") @property @pulumi.getter(name="dnsRecordsDenies") def dns_records_denies(self) -> pulumi.Output[Optional[Sequence['outputs.TeamDnsRecordsDeny']]]: return pulumi.get(self, "dns_records_denies") @property @pulumi.getter(name="dnsViewZones") def dns_view_zones(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can view the accounts zones. """ return pulumi.get(self, "dns_view_zones") @property @pulumi.getter(name="dnsZonesAllowByDefault") def dns_zones_allow_by_default(self) -> pulumi.Output[Optional[bool]]: """ If true, enable the `dns_zones_allow` list, otherwise enable the `dns_zones_deny` list. """ return pulumi.get(self, "dns_zones_allow_by_default") @property @pulumi.getter(name="dnsZonesAllows") def dns_zones_allows(self) -> pulumi.Output[Optional[Sequence[str]]]: """ List of zones that the team may access. """ return pulumi.get(self, "dns_zones_allows") @property @pulumi.getter(name="dnsZonesDenies") def dns_zones_denies(self) -> pulumi.Output[Optional[Sequence[str]]]: """ List of zones that the team may not access. """ return pulumi.get(self, "dns_zones_denies") @property @pulumi.getter(name="ipWhitelists") def ip_whitelists(self) -> pulumi.Output[Optional[Sequence['outputs.TeamIpWhitelist']]]: """ Array of IP addresses objects to chich to grant the team access. Each object includes a **name** (string), and **values** (array of strings) associated to each "allow" list. """ return pulumi.get(self, "ip_whitelists") @property @pulumi.getter(name="ipamManageIpam") def ipam_manage_ipam(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can manage IPAM. Only relevant for the DDI product. 
""" return pulumi.get(self, "ipam_manage_ipam") @property @pulumi.getter(name="ipamViewIpam") def ipam_view_ipam(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can view IPAM. Only relevant for the DDI product. """ return pulumi.get(self, "ipam_view_ipam") @property @pulumi.getter(name="monitoringManageJobs") def monitoring_manage_jobs(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify monitoring jobs. """ return pulumi.get(self, "monitoring_manage_jobs") @property @pulumi.getter(name="monitoringManageLists") def monitoring_manage_lists(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can modify notification lists. """ return pulumi.get(self, "monitoring_manage_lists") @property @pulumi.getter(name="monitoringViewJobs") def monitoring_view_jobs(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can view monitoring jobs. """ return pulumi.get(self, "monitoring_view_jobs") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The free form name of the team. """ return pulumi.get(self, "name") @property @pulumi.getter(name="securityManageActiveDirectory") def security_manage_active_directory(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can manage global active directory. Only relevant for the DDI product. """ return pulumi.get(self, "security_manage_active_directory") @property @pulumi.getter(name="securityManageGlobal2fa") def security_manage_global2fa(self) -> pulumi.Output[Optional[bool]]: """ Whether the team can manage global two factor authentication. """ return pulumi.get(self, "security_manage_global2fa")
49.136952
281
0.677744
9,545
79,651
5.346674
0.028601
0.095485
0.091704
0.097347
0.969726
0.96714
0.964475
0.961555
0.960556
0.959478
0
0.001585
0.223889
79,651
1,620
282
49.167284
0.823967
0.25279
0
0.922175
1
0
0.147478
0.077068
0
0
0
0
0
1
0.167377
false
0.001066
0.007463
0.006397
0.275053
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
ff346eba158dd230e698b00181eaacb027574951
20,753
py
Python
scripts/fill_in_vcf.py
shohei-kojima/MEGAnE
be8142b2245ff3e01e889912c51a44f8659ac187
[ "MIT" ]
2
2022-03-30T00:59:56.000Z
2022-03-31T18:29:36.000Z
scripts/fill_in_vcf.py
shohei-kojima/MEGAnE
be8142b2245ff3e01e889912c51a44f8659ac187
[ "MIT" ]
null
null
null
scripts/fill_in_vcf.py
shohei-kojima/MEGAnE
be8142b2245ff3e01e889912c51a44f8659ac187
[ "MIT" ]
null
null
null
#!/usr/bin/env python
'''
Author: Shohei Kojima @ RIKEN
Copyright (c) 2020 RIKEN
All Rights Reserved
See file LICENSE for details.

Fill in missing genotypes ('0/.') in merged MEI insertion / absence VCFs by
re-counting per-sample supporting evidence against the scaffold VCF.
'''
import os,datetime,collections,gzip
from multiprocessing import Pool
import mmap,io
import log,traceback


# NOTE(review): this worker has the same name as the module-level driver
# `fill_in_ins(args, params, filenames)` defined below, so it is shadowed at
# import time and is effectively dead code (the multiprocessing path uses
# `fill_in_ins_mmap`, and the single-process path inlines this logic).
# Kept for reference; consider renaming it upstream.
def fill_in_ins(args, sample_id, params, bps, geno_orig):
    '''
    Count overhang reads of one sample supporting insertion breakpoints.

    Scans the sample's gzip, tab-delimited overhang file; lines whose e-value
    (column 3) passes params.overhang_evalue_threshold are resolved through
    `bps` (ME name -> chr -> position -> variant ID). Hits for variants the
    sample has no genotype for (absent from geno_orig[id]) are tallied.
    Returns [reads, sample_id] where reads is a collections.Counter keyed by
    variant ID.
    '''
    dir=args.sample_id_to_dir[sample_id]
    f=args.dirs[dir][2]  # overhang evidence file of this sample
    reads=collections.Counter()
    with gzip.open(f, 'rt') as infile:
        for line in infile:
            ls=line.strip().split('\t')
            if float(ls[2]) < params.overhang_evalue_threshold:
                # ls[0] looks like 'chr:start-end/L...' or '.../R...'
                chr,tmp=ls[0].split(':', 1)
                if chr in args.chr:
                    start,tmp=tmp.split('-', 1)
                    end,lr,_=tmp.split('/', 2)
                    # 'L' overhangs anchor at the left breakpoint, otherwise right
                    bp=int(start) if lr == 'L' else int(end)
                    me,_=ls[1].split(',', 1)
                    if me in bps:
                        if chr in bps[me]:
                            if bp in bps[me][chr]:
                                id=bps[me][chr][bp]
                                if not sample_id in geno_orig[id]:
                                    reads[id] += 1
    return [reads, sample_id]


def fill_in_ins_mmap(args, sample_id, params, bps, geno_orig):
    '''
    Same logic as the plain insertion worker, but reads the gzip file through
    an mmap (GzipFile over a read-only mapping) to cut buffered-I/O overhead
    in Pool workers. Returns [reads, sample_id].
    '''
    dir=args.sample_id_to_dir[sample_id]
    f=args.dirs[dir][2]
    reads=collections.Counter()
    with open(f) as infile0:
        with mmap.mmap(infile0.fileno(), 0, access=mmap.ACCESS_READ) as mapped:
            with io.TextIOWrapper(gzip.GzipFile(fileobj=mapped)) as infile:
                for line in infile:
                    ls=line.strip().split('\t')
                    if float(ls[2]) < params.overhang_evalue_threshold:
                        chr,tmp=ls[0].split(':', 1)
                        if chr in args.chr:
                            start,tmp=tmp.split('-', 1)
                            end,lr,_=tmp.split('/', 2)
                            bp=int(start) if lr == 'L' else int(end)
                            me,_=ls[1].split(',', 1)
                            if me in bps:
                                if chr in bps[me]:
                                    if bp in bps[me][chr]:
                                        id=bps[me][chr][bp]
                                        if not sample_id in geno_orig[id]:
                                            reads[id] += 1
    return [reads, sample_id]


def fill_in_ins(args, params, filenames):
    '''
    Driver: add missing insertion genotypes ('0/.') to the scaffold VCF.

    Loads the scaffold VCF (filenames.merged_vcf_ins, or args.input_scaffold
    when filling in a chunk of samples), builds a breakpoint lookup
    (ME -> chr -> position -> variant ID), re-counts overhang support for
    samples without a call, then writes filenames.filled_vcf_ins. Variants
    with too many filled-in genotypes get FILTER 'M'.
    '''
    log.logger.debug('started')
    try:
        # load chunk samples
        only_fill_in=False
        if args.input_scaffold is not None:
            sample_ids_chunk=set()
            for dir in args.dirs:
                sample_ids_chunk.add(args.dirs[dir][-1])
            log.logger.debug('%d samples found in args.dirs' % len(sample_ids_chunk))
            only_fill_in=True
            scaffold_f=args.input_scaffold
        else:
            scaffold_f=filenames.merged_vcf_ins
        # load scaffold
        geno_orig={}   # variant ID -> {sample_id: genotype} for non-0/0 calls
        bps={}         # ME name -> chr -> position -> variant ID
        chrs_set=set()
        with gzip.open(scaffold_f) as infile:
            for line in infile:
                line=line.decode()
                if line[0] == '#':
                    if '#CHROM' in line:
                        hs=line.strip().split('\t')
                        sample_ids=hs[9:]
                        if args.input_scaffold is not None:
                            # keep only chunk samples that exist in the scaffold
                            sample_ids_set=set(sample_ids)
                            not_found=[]
                            found=[]
                            for sample_id in sample_ids_chunk:
                                if not sample_id in sample_ids_set:
                                    not_found.append(sample_id)
                                else:
                                    found.append(sample_id)
                            if len(not_found) >= 1:
                                log.logger.error('Samples not found in the scaffold VCF = %s' % ';'.join(not_found))
                                log.logger.error('%d samples not found in the scaffold VCF. Please check if you specified correct files.' % len(not_found))
                                exit(1)
                            log.logger.info('%d samples found in the scaffold VCF. %d will be analyzed.' % (len(found), len(found)))
                            skip_n= len(sample_ids) - len(found)
                            log.logger.info('%d samples in the scaffold VCF were not found in the chunk file. %d samples will be skipped.' % (skip_n, skip_n))
                            sample_ids=found
                else:
                    ls=line.strip().split('\t')
                    # pull MEI class(es) and 0-based end from the INFO column
                    for info in ls[7].split(';'):
                        if 'MEI=' in info:
                            mes=info.replace('MEI=', '')
                        elif '0END=' in info:
                            end=info.replace('0END=', '')
                            break
                    for me in mes.split('|'):
                        if not me in bps:
                            bps[me]={}
                        if not ls[0] in bps[me]:
                            bps[me][ls[0]]={}
                        for p in range(int(ls[1]), int(end) + 1):
                            bps[me][ls[0]][p]=ls[2]
                    geno_orig[ls[2]]={}
                    for h,v in zip(hs[9:], ls[9:]):
                        if not v == '0/0':
                            if only_fill_in is True:
                                if not h in sample_ids_chunk:
                                    continue
                            geno_orig[ls[2]][h]=v
                    chrs_set.add(ls[0])
        if args.chr is None:
            log.logger.debug('args.chr was set.')
            args.chr=chrs_set
        sample_ids_n=len(sample_ids)
        if args.mmap is True:
            # fill-in, mmap. NOTE: bps.copy() is shallow (inner dicts shared);
            # harmless here because workers only read bps.
            bpss=[ bps.copy() for _ in range(args.p) ]
            print_chunk=10 if args.p == 1 else 5
            chunk=0
            with Pool(processes=args.p) as p:
                for n in range(0, sample_ids_n, args.p):
                    end= n + args.p
                    if end > sample_ids_n:
                        end=sample_ids_n
                    jobs=[]
                    for sample_id,bps_each in zip(sample_ids[n:end], bpss):
                        jobs.append(p.apply_async(fill_in_ins_mmap, (args, sample_id, params, bps_each, geno_orig)))
                    res=[]
                    for j in jobs:
                        res.append(j.get())
                    for reads,sample_id in res:
                        for id in reads:
                            if reads[id] >= params.min_support_reads_ins:
                                geno_orig[id][sample_id]='0/.'
                    chunk += 1
                    if (chunk % print_chunk) == 0:
                        log.logger.info('Adding missing genotypes, %d files processed...' % (chunk * args.p))
                log.logger.info('Adding missing genotypes, %d files processed...' % end)
        else:
            # fill-in, single
            processed_n=0
            for sample_id in sample_ids:
                dir=args.sample_id_to_dir[sample_id]
                f=args.dirs[dir][2]
                reads=collections.Counter()
                with gzip.open(f) as infile:
                    for line in infile:
                        ls=line.decode().strip().split('\t')
                        if float(ls[2]) < params.overhang_evalue_threshold:
                            chr,tmp=ls[0].split(':', 1)
                            # fix: restrict to target chromosomes, matching the
                            # mmap worker path (previously only the mmap mode
                            # honored args.chr here)
                            if chr in args.chr:
                                start,tmp=tmp.split('-', 1)
                                end,lr,_=tmp.split('/', 2)
                                bp=int(start) if lr == 'L' else int(end)
                                me,_=ls[1].split(',', 1)
                                if me in bps:
                                    if chr in bps[me]:
                                        if bp in bps[me][chr]:
                                            id=bps[me][chr][bp]
                                            if not sample_id in geno_orig[id]:
                                                reads[id] += 1
                for id in reads:
                    if reads[id] >= params.min_support_reads_ins:
                        geno_orig[id][sample_id]='0/.'
                processed_n += 1
                if (processed_n % params.processed_interval) == 0:
                    log.logger.info('%d samples processed...' % processed_n)
        # output
        missing_line_added=False
        with gzip.open(filenames.filled_vcf_ins, 'wt') as outfile:
            with gzip.open(scaffold_f) as infile:
                for line in infile:
                    line=line.decode()
                    if line[0] == '#':
                        if '##FILTER=<ID=' in line and missing_line_added is False:
                            # inject the 'M' FILTER definition before the first FILTER header
                            missing_line='##FILTER=<ID=M,Description="Too many missing genotypes">\n'
                            outfile.write(missing_line)
                            missing_line_added=True
                        if '#CHROM' in line:
                            # rewrite sample columns (chunk mode may drop samples)
                            ls=line.strip().split('\t')
                            tmp=ls[:9]
                            tmp.extend(sample_ids)
                            line='\t'.join(tmp) +'\n'
                        # every header line is passed through
                        outfile.write(line)
                    else:
                        ls=line.split('\t', 10)
                        tmp=ls[:9]
                        zero=0     # samples with no evidence at all -> written as 0/0
                        missing=0  # samples filled in as 0/.
                        ac=0       # alt allele count over called genotypes
                        for sample_id in sample_ids:
                            if sample_id in geno_orig[ls[2]]:
                                v=geno_orig[ls[2]][sample_id]
                                tmp.append(v)
                                if v == '0/.':
                                    missing += 1
                                elif v == '1/1':
                                    ac += 2
                                else:
                                    ac += 1
                            else:
                                tmp.append('0/0')
                                zero += 1
                        if args.input_scaffold is None:
                            # flag variants whose missingness is large relative
                            # to AC; low-AF variants (<5%) use the stricter 2x rule
                            change_to_nonpass=False
                            if (ac / (sample_ids_n * 2)) < 0.05:
                                if missing >= (ac * 2):
                                    change_to_nonpass=True
                            elif missing >= ac or missing > (zero / 2):
                                change_to_nonpass=True
                            if change_to_nonpass is True:
                                if tmp[6] == 'PASS':
                                    tmp[6]='M'
                                else:
                                    tmp[6]='%s;M' % tmp[6]
                        outfile.write('\t'.join(tmp) +'\n')
    except Exception:
        # fix: was a bare 'except:', which also caught the SystemExit raised by
        # the intentional exit(1) above and logged a spurious traceback
        log.logger.error('\n'+ traceback.format_exc())
        exit(1)


# NOTE(review): shadowed by the module-level driver `fill_in_abs(args, params,
# filenames)` defined later in this file, same situation as `fill_in_ins` above.
def fill_in_abs(args, sample_id, bps):
    '''
    Collect absence-variant IDs supported by one sample.

    Scans the sample's gzip, tab-delimited absence evidence file; an event is
    accepted when both its start (column 3) and end (column 4) map to the SAME
    variant ID in `bps` (chr -> position -> variant ID, positions pre-slopped
    by the driver). Returns [to_be_added, sample_id] where to_be_added is a
    set of variant IDs.
    '''
    dir=args.sample_id_to_dir[sample_id]
    f=args.dirs[dir][1]  # absence evidence file of this sample
    to_be_added=set()
    with gzip.open(f, 'rt') as infile:
        for line in infile:
            ls=line.strip().split('\t')
            chr=ls[1]
            if chr in args.chr:
                start=int(ls[2])
                end=int(ls[3])
                if chr in bps:
                    if start in bps[chr]:
                        if end in bps[chr]:
                            if bps[chr][start] == bps[chr][end]:
                                to_be_added.add(bps[chr][start])
    return [to_be_added, sample_id]


def fill_in_abs_mmap(args, sample_id, bps):
    '''
    Same logic as the plain absence worker, but reads the gzip file through an
    mmap for Pool workers. Returns [to_be_added, sample_id].
    '''
    dir=args.sample_id_to_dir[sample_id]
    f=args.dirs[dir][1]
    to_be_added=set()
    with open(f) as infile0:
        with mmap.mmap(infile0.fileno(), 0, access=mmap.ACCESS_READ) as mapped:
            with io.TextIOWrapper(gzip.GzipFile(fileobj=mapped)) as infile:
                for line in infile:
                    ls=line.strip().split('\t')
                    chr=ls[1]
                    if chr in args.chr:
                        start=int(ls[2])
                        end=int(ls[3])
                        if chr in bps:
                            if start in bps[chr]:
                                if end in bps[chr]:
                                    if bps[chr][start] == bps[chr][end]:
                                        to_be_added.add(bps[chr][start])
    return [to_be_added, sample_id]
% (skip_n, skip_n)) sample_ids=found else: ls=line.strip().split('\t') for info in ls[7].split(';'): if '0END=' in info: end=info.replace('0END=', '') break if not ls[0] in bps: bps[ls[0]]={} for p in range(int(ls[1]) - slop_len, int(ls[1]) + slop_len): bps[ls[0]][p]=ls[2] for p in range(int(end) - slop_len, int(end) + slop_len): bps[ls[0]][p]=ls[2] geno_orig[ls[2]]={} for h,v in zip(hs[9:], ls[9:]): if not v == '0/0': if only_fill_in is True: if not h in sample_ids_chunk: continue geno_orig[ls[2]][h]=v chrs_set.add(ls[0]) if args.chr is None: log.logger.debug('args.chr was set.') args.chr=chrs_set sample_ids_n=len(sample_ids) if args.mmap is True: # fill-in, mmap bpss=[ bps.copy() for _ in range(args.p) ] print_chunk=10 if args.p == 1 else 5 chunk=0 with Pool(processes=args.p) as p: for n in range(0, sample_ids_n, args.p): end= n + args.p if end > sample_ids_n: end=sample_ids_n jobs=[] for sample_id,bps_each in zip(sample_ids[n:end], bpss): jobs.append(p.apply_async(fill_in_abs_mmap, (args, sample_id, bps_each))) res=[] for j in jobs: res.append(j.get()) for to_be_added,sample_id in res: for id in to_be_added: if not sample_id in geno_orig[id]: geno_orig[id][sample_id]='0/.' chunk += 1 if (chunk % print_chunk) == 0: log.logger.info('Adding missing genotypes, %d files processed...' % (chunk * args.p)) log.logger.info('Adding missing genotypes, %d files processed...' % end) else: # fill-in, single processed_n=0 for sample_id in sample_ids: dir=args.sample_id_to_dir[sample_id] f=args.dirs[dir][1] to_be_added=set() with gzip.open(f) as infile: for line in infile: ls=line.decode().strip().split('\t') chr=ls[1] start=int(ls[2]) end=int(ls[3]) if chr in bps: if start in bps[chr]: if end in bps[chr]: if bps[chr][start] == bps[chr][end]: to_be_added.add(bps[chr][start]) for id in to_be_added: if not sample_id in geno_orig[id]: geno_orig[id][sample_id]='0/.' processed_n += 1 if (processed_n % params.processed_interval) == 0: log.logger.info('%d samples processed...' 
% processed_n) # output missing_line_added=False with gzip.open(filenames.filled_vcf_abs, 'wt') as outfile: with gzip.open(scaffold_f) as infile: for line in infile: line=line.decode() if line[0] == '#': if '##FILTER=<ID=' in line and missing_line_added is False: missing_line='##FILTER=<ID=M,Description="Too many missing genotypes">\n' outfile.write(missing_line) missing_line_added=True if '#CHROM' in line: ls=line.strip().split('\t') tmp=ls[:9] tmp.extend(sample_ids) line='\t'.join(tmp) +'\n' outfile.write(line) else: ls=line.split('\t', 10) tmp=ls[:9] zero=0 missing=0 ac=0 for sample_id in sample_ids: if sample_id in geno_orig[ls[2]]: v=geno_orig[ls[2]][sample_id] tmp.append(v) if v == '0/.': missing += 1 elif v == '1/1': ac += 2 else: ac += 1 else: tmp.append('0/0') zero += 1 if args.input_scaffold is None: change_to_nonpass=False if (ac / (sample_ids_n * 2)) < 0.05: if missing >= (ac * 2): change_to_nonpass=True elif ((ac + missing) / (sample_ids_n * 2)) < 0.75: if missing >= ac or missing > zero: change_to_nonpass=True if change_to_nonpass is True: if tmp[6] == 'PASS': tmp[6]='M' else: tmp[6]='%s;M' % tmp[6] outfile.write('\t'.join(tmp) +'\n') except: log.logger.error('\n'+ traceback.format_exc()) exit(1)
45.312227
158
0.405146
2,283
20,753
3.535261
0.08629
0.050551
0.021063
0.018585
0.955396
0.9409
0.936935
0.933713
0.908314
0.899393
0
0.017343
0.494338
20,753
457
159
45.411379
0.751763
0.012769
0
0.910843
0
0.004819
0.059798
0.003029
0
0
0
0
0
1
0.014458
false
0.024096
0.009639
0
0.033735
0.009639
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
ff373818908b60bc440470aba8722a129ee4580a
1,601
py
Python
orms.py
akita8/risparmi
e3c0db91f5227b322c849cc5ac03847868fcf77b
[ "MIT" ]
null
null
null
orms.py
akita8/risparmi
e3c0db91f5227b322c849cc5ac03847868fcf77b
[ "MIT" ]
null
null
null
orms.py
akita8/risparmi
e3c0db91f5227b322c849cc5ac03847868fcf77b
[ "MIT" ]
null
null
null
from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, Float, Date Base = declarative_base() class Bond(Base): __tablename__ = 'bond' id = Column(Integer, primary_key=True) symbol = Column(String, primary_key=True) denomination = Column(String) market = Column(String) sector = Column(String) currency = Column(String) isin = Column(String) nation = Column(String) transaction = Column(String) typology = Column(String) account = Column(String) quantity = Column(Integer) buy_sell_price = Column(Float) price_issued = Column(Float) coupon = Column(Float) commission = Column(Float) tax_percentage = Column(Float) exchange_rate = Column(Float) owner = Column(String) date_of_transaction = Column(Date) date_of_refund = Column(Date) date_of_issue = Column(Date) class Stock(Base): __tablename__ = 'stock' id = Column(Integer, primary_key=True) symbol = Column(String, primary_key=True) denomination = Column(String) market = Column(String) sector = Column(String) currency = Column(String) isin = Column(String) nation = Column(String) transaction = Column(String) tax_on_purchase_percentage = Column(Float) account = Column(String) quantity = Column(Integer) buy_sell_price = Column(Float) dividend = Column(Float) commission = Column(Float) tax_percentage = Column(Float) exchange_rate = Column(Float) owner = Column(String) date_of_transaction = Column(Date) # commento
27.135593
59
0.695191
184
1,601
5.86413
0.26087
0.23355
0.0519
0.040779
0.71177
0.71177
0.71177
0.71177
0.71177
0.71177
0
0
0.209244
1,601
58
60
27.603448
0.852291
0.004997
0
0.708333
0
0
0.005657
0
0
0
0
0
0
1
0
false
0
0.041667
0
0.979167
0
0
0
0
null
1
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
8
ff438b0e49ed9515d7af5552d5750f89f433ae6f
11,951
py
Python
dizoo/overcooked/envs/overcooked_env.py
sailxjx/DI-engine
c6763f8e2ba885a2a02f611195a1b5f8b50bff00
[ "Apache-2.0" ]
464
2021-07-08T07:26:33.000Z
2022-03-31T12:35:16.000Z
dizoo/overcooked/envs/overcooked_env.py
sailxjx/DI-engine
c6763f8e2ba885a2a02f611195a1b5f8b50bff00
[ "Apache-2.0" ]
177
2021-07-09T08:22:55.000Z
2022-03-31T07:35:22.000Z
dizoo/overcooked/envs/overcooked_env.py
sailxjx/DI-engine
c6763f8e2ba885a2a02f611195a1b5f8b50bff00
[ "Apache-2.0" ]
92
2021-07-08T12:16:37.000Z
2022-03-31T09:24:41.000Z
from namedlist import namedlist import numpy as np import gym from typing import Any, Union, List import copy from overcooked_ai_py.mdp.actions import Action, Direction from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS from ding.envs import BaseEnv, BaseEnvTimestep, BaseEnvInfo from ding.envs.common.env_element import EnvElement, EnvElementInfo from ding.utils import ENV_REGISTRY OvercookEnvTimestep = namedlist('OvercookEnvTimestep', ['obs', 'reward', 'done', 'info']) OvercookEnvInfo = namedlist('OvercookEnvInfo', ['agent_num', 'obs_space', 'act_space', 'rew_space']) # n, s = Direction.NORTH, Direction.SOUTH # e, w = Direction.EAST, Direction.WEST # stay, interact = Action.STAY, Action.INTERACT # Action.ALL_ACTIONS: [n, s, e, w, stay, interact] @ENV_REGISTRY.register('overcooked') class OvercookEnv(BaseEnv): def __init__(self, cfg) -> None: self._cfg = cfg self._env_name = cfg.get("env_name", "cramped_room") self._horizon = cfg.get("horizon", 400) self._concat_obs = cfg.get("concat_obs", False) self._action_mask = cfg.get("action_mask", True) self._use_shaped_reward = cfg.get("use_shaped_reward", True) self.mdp = OvercookedGridworld.from_layout_name(self._env_name) self.base_env = OvercookedEnv.from_mdp(self.mdp, horizon=self._horizon, info_level=0) featurize_fn = lambda mdp, state: mdp.lossless_state_encoding(state) self.featurize_fn = featurize_fn self.action_dim = len(Action.ALL_ACTIONS) self.action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS)) # rightnow overcook environment encoding only support 2 agent game self.agent_num = 2 # set up obs shape dummy_mdp = self.base_env.mdp dummy_state = dummy_mdp.get_standard_start_state() self.obs_shape = self.featurize_fn(dummy_mdp, dummy_state)[0].shape def seed(self, seed: int, dynamic_seed: bool = True) -> None: self._seed = seed self._dynamic_seed = 
dynamic_seed np.random.seed(self._seed) def close(self) -> None: # Note: the real env instance only has a empty close method, only pas pass def step(self, action): if isinstance(action, list): action = np.concatenate(action) assert all(self.action_space.contains(a) for a in action), "%r (%s) invalid" % (action, type(action)) agent_action, other_agent_action = [Action.INDEX_TO_ACTION[a] for a in action] if self.agent_idx == 0: joint_action = (agent_action, other_agent_action) else: joint_action = (other_agent_action, agent_action) next_state, reward, done, env_info = self.base_env.step(joint_action) if self._use_shaped_reward: reward += env_info['shaped_r_by_agent'][0] reward += env_info['shaped_r_by_agent'][1] reward = np.array([float(reward)]) self._final_eval_reward += reward ob_p0, ob_p1 = self.featurize_fn(self.mdp, next_state) if self.agent_idx == 0: both_agents_ob = [ob_p0, ob_p1] else: both_agents_ob = [ob_p1, ob_p0] if self._concat_obs: both_agents_ob = np.concatenate(both_agents_ob) else: both_agents_ob = np.stack(both_agents_ob) env_info["policy_agent_idx"] = self.agent_idx env_info["final_eval_reward"] = self._final_eval_reward action_mask = self.get_action_mask() if self._action_mask: obs = { "agent_state": both_agents_ob, "overcooked_state": self.base_env.state, "other_agent_env_idx": 1 - self.agent_idx, "action_mask": action_mask } else: obs = both_agents_ob return OvercookEnvTimestep(obs, reward, done, env_info) def reset(self): self.base_env.reset() self._final_eval_reward = 0 self.mdp = self.base_env.mdp # random init agent index self.agent_idx = np.random.choice([0, 1]) ob_p0, ob_p1 = self.featurize_fn(self.mdp, self.base_env.state) if self.agent_idx == 0: both_agents_ob = [ob_p0, ob_p1] else: both_agents_ob = [ob_p1, ob_p0] if self._concat_obs: both_agents_ob = np.concatenate(both_agents_ob) else: both_agents_ob = np.stack(both_agents_ob) action_mask = self.get_action_mask() if self._action_mask: obs = { "agent_state": both_agents_ob, 
"overcooked_state": self.base_env.state, "other_agent_env_idx": 1 - self.agent_idx, "action_mask": action_mask } else: obs = both_agents_ob return obs def get_available_actions(self): return self.mdp.get_actions(self.base_env.state) def get_action_mask(self): available_actions = self.get_available_actions() action_masks = np.zeros((2, self.action_dim)) for i in range(self.action_dim): if Action.INDEX_TO_ACTION[i] in available_actions[0]: action_masks[0][i] = 1 if Action.INDEX_TO_ACTION[i] in available_actions[1]: action_masks[1][i] = 1 return action_masks def info(self): T = EnvElementInfo if self._concat_obs: agent_state = list(self.obs_shape) agent_state[0] = agent_state[0] * 2 agent_state = tuple(agent_state) else: agent_state = (self.agent_num, self.obs_shape) env_info = OvercookEnvInfo( agent_num=self.agent_num, obs_space=T({ 'agent_state': agent_state, 'action_mask': (self.agent_num, self.action_dim), }, None), act_space=T((self.agent_num, self.action_dim), None), rew_space=T((1, ), None) ) return env_info def __repr__(self): pass @ENV_REGISTRY.register('overcooked_game') class OvercookGameEnv(BaseEnv): def __init__(self, cfg) -> None: self._cfg = cfg self._env_name = cfg.get("env_name", "cramped_room") self._horizon = cfg.get("horizon", 400) self._concat_obs = cfg.get("concat_obs", False) self._action_mask = cfg.get("action_mask", False) self._use_shaped_reward = cfg.get("use_shaped_reward", True) self.mdp = OvercookedGridworld.from_layout_name(self._env_name) self.base_env = OvercookedEnv.from_mdp(self.mdp, horizon=self._horizon, info_level=0) featurize_fn = lambda mdp, state: mdp.lossless_state_encoding(state) self.featurize_fn = featurize_fn self.action_dim = len(Action.ALL_ACTIONS) self.action_space = gym.spaces.Discrete(len(Action.ALL_ACTIONS)) # rightnow overcook environment encoding only support 2 agent game self.agent_num = 2 # set up obs shape dummy_mdp = self.base_env.mdp dummy_state = dummy_mdp.get_standard_start_state() self.obs_shape = 
self.featurize_fn(dummy_mdp, dummy_state)[0].shape def seed(self, seed: int, dynamic_seed: bool = True) -> None: self._seed = seed self._dynamic_seed = dynamic_seed np.random.seed(self._seed) def close(self) -> None: # Note: the real env instance only has a empty close method, only pas pass def step(self, action): if isinstance(action, list): action = np.array(action).astype(np.int) if action.shape == (2, 1): action = [action[0][0], action[1][0]] assert all(self.action_space.contains(a) for a in action), "%r (%s) invalid" % (action, type(action)) agent_action, other_agent_action = [Action.INDEX_TO_ACTION[a] for a in action] if self.agent_idx == 0: joint_action = (agent_action, other_agent_action) else: joint_action = (other_agent_action, agent_action) next_state, reward, done, env_info = self.base_env.step(joint_action) reward = np.array([float(reward)]) self._final_eval_reward += reward if self._use_shaped_reward: self._final_eval_reward += env_info['shaped_r_by_agent'][0] self._final_eval_reward += env_info['shaped_r_by_agent'][1] rewards = np.array([reward, reward]).astype(np.float32) if self._use_shaped_reward: rewards[0] += env_info['shaped_r_by_agent'][0] rewards[1] += env_info['shaped_r_by_agent'][1] ob_p0, ob_p1 = self.featurize_fn(self.mdp, next_state) if self.agent_idx == 0: both_agents_ob = [ob_p0, ob_p1] else: both_agents_ob = [ob_p1, ob_p0] if self._concat_obs: both_agents_ob = np.concatenate(both_agents_ob) else: both_agents_ob = np.stack(both_agents_ob) env_info["policy_agent_idx"] = self.agent_idx env_info["final_eval_reward"] = self._final_eval_reward action_mask = self.get_action_mask() if self._action_mask: obs = { "agent_state": both_agents_ob, "overcooked_state": self.base_env.state, "other_agent_env_idx": 1 - self.agent_idx, "action_mask": action_mask } else: obs = both_agents_ob return OvercookEnvTimestep(obs, rewards, done, [env_info, env_info]) def reset(self): self.base_env.reset() self._final_eval_reward = 0 self.mdp = self.base_env.mdp # 
random init agent index self.agent_idx = np.random.choice([0, 1]) #fix init agent index self.agent_idx = 0 ob_p0, ob_p1 = self.featurize_fn(self.mdp, self.base_env.state) if self.agent_idx == 0: both_agents_ob = [ob_p0, ob_p1] else: both_agents_ob = [ob_p1, ob_p0] if self._concat_obs: both_agents_ob = np.concatenate(both_agents_ob) else: both_agents_ob = np.stack(both_agents_ob) action_mask = self.get_action_mask() if self._action_mask: obs = { "agent_state": both_agents_ob, "overcooked_state": self.base_env.state, "other_agent_env_idx": 1 - self.agent_idx, "action_mask": action_mask } else: obs = both_agents_ob return obs def get_available_actions(self): return self.mdp.get_actions(self.base_env.state) def get_action_mask(self): available_actions = self.get_available_actions() action_masks = np.zeros((2, self.action_dim)) for i in range(self.action_dim): if Action.INDEX_TO_ACTION[i] in available_actions[0]: action_masks[0][i] = 1 if Action.INDEX_TO_ACTION[i] in available_actions[1]: action_masks[1][i] = 1 return action_masks def info(self): T = EnvElementInfo if self._concat_obs: agent_state = list(self.obs_shape) agent_state[0] = agent_state[0] * 2 agent_state = tuple(agent_state) else: agent_state = (self.agent_num, self.obs_shape) env_info = OvercookEnvInfo( agent_num=self.agent_num, obs_space=T({ 'agent_state': agent_state, 'action_mask': (self.agent_num, self.action_dim), }, None), act_space=T((self.agent_num, self.action_dim), None), rew_space=T((1, ), None) ) return env_info def __repr__(self): return "DI-engine Overcooked GameEnv"
37.700315
129
0.622542
1,579
11,951
4.385687
0.11083
0.046209
0.055451
0.021949
0.863827
0.851697
0.838989
0.83278
0.824404
0.824404
0
0.010771
0.27755
11,951
317
130
37.700315
0.79129
0.045185
0
0.84375
0
0
0.064924
0
0
0
0
0
0.007813
1
0.070313
false
0.011719
0.042969
0.011719
0.164063
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
ff4f03a22a53623c1143d47f10543a54cb1d64dc
139
py
Python
napari_feature_visualization/_tests/test_function.py
haesleinhuepf/napari-feature-visualization
b3b13ddadfa6af9530146967ace5e6ee787154e6
[ "BSD-3-Clause" ]
6
2021-04-26T08:47:10.000Z
2022-01-30T12:39:05.000Z
napari_feature_visualization/_tests/test_function.py
haesleinhuepf/napari-feature-visualization
b3b13ddadfa6af9530146967ace5e6ee787154e6
[ "BSD-3-Clause" ]
14
2021-04-30T14:15:18.000Z
2022-02-06T21:33:51.000Z
napari_feature_visualization/_tests/test_function.py
haesleinhuepf/napari-feature-visualization
b3b13ddadfa6af9530146967ace5e6ee787154e6
[ "BSD-3-Clause" ]
3
2021-05-02T13:46:15.000Z
2021-06-02T13:37:41.000Z
from napari_feature_visualization import feature_vis # TODO: Find out how to write test cases for plugins def test_placeholder(): pass
27.8
52
0.805755
21
139
5.142857
0.904762
0
0
0
0
0
0
0
0
0
0
0
0.158273
139
4
53
34.75
0.923077
0.359712
0
0
0
0
0
0
0
0
0
0.25
0
1
0.333333
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
1
1
1
0
1
0
0
8
ff927e2b5d6ba969f52e5623a2fbf9bd51bde6c9
118
py
Python
test/run/t420.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
2,671
2015-01-03T08:23:25.000Z
2022-03-31T06:15:48.000Z
test/run/t420.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
972
2015-01-05T08:11:00.000Z
2022-03-29T13:47:15.000Z
test/run/t420.py
timmartin/skulpt
2e3a3fbbaccc12baa29094a717ceec491a8a6750
[ "MIT" ]
845
2015-01-03T19:53:36.000Z
2022-03-29T18:34:22.000Z
print range(10) print range(1,10) print range(0,10,2) print range(0,-10,-1) print range(0,-10,2) print range(-10,0,2)
16.857143
21
0.686441
27
118
3
0.222222
0.740741
0.407407
0.481481
0.592593
0.592593
0.592593
0
0
0
0
0.198113
0.101695
118
6
22
19.666667
0.566038
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0
null
null
1
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
7
444af393701909cb4e64e9d48fe39e4b678bc889
186
py
Python
espresso_orm/__init__.py
voathnak/espresso-orm
bcf822088ca2ad984555d2e3910b37a395bef86d
[ "MIT" ]
null
null
null
espresso_orm/__init__.py
voathnak/espresso-orm
bcf822088ca2ad984555d2e3910b37a395bef86d
[ "MIT" ]
null
null
null
espresso_orm/__init__.py
voathnak/espresso-orm
bcf822088ca2ad984555d2e3910b37a395bef86d
[ "MIT" ]
null
null
null
from .fields import Fields from .fields import String from .fields import Integer from .fields import Floor from .fields import One2many from .Models import Models from . import Records
23.25
28
0.811828
27
186
5.592593
0.333333
0.331126
0.529801
0
0
0
0
0
0
0
0
0.006329
0.150538
186
8
29
23.25
0.949367
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
447fae76c7d9798edca23fa142e4a3fd85b971c1
9,533
py
Python
DQM/HLTEvF/test/hlt_gen_dqm_gui_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
DQM/HLTEvF/test/hlt_gen_dqm_gui_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
DQM/HLTEvF/test/hlt_gen_dqm_gui_cfg.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms process = cms.Process("DQM") process.load("DQM.HLTEvF.HLTMonitor_cff") process.load("DQM.HLTEvF.HLTEventInfoClient_cff") process.load("DQMServices.Core.DQM_cfg") process.load("DQMServices.Components.DQMEnvironment_cfi") process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) process.source = cms.Source("PoolSource", fileNames = cms.untracked.vstring('file:HLTFromDigiRaw.root') ) process.MessageLogger = cms.Service("MessageLogger", detailedInfo = cms.untracked.PSet( threshold = cms.untracked.string('INFO') ), critical = cms.untracked.PSet( threshold = cms.untracked.string('ERROR') ), debugModules = cms.untracked.vstring('*'), cout = cms.untracked.PSet( threshold = cms.untracked.string('WARNING'), WARNING = cms.untracked.PSet( limit = cms.untracked.int32(0) ), noLineBreaks = cms.untracked.bool(True) ), destinations = cms.untracked.vstring('detailedInfo', 'critical', 'cout') ) process.dumpcont = cms.EDAnalyzer("EventContentAnalyzer") process.p = cms.EndPath(process.dqmEnv*process.dqmSaver) process.PoolSource.fileNames = ['file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 
'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 
'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 
'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 
'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root', 'file:/tmp/bjbloom/0491EAAC-DF19-DD11-AECD-000423D98950.root'] process.DQMStore.verbose = 0 process.DQM.collectorHost = 'srv-c2d05-12' process.DQM.collectorPort = 9190 process.dqmSaver.convention = 'Online' process.dqmSaver.dirName = '.' process.dqmSaver.producer = 'DQM' process.dqmEnv.subSystemFolder = 'HLT' process.dqmSaver.saveByRun = -1 process.dqmSaver.saveAtJobEnd = False
123.805195
347
0.770691
1,291
9,533
5.687839
0.061967
0.119161
0.238322
0.374506
0.868719
0.868719
0.868719
0.851151
0.851151
0.851151
0
0.264076
0.049827
9,533
76
348
125.434211
0.546589
0
0
0.402985
0
0
0.800881
0.789131
0
0
0
0
0
1
0
false
0
0.014925
0
0.014925
0
0
0
0
null
0
1
1
1
1
1
1
1
1
0
1
0
0
0
0
1
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
12
92675da721f25d00fcb428192112c3a2ee962287
2,493
py
Python
hub/migrations/0018_auto_20211104_1348.py
AtakanAytar/Django-Restaurant-app
30d7e1e4ceaec049858a4199d86783aa214edc16
[ "MIT" ]
null
null
null
hub/migrations/0018_auto_20211104_1348.py
AtakanAytar/Django-Restaurant-app
30d7e1e4ceaec049858a4199d86783aa214edc16
[ "MIT" ]
null
null
null
hub/migrations/0018_auto_20211104_1348.py
AtakanAytar/Django-Restaurant-app
30d7e1e4ceaec049858a4199d86783aa214edc16
[ "MIT" ]
null
null
null
# Generated by Django 3.2.8 on 2021-11-04 10:48 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('hub', '0017_auto_20211029_1416'), ] operations = [ migrations.AlterField( model_name='branch_info', name='branch_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='branch_info', name='restaurant_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='menu_item', name='branch_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='menu_item', name='restaurant_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='message', name='branch_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='message', name='restaurant_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='order', name='branch_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='order', name='restaurant_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='qr_link_resolve', name='branch_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='qr_link_resolve', name='restaurant_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='room', name='branch_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='room', name='restaurant_id', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='user_info', name='branch_name', field=models.CharField(max_length=500), ), migrations.AlterField( model_name='user_info', name='restaurant_id', field=models.CharField(max_length=500), ), ]
29.678571
51
0.546731
235
2,493
5.565957
0.2
0.214067
0.267584
0.310398
0.880734
0.880734
0.880734
0.84633
0.84633
0.84633
0
0.044485
0.341757
2,493
83
52
30.036145
0.75259
0.018051
0
0.896104
1
0
0.123467
0.009403
0
0
0
0
0
1
0
false
0
0.012987
0
0.051948
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
11
928352bc32d386e5662b3a5b82509d3c031b85a5
100
py
Python
napari_arboretum/_hookimpls.py
dstansby/arboretum
697598d600312ae527a1fb3f08021307feeeb571
[ "MIT" ]
3
2021-12-29T16:48:00.000Z
2022-03-31T09:19:55.000Z
napari_arboretum/_hookimpls.py
dstansby/arboretum
697598d600312ae527a1fb3f08021307feeeb571
[ "MIT" ]
25
2021-11-30T11:29:06.000Z
2022-03-31T13:07:13.000Z
napari_arboretum/_hookimpls.py
dstansby/arboretum
697598d600312ae527a1fb3f08021307feeeb571
[ "MIT" ]
4
2021-12-21T00:51:26.000Z
2022-03-09T15:55:45.000Z
from .plugin import Arboretum def napari_experimental_provide_dock_widget(): return Arboretum
16.666667
46
0.82
12
100
6.5
0.916667
0
0
0
0
0
0
0
0
0
0
0
0.14
100
5
47
20
0.906977
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
1
1
1
0
0
7
928761ad6353f5c1aea1aae1433f5118a0eb7607
274
py
Python
udhrpy/__init__.py
uiuc-sst/udhr
285ccc60d2e5197c641d4abb32bea20337d8f006
[ "MIT" ]
null
null
null
udhrpy/__init__.py
uiuc-sst/udhr
285ccc60d2e5197c641d4abb32bea20337d8f006
[ "MIT" ]
null
null
null
udhrpy/__init__.py
uiuc-sst/udhr
285ccc60d2e5197c641d4abb32bea20337d8f006
[ "MIT" ]
null
null
null
from udhrpy.udhr_dataset import UDHR_Dataset from udhrpy.prepare_data import load_audio from udhrpy.prepare_data import load_text from udhrpy.prepare_data import load_phones from udhrpy.prepare_data import create_hdf5 from udhrpy.prepare_data import load_dict_from_txtfile
34.25
54
0.886861
44
274
5.204545
0.340909
0.262009
0.371179
0.458515
0.659389
0.541485
0
0
0
0
0
0.004016
0.091241
274
7
55
39.142857
0.915663
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
928b46c886c802dc518fb854a9cdef7b61e301f2
86
py
Python
addons14/base_tier_validation_formula/models/__init__.py
odoochain/addons_oca
55d456d798aebe16e49b4a6070765f206a8885ca
[ "MIT" ]
1
2021-06-10T14:59:13.000Z
2021-06-10T14:59:13.000Z
addons14/base_tier_validation_formula/models/__init__.py
odoochain/addons_oca
55d456d798aebe16e49b4a6070765f206a8885ca
[ "MIT" ]
null
null
null
addons14/base_tier_validation_formula/models/__init__.py
odoochain/addons_oca
55d456d798aebe16e49b4a6070765f206a8885ca
[ "MIT" ]
1
2021-04-09T09:44:44.000Z
2021-04-09T09:44:44.000Z
from . import tier_definition from . import tier_validation from . import tier_review
21.5
29
0.825581
12
86
5.666667
0.5
0.441176
0.617647
0
0
0
0
0
0
0
0
0
0.139535
86
3
30
28.666667
0.918919
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
92a921089354527eb0c9398f452b67fbf03bb615
63
py
Python
tests/__init__.py
MrSnowball-dev/telegraph
da629de7c00c3b8b0c7ab8ef4bf23caf419a3c6c
[ "MIT" ]
null
null
null
tests/__init__.py
MrSnowball-dev/telegraph
da629de7c00c3b8b0c7ab8ef4bf23caf419a3c6c
[ "MIT" ]
null
null
null
tests/__init__.py
MrSnowball-dev/telegraph
da629de7c00c3b8b0c7ab8ef4bf23caf419a3c6c
[ "MIT" ]
null
null
null
from . import test_html_converter from . import test_telegraph
21
33
0.84127
9
63
5.555556
0.666667
0.4
0.56
0
0
0
0
0
0
0
0
0
0.126984
63
2
34
31.5
0.909091
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
2ba461f988bac4cedbb2244767fb9308f731d621
327
py
Python
packages/gtmapi/lmsrvcore/middleware/__init__.py
gigabackup/gigantum-client
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
[ "MIT" ]
60
2018-09-26T15:46:00.000Z
2021-10-10T02:37:14.000Z
packages/gtmapi/lmsrvcore/middleware/__init__.py
gigabackup/gigantum-client
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
[ "MIT" ]
1,706
2018-09-26T16:11:22.000Z
2021-08-20T13:37:59.000Z
packages/gtmapi/lmsrvcore/middleware/__init__.py
griffinmilsap/gigantum-client
70fe6b39b87b1c56351f2b4c551b6f1693813e4f
[ "MIT" ]
11
2019-03-14T13:23:51.000Z
2022-01-25T01:29:16.000Z
from lmsrvcore.middleware.authorization import AuthorizationMiddleware from lmsrvcore.middleware.dataloader import DataloaderMiddleware from lmsrvcore.middleware.error import error_middleware from lmsrvcore.middleware.metric import time_all_resolvers_middleware from lmsrvcore.middleware.cache import RepositoryCacheMiddleware
54.5
70
0.908257
34
327
8.617647
0.441176
0.221843
0.392491
0.225256
0
0
0
0
0
0
0
0
0.061162
327
5
71
65.4
0.954397
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
7
a61100fca55c4f3b8cdf2f47f7bb92303f4fd510
235
py
Python
dataset/__init__.py
gloriatao/mcgproject
d085d9bc978bd086eb4dc9d28c7821eed401be70
[ "MIT" ]
1
2022-01-22T00:59:24.000Z
2022-01-22T00:59:24.000Z
dataset/__init__.py
gloriatao/mcgproject
d085d9bc978bd086eb4dc9d28c7821eed401be70
[ "MIT" ]
null
null
null
dataset/__init__.py
gloriatao/mcgproject
d085d9bc978bd086eb4dc9d28c7821eed401be70
[ "MIT" ]
null
null
null
from dataset.load_mcg_train import load_mcg_train from dataset.load_mcg_test import load_mcg_test from dataset.load_mcg_test_ecg_lstm import load_mcg_test_ecg_lstm from dataset.load_mcg_train_ecg_lstm import load_mcg_train_ecg_lstm
29.375
67
0.902128
44
235
4.272727
0.204545
0.297872
0.319149
0.382979
0.81383
0
0
0
0
0
0
0
0.080851
235
7
68
33.571429
0.87037
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
1
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
8