This preview flattens one wide table, one row per source file. The schema (field and dtype), reconstructed from the header row:

| field | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Each record below shows the repository metadata, the file `content`, and then the remaining numeric columns in the schema order above.
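The dtype listing suggests a columnar layout. A minimal loading-and-filtering sketch, assuming the rows are available as a local Parquet shard; `shard.parquet` is a hypothetical file name, not one documented here:

```python
# Minimal sketch, assuming a local Parquet shard of this table.
# "shard.parquet" is a hypothetical path.
import pandas as pd

df = pd.read_parquet("shard.parquet")
print(df.dtypes)  # should mirror the field/dtype table above

# Example filter: keep rows that parse as Python (AST category signal set)
# and are not flagged as auto-generated.
keep = (df["qsc_codepython_cate_ast_quality_signal"] == 1.0) & (
    df["qsc_code_cate_autogen_quality_signal"] == 0.0
)
print(len(df), "rows,", int(keep.sum()), "kept")
```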
---

**Record 1:** `hexsha` 431b1c493a00103278310911aca99b43b068ecff, `size` 127, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | cyberpunk-akash/recipe-app-api | app/app/calc.py | 3b7dcf86de7bdfcbb43ba52f1b34f3c6499580f7 | ["MIT"] | 1 | 2021-10-31T08:49:32.000Z | 2021-10-31T08:49:32.000Z |
| max_issues | cyberpunk-akash/recipe-app-api | app/app/calc.py | 3b7dcf86de7bdfcbb43ba52f1b34f3c6499580f7 | ["MIT"] | null | null | null |
| max_forks | cyberpunk-akash/recipe-app-api | app/app/calc.py | 3b7dcf86de7bdfcbb43ba52f1b34f3c6499580f7 | ["MIT"] | null | null | null |

`content`:

```python
def add(x, y):
"""Adds two numbers"""
return x+y
def subtract(x, y):
"""Subtracts two numbers"""
return x-y
```

`avg_line_length` 12.7, `max_line_length` 31, `alphanum_fraction` 0.551181
Quality signals (`qsc_*_quality_signal`, schema order): 20 | 127 | 3.5 | 0.5 | 0.114286 | 0.457143 | 0.485714 | 0.514286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.275591 | 127 | 9 | 32 | 14.111111 | 0.76087 | 0.299213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0 | 1 | 0
Flag columns (`qsc_*`, schema order): 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0
`effective` 0, `hits` 6
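The character-level signals of this record can be approximated directly. The dataset's exact counting rules are not documented here, so this is an illustrative sketch, not the reference implementation; the reconstructed source assumes standard blank lines between the two functions, so counts come out near (not necessarily equal to) the stored values:

```python
# Illustrative recomputation of a few character-level signals for calc.py.
content = (
    'def add(x, y):\n'
    '    """Adds two numbers"""\n'
    '    return x+y\n'
    '\n'
    '\n'
    'def subtract(x, y):\n'
    '    """Subtracts two numbers"""\n'
    '    return x-y\n'
)

num_chars = len(content)                                  # ~127 (size column)
frac_ws = sum(c.isspace() for c in content) / num_chars   # ~0.2756 whitespace
max_line = max(len(line) for line in content.splitlines())
print(num_chars, round(frac_ws, 6), max_line)
```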
---

**Record 2:** `hexsha` 4339518ff01be0ade16aab15adeb4a92eb631dbf, `size` 46, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | oltionzefi/daily-coding-problem | problem_333/__init__.py | 4fe3ec53e1f3c7d299849671fdfead462d548cd3 | ["MIT"] | null | null | null |
| max_issues | oltionzefi/daily-coding-problem | problem_333/__init__.py | 4fe3ec53e1f3c7d299849671fdfead462d548cd3 | ["MIT"] | null | null | null |
| max_forks | oltionzefi/daily-coding-problem | problem_333/__init__.py | 4fe3ec53e1f3c7d299849671fdfead462d548cd3 | ["MIT"] | null | null | null |

`content`:

```python
from .problem_333 import knows, get_celebrity
```

`avg_line_length` 23, `max_line_length` 45, `alphanum_fraction` 0.847826
Quality signals (`qsc_*_quality_signal`, schema order): 7 | 46 | 5.285714 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073171 | 0.108696 | 46 | 1 | 46 | 46 | 0.829268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
Flag columns (`qsc_*`, schema order): 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
`effective` 0, `hits` 6
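The word-level numbers for this one-liner are easy to reproduce. Splitting on non-alphanumeric runs (one plausible reading of the unspecified tokenizer) matches this row's stored values: 7 words, mean word length 37/7 ≈ 5.285714, and a unique-word fraction of 1:

```python
# Sketch: word-level quality signals for the single-line record above.
# The split rule is an assumption about the dataset's tokenizer.
import re

content = "from .problem_333 import knows, get_celebrity"
words = re.findall(r"[A-Za-z0-9]+", content)

print(len(words))                           # 7   -> num_words signal
print(sum(map(len, words)) / len(words))    # 5.285714... -> mean word length
print(len(set(words)) / len(words))         # 1.0 -> fraction of unique words
```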
---

**Record 3:** `hexsha` 4341db6eaaad6601802ef5012def3f205a0e68d6, `size` 7,756, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | LeBronLiHD/ZJU2021_MedicineAI_CourseProject | j_factor_pca_ica_euclidean_tsne.py | d19253ace2725545b8eff02ccae957278d6a3402 | ["MIT"] | null | null | null |
| max_issues | LeBronLiHD/ZJU2021_MedicineAI_CourseProject | j_factor_pca_ica_euclidean_tsne.py | d19253ace2725545b8eff02ccae957278d6a3402 | ["MIT"] | null | null | null |
| max_forks | LeBronLiHD/ZJU2021_MedicineAI_CourseProject | j_factor_pca_ica_euclidean_tsne.py | d19253ace2725545b8eff02ccae957278d6a3402 | ["MIT"] | 1 | 2021-11-13T13:26:13.000Z | 2021-11-13T13:26:13.000Z |

`content`:

```python
# -*- coding: utf-8 -*-
"""
Others algorithms to find the feature distribution
1. factor analysis
2. PCA
3. Fast ICA
4. euclidean distance
5. TSNE
"""
import seaborn as sns
from sklearn import decomposition
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.manifold import TSNE
import pandas
import f_load_data
import f_parameters
import f_preprocess
from sklearn.utils import shuffle
import f_single_feature_distribution
import matplotlib.pyplot as plt
def factor_analysis(data, important):
data = data.sample(frac=f_parameters.SAMPLE_RATIO).reset_index(drop=True)
print("data.shape ->", data.shape)
print("important[0] ->", important[0])
important_copy = []
for i in range(len(important[0])):
important_copy.append(important[0][i])
important_copy.append(data.shape[1] - 1)
select_col = []
for i in range(len(important_copy)):
select_col.append(data.columns[important_copy[i]])
data_selected = pandas.DataFrame(data, columns=select_col)
data_selected = f_preprocess.data_normalization(data_selected, have_target=True)
print("data_selected.shape ->", data_selected.shape)
print("data_selected.columns ->", data_selected.columns)
model = decomposition.FactorAnalysis(n_components=f_parameters.N_COMPONENTS)
X = model.fit_transform(data_selected.iloc[:, :-1].values)
pos = pandas.DataFrame()
pos['X'] = X[:, 0]
pos['Y'] = X[:, 1]
target = data.columns[data.shape[1] - 1]
pos[target] = data_selected[target]
axis = pos[pos[target] == 0].plot(kind='scatter', x='X', y='Y', color='green', label=0)
pos[pos[target] == 1].plot(kind='scatter', x='X', y='Y', color='red', label=1, ax=axis)
plt.title("factor_analysis")
plt.show()
def PCA(data, important):
data = data.sample(frac=f_parameters.SAMPLE_RATIO).reset_index(drop=True)
print("data.shape ->", data.shape)
print("important[0] ->", important[0])
important_copy = []
for i in range(len(important[0])):
important_copy.append(important[0][i])
important_copy.append(data.shape[1] - 1)
select_col = []
for i in range(len(important_copy)):
select_col.append(data.columns[important_copy[i]])
data_selected = pandas.DataFrame(data, columns=select_col)
data_selected = f_preprocess.data_normalization(data_selected, have_target=True)
print("data_selected.shape ->", data_selected.shape)
print("data_selected.columns ->", data_selected.columns)
model = decomposition.PCA(n_components=f_parameters.N_COMPONENTS_S)
X = model.fit_transform(data_selected.iloc[:, :-1])
pos = pandas.DataFrame()
pos['X'] = X[:, 0]
pos['Y'] = X[:, 1]
target = data.columns[data.shape[1] - 1]
pos[target] = data_selected[target]
axis = pos[pos[target] == 0].plot(kind='scatter', x='X', y='Y', color='blue', label=0)
pos[pos[target] == 1].plot(kind='scatter', x='X', y='Y', color='red', label=1, ax=axis)
print("explained_variance_ratio_ ->", model.fit(data_selected.iloc[:, :-1].values).explained_variance_ratio_)
plt.title("PCA")
plt.show()
def FastICA(data, important):
data = data.sample(frac=f_parameters.SAMPLE_RATIO).reset_index(drop=True)
print("data.shape ->", data.shape)
print("important[0] ->", important[0])
important_copy = []
for i in range(len(important[0])):
important_copy.append(important[0][i])
important_copy.append(data.shape[1] - 1)
select_col = []
for i in range(len(important_copy)):
select_col.append(data.columns[important_copy[i]])
data_selected = pandas.DataFrame(data, columns=select_col)
data_selected = f_preprocess.data_normalization(data_selected, have_target=True)
print("data_selected.shape ->", data_selected.shape)
print("data_selected.columns ->", data_selected.columns)
model = decomposition.FastICA(n_components=f_parameters.N_COMPONENTS)
X = model.fit_transform(data_selected.iloc[:, :-1])
pos = pandas.DataFrame()
pos['X'] = X[:, 0]
pos['Y'] = X[:, 1]
target = data.columns[data.shape[1] - 1]
pos[target] = data_selected[target]
axis = pos[pos[target] == 0].plot(kind='scatter', x='X', y='Y', color='orange', label=0)
pos[pos[target] == 1].plot(kind='scatter', x='X', y='Y', color='red', label=1, ax=axis)
plt.title("FastICA")
plt.show()
def euclidean(data, important):
data = data.sample(frac=f_parameters.SAMPLE_RATIO).reset_index(drop=True)
print("data.shape ->", data.shape)
print("important[0] ->", important[0])
important_copy = []
for i in range(len(important[0])):
important_copy.append(important[0][i])
important_copy.append(data.shape[1] - 1)
select_col = []
for i in range(len(important_copy)):
select_col.append(data.columns[important_copy[i]])
data_selected = pandas.DataFrame(data, columns=select_col)
data_selected = f_preprocess.data_normalization(data_selected, have_target=True)
print("data_selected.shape ->", data_selected.shape)
print("data_selected.columns ->", data_selected.columns)
similarities = euclidean_distances(data_selected.iloc[:, :-1].values)
model = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, dissimilarity="precomputed", n_jobs=1)
X = model.fit(similarities).embedding_
pos = pandas.DataFrame(X, columns=['X', 'Y'])
pos['X'] = X[:, 0]
pos['Y'] = X[:, 1]
target = data.columns[data.shape[1] - 1]
pos[target] = data_selected[target]
axis = pos[pos[target] == 0].plot(kind='scatter', x='X', y='Y', color='cyan', label=0)
pos[pos[target] == 1].plot(kind='scatter', x='X', y='Y', color='red', label=1, ax=axis)
plt.title("euclidean_distances")
plt.show()
def tSNE(data, important):
data = data.sample(frac=f_parameters.SAMPLE_RATIO).reset_index(drop=True)
print("data.shape ->", data.shape)
print("important[0] ->", important[0])
important_copy = []
for i in range(len(important[0])):
important_copy.append(important[0][i])
important_copy.append(data.shape[1] - 1)
select_col = []
for i in range(len(important_copy)):
select_col.append(data.columns[important_copy[i]])
data_selected = pandas.DataFrame(data, columns=select_col)
data_selected = f_preprocess.data_normalization(data_selected, have_target=True)
print("data_selected.shape ->", data_selected.shape)
print("data_selected.columns ->", data_selected.columns)
date_embedded = TSNE(n_components=2).fit_transform(data_selected.iloc[:, :-1])
pos = pandas.DataFrame(date_embedded, columns=['X', 'Y'])
target = data.columns[data.shape[1] - 1]
pos[target] = data_selected[target]
axis = pos[pos[target] == 0].plot(kind='scatter', x='X', y='Y', color='fuchsia', label=0)
pos[pos[target] == 1].plot(kind='scatter', x='X', y='Y', color='red', label=1, ax=axis)
plt.title("tSNE")
plt.show()
if __name__ == '__main__':
path = f_parameters.DATA_PATH
end_off, merge, end_off_feature, merge_feature, end_off_target, merge_target = f_load_data.f_load_data(path,
test_mode=True)
# end_off, merge, end_off_feature, merge_feature = \
# f_preprocess.data_cleaning(end_off), f_preprocess.data_cleaning(merge), \
# f_preprocess.data_cleaning(end_off_feature), f_preprocess.data_cleaning(merge_feature)
important, important_h = f_single_feature_distribution.single_feature(end_off, end_off_feature, end_off_target, False)
factor_analysis(end_off, important_h)
PCA(end_off, important_h)
FastICA(end_off, important_h)
euclidean(end_off, important_h)
tSNE(end_off, important_h)
```

`avg_line_length` 43.573034, `max_line_length` 122, `alphanum_fraction` 0.679345
Quality signals (`qsc_*_quality_signal`, schema order): 1,088 | 7,756 | 4.64614 | 0.117647 | 0.109199 | 0.05638 | 0.0455 | 0.769337 | 0.746588 | 0.728586 | 0.728586 | 0.714342 | 0.705045 | 0 | 0.013724 | 0.163873 | 7,756 | 177 | 123 | 43.819209 | 0.765767 | 0.046545 | 0 | 0.682119 | 0 | 0 | 0.08234 | 0.017606 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033113 | false | 0 | 0.384106 | 0 | 0.417219 | 0.139073
Flag columns (`qsc_*`, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0
`effective` 0, `hits` 6
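This file repeats nearly the same preamble in all five functions, which is what the `frac_chars_dupe_*grams` signals pick up (around 0.77 of characters sit in duplicated 5-grams). A sketch of that family of signals; the dataset's exact definition is not given here, so this is an illustrative approximation:

```python
# Approximation of qsc_code_frac_chars_dupe_5grams_quality_signal: the share
# of n-gram characters covered by word 5-grams that occur more than once.
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int = 5) -> float:
    words = text.split()
    grams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    if not grams:
        return 0.0
    counts = Counter(grams)
    total = sum(sum(len(w) for w in g) for g in grams)
    dupe = sum(sum(len(w) for w in g) for g in grams if counts[g] > 1)
    return dupe / total if total else 0.0

# e.g. frac_chars_dupe_ngrams(open("j_factor_pca_ica_euclidean_tsne.py").read())
```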
---

**Record 4:** `hexsha` 4a325af9bcd696a032a1362f7f7bc822a11ac7c9, `size` 43, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Addono/mindconnect-iot-extension-python | mindconnectiot/__init__.py | 85231989e62ccb96b9b6df3433c5fe737f047d1e | ["MIT"] | null | null | null |
| max_issues | Addono/mindconnect-iot-extension-python | mindconnectiot/__init__.py | 85231989e62ccb96b9b6df3433c5fe737f047d1e | ["MIT"] | null | null | null |
| max_forks | Addono/mindconnect-iot-extension-python | mindconnectiot/__init__.py | 85231989e62ccb96b9b6df3433c5fe737f047d1e | ["MIT"] | null | null | null |

`content`:

```python
from .mindconnectiot import MindConnectIot
```

`avg_line_length` 21.5, `max_line_length` 42, `alphanum_fraction` 0.883721
Quality signals (`qsc_*_quality_signal`, schema order): 4 | 43 | 9.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093023 | 43 | 1 | 43 | 43 | 0.974359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
Flag columns (`qsc_*`, schema order): 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
`effective` 0, `hits` 6
---

**Record 5:** `hexsha` 4a5adbb6bc303d9fcb5fe4b407d41eab93b35354, `size` 18,169, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | jo2y/google-cloud-python | bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py | 1b76727be16bc4335276f793340bb72d32be7166 | ["Apache-2.0"] | 1 | 2018-06-29T17:53:28.000Z | 2018-06-29T17:53:28.000Z |
| max_issues | jo2y/google-cloud-python | bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py | 1b76727be16bc4335276f793340bb72d32be7166 | ["Apache-2.0"] | null | null | null |
| max_forks | jo2y/google-cloud-python | bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py | 1b76727be16bc4335276f793340bb72d32be7166 | ["Apache-2.0"] | null | null | null |

`content`:

```python
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2
from google.cloud.bigtable_admin_v2.proto import table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class BigtableTableAdminStub(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString,
)
self.CreateTableFromSnapshot = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.ListTables = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ListTables',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString,
)
self.GetTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GetTable',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString,
)
self.DeleteTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ModifyColumnFamilies = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString,
)
self.DropRowRange = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GenerateConsistencyToken = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString,
)
self.CheckConsistency = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString,
)
self.SnapshotTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetSnapshot = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString,
)
self.ListSnapshots = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString,
)
self.DeleteSnapshot = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot',
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class BigtableTableAdminServicer(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def CreateTable(self, request, context):
"""Creates a new table in the specified instance.
The table can be created with a full set of initial column families,
specified in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateTableFromSnapshot(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Creates a new table from the specified snapshot. The target table must
not exist. The snapshot and the table must be in the same instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListTables(self, request, context):
"""Lists all tables served from a specified instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTable(self, request, context):
"""Gets metadata information about the specified table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteTable(self, request, context):
"""Permanently deletes a specified table and all of its data.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyColumnFamilies(self, request, context):
"""Performs a series of column family modifications on the specified table.
Either all or none of the modifications will occur before this method
returns, but data requests received prior to that point may see a table
where only some modifications have taken effect.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DropRowRange(self, request, context):
"""Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GenerateConsistencyToken(self, request, context):
"""This is a private alpha release of Cloud Bigtable replication. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Generates a consistency token for a Table, which can be used in
CheckConsistency to check whether mutations to the table that finished
before this call started have been replicated. The tokens will be available
for 90 days.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CheckConsistency(self, request, context):
"""This is a private alpha release of Cloud Bigtable replication. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Checks replication consistency based on a consistency token, that is, if
replication has caught up based on the conditions specified in the token
and the check request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SnapshotTable(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Creates a new snapshot in the specified cluster from the specified
source table. The cluster and the table must be in the same instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSnapshot(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Gets metadata information about the specified snapshot.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListSnapshots(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Lists all snapshots associated with the specified cluster.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteSnapshot(self, request, context):
"""This is a private alpha release of Cloud Bigtable snapshots. This feature
is not currently available to most Cloud Bigtable customers. This feature
might be changed in backward-incompatible ways and is not recommended for
production use. It is not subject to any SLA or deprecation policy.
Permanently deletes the specified snapshot.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BigtableTableAdminServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateTable': grpc.unary_unary_rpc_method_handler(
servicer.CreateTable,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString,
),
'CreateTableFromSnapshot': grpc.unary_unary_rpc_method_handler(
servicer.CreateTableFromSnapshot,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'ListTables': grpc.unary_unary_rpc_method_handler(
servicer.ListTables,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString,
),
'GetTable': grpc.unary_unary_rpc_method_handler(
servicer.GetTable,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString,
),
'DeleteTable': grpc.unary_unary_rpc_method_handler(
servicer.DeleteTable,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ModifyColumnFamilies': grpc.unary_unary_rpc_method_handler(
servicer.ModifyColumnFamilies,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString,
),
'DropRowRange': grpc.unary_unary_rpc_method_handler(
servicer.DropRowRange,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'GenerateConsistencyToken': grpc.unary_unary_rpc_method_handler(
servicer.GenerateConsistencyToken,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString,
),
'CheckConsistency': grpc.unary_unary_rpc_method_handler(
servicer.CheckConsistency,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString,
),
'SnapshotTable': grpc.unary_unary_rpc_method_handler(
servicer.SnapshotTable,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'GetSnapshot': grpc.unary_unary_rpc_method_handler(
servicer.GetSnapshot,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString,
),
'ListSnapshots': grpc.unary_unary_rpc_method_handler(
servicer.ListSnapshots,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString,
),
'DeleteSnapshot': grpc.unary_unary_rpc_method_handler(
servicer.DeleteSnapshot,
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```

`avg_line_length` 58.421222, `max_line_length` 167, `alphanum_fraction` 0.803181
Quality signals (`qsc_*_quality_signal`, schema order): 2,198 | 18,169 | 6.184258 | 0.102366 | 0.06393 | 0.045317 | 0.055028 | 0.801074 | 0.798205 | 0.788494 | 0.752152 | 0.692562 | 0.692562 | 0 | 0.007825 | 0.14189 | 18,169 | 310 | 168 | 58.609677 | 0.864024 | 0.222742 | 0 | 0.346939 | 1 | 0 | 0.115831 | 0.062409 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076531 | false | 0 | 0.02551 | 0 | 0.112245 | 0
Flag columns (`qsc_*`, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
`effective` 0, `hits` 6
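This record is a generated gRPC stub (its first line reads "Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!"), and the auto-generation category shows up as 1 in the signal run above. A sketch of such a detector; the dataset's real marker list is unknown, so these patterns are assumptions:

```python
# Illustrative auto-generation check in the spirit of
# qsc_code_cate_autogen_quality_signal; the marker list is an assumption.
AUTOGEN_MARKERS = ("generated by", "do not edit", "autogenerated")

def cate_autogen(content: str, head_lines: int = 5) -> int:
    # Only inspect the top of the file, where generators leave their banner.
    head = "\n".join(content.splitlines()[:head_lines]).lower()
    return int(any(marker in head for marker in AUTOGEN_MARKERS))

# For the stub above, the first line alone triggers both markers.
```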
---

**Record 6:** `hexsha` 4a9e774c93c0065560802f0d99e32802d5bf9ac9, `size` 5,658, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | PRIS-CV/RelMatch | fungi_tree.py | 862a84b2bbf157ece4b21969c44d47beef9aa023 | ["MIT"] | 8 | 2021-11-17T07:33:46.000Z | 2021-12-24T05:45:37.000Z |
| max_issues | PRIS-CV/RelMatch | fungi_tree.py | 862a84b2bbf157ece4b21969c44d47beef9aa023 | ["MIT"] | null | null | null |
| max_forks | PRIS-CV/RelMatch | fungi_tree.py | 862a84b2bbf157ece4b21969c44d47beef9aa023 | ["MIT"] | 1 | 2021-11-17T07:33:49.000Z | 2021-11-17T07:33:49.000Z |

`content`:

```python
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
trees = [[0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 1, 0], [2, 2, 1, 0, 0, 0], [3, 3, 2, 0, 0, 0], [4, 4, 0, 0, 0, 0], [5, 5, 3, 1, 1, 0], [6, 6, 4, 0, 0, 0], [7, 7, 5, 0, 0, 0], [8, 8, 6, 0, 0, 0], [9, 9, 7, 2, 0, 0], [10, 1, 0, 3, 1, 0], [11, 10, 8, 2, 0, 0], [12, 11, 9, 0, 0, 0], [13, 12, 10, 0, 0, 0], [14, 13, 10, 0, 0, 0], [15, 14, 11, 4, 2, 1], [16, 15, 12, 5, 0, 0], [17, 16, 0, 0, 0, 0], [18, 13, 10, 0, 0, 0], [19, 17, 13, 2, 0, 0], [20, 18, 14, 0, 0, 0], [21, 19, 13, 2, 0, 0], [22, 20, 15, 0, 0, 0], [23, 11, 9, 0, 0, 0], [24, 21, 16, 6, 3, 1], [25, 22, 12, 5, 0, 0], [26, 23, 9, 0, 0, 0], [27, 7, 5, 0, 0, 0], [28, 24, 17, 7, 0, 0], [29, 8, 6, 0, 0, 0], [30, 25, 18, 5, 0, 0], [31, 26, 19, 0, 0, 0], [32, 27, 20, 8, 4, 1], [33, 28, 9, 0, 0, 0], [34, 29, 0, 0, 1, 0], [35, 30, 21, 0, 0, 0], [36, 31, 22, 0, 0, 0], [37, 32, 12, 5, 0, 0], [38, 33, 23, 0, 0, 0], [39, 34, 24, 0, 0, 0], [40, 35, 25, 6, 3, 1], [41, 36, 26, 9, 5, 0], [42, 37, 14, 0, 0, 0], [43, 38, 10, 0, 0, 0], [44, 39, 15, 0, 0, 0], [45, 40, 27, 10, 0, 0], [46, 41, 3, 1, 1, 0], [47, 4, 0, 0, 0, 0], [48, 27, 20, 8, 4, 1], [49, 21, 16, 6, 3, 1], [50, 42, 28, 1, 0, 0], [51, 8, 6, 0, 0, 0], [52, 6, 4, 0, 1, 0], [53, 43, 29, 11, 6, 2], [54, 7, 5, 0, 0, 0], [55, 44, 10, 0, 0, 0], [56, 33, 30, 0, 0, 0], [57, 45, 28, 1, 0, 0], [58, 46, 31, 12, 0, 0], [59, 47, 32, 2, 0, 0], [60, 34, 24, 0, 0, 0], [61, 32, 12, 5, 0, 0], [62, 20, 15, 0, 0, 0], [63, 20, 15, 0, 0, 0], [64, 48, 33, 13, 0, 0], [65, 49, 23, 0, 0, 0], [66, 50, 34, 0, 0, 0], [67, 51, 35, 2, 0, 0], [68, 8, 6, 0, 0, 0], [69, 52, 1, 0, 0, 0], [70, 53, 11, 4, 2, 1], [71, 18, 14, 0, 0, 0], [72, 54, 0, 0, 0, 0], [73, 21, 16, 6, 3, 1], [74, 6, 4, 0, 0, 0], [75, 55, 33, 13, 0, 0], [76, 56, 36, 0, 0, 0], [77, 57, 23, 0, 0, 0], [78, 58, 37, 5, 0, 0], [79, 4, 0, 0, 0, 0], [80, 59, 28, 1, 0, 0], [81, 60, 38, 2, 0, 0], [82, 48, 33, 13, 0, 0], [83, 61, 9, 0, 0, 0], [84, 62, 22, 0, 0, 0], [85, 63, 39, 14, 0, 0], [86, 64, 10, 0, 0, 0], [87, 48, 33, 13, 0, 0], [88, 65, 40, 5, 0, 0], [89, 33, 23, 0, 0, 0], [90, 66, 9, 0, 0, 0], [91, 67, 41, 15, 7, 1], [92, 68, 19, 0, 0, 0], [93, 55, 33, 13, 0, 0], [94, 48, 33, 13, 0, 0], [95, 55, 33, 13, 0, 0], [96, 69, 42, 16, 0, 0], [97, 70, 43, 17, 8, 3], [98, 27, 20, 8, 4, 1], [99, 6, 4, 0, 0, 0], [100, 71, 44, 18, 9, 1], [101, 13, 10, 0, 0, 0], [102, 72, 12, 5, 0, 0], [103, 73, 45, 0, 0, 0], [104, 34, 24, 0, 0, 0], [105, 8, 6, 0, 0, 0], [106, 48, 33, 13, 0, 0], [107, 29, 0, 0, 1, 0], [108, 34, 24, 0, 0, 0], [109, 48, 33, 13, 0, 0], [110, 55, 33, 13, 0, 0], [111, 74, 46, 0, 0, 0], [112, 75, 39, 14, 1, 0], [113, 13, 10, 0, 0, 0], [114, 76, 38, 2, 0, 0], [115, 77, 47, 2, 0, 0], [116, 29, 0, 0, 1, 0], [117, 67, 41, 15, 10, 1], [118, 55, 33, 13, 0, 0], [119, 48, 33, 13, 0, 0], [120, 34, 24, 0, 0, 0], [121, 78, 48, 13, 0, 0], [122, 30, 21, 0, 0, 0], [123, 79, 12, 5, 0, 0], [124, 80, 49, 15, 10, 1], [125, 55, 33, 13, 0, 0], [126, 81, 28, 1, 0, 0], [127, 82, 50, 19, 0, 0], [128, 34, 24, 0, 0, 0], [129, 48, 33, 13, 0, 0], [130, 83, 27, 10, 0, 0], [131, 7, 5, 0, 0, 0], [132, 8, 6, 0, 0, 0], [133, 55, 33, 13, 0, 0], [134, 49, 9, 0, 0, 0], [135, 55, 33, 13, 0, 0], [136, 84, 51, 20, 0, 0], [137, 69, 42, 16, 0, 0], [138, 53, 11, 4, 2, 1], [139, 85, 52, 21, 11, 0], [140, 86, 0, 0, 0, 0], [141, 87, 19, 0, 0, 0], [142, 72, 12, 5, 0, 0], [143, 8, 6, 0, 0, 0], [144, 88, 53, 5, 0, 0], [145, 89, 12, 5, 0, 0], [146, 90, 54, 0, 0, 0], [147, 91, 12, 5, 0, 0], [148, 92, 47, 2, 0, 0], [149, 93, 48, 13, 0, 0], [150, 7, 5, 0, 0, 0], [151, 8, 6, 0, 0, 0], [152, 94, 41, 15, 7, 1], [153, 7, 5, 0, 0, 0], [154, 95, 55, 13, 0, 0], [155, 7, 5, 0, 0, 0], [156, 96, 33, 13, 0, 0], [157, 97, 8, 2, 0, 0], [158, 98, 47, 2, 0, 0], [159, 99, 56, 0, 0, 0], [160, 52, 1, 0, 0, 0], [161, 26, 19, 0, 0, 0], [162, 55, 33, 13, 0, 0], [163, 100, 57, 2, 0, 0], [164, 101, 12, 5, 0, 0], [165, 29, 0, 0, 0, 0], [166, 102, 58, 0, 0, 0], [167, 103, 9, 0, 0, 0], [168, 48, 33, 13, 0, 0], [169, 34, 24, 0, 0, 0], [170, 101, 12, 5, 0, 0], [171, 59, 28, 1, 0, 0], [172, 104, 28, 1, 0, 0], [173, 15, 12, 5, 0, 0], [174, 105, 12, 5, 0, 0], [175, 106, 28, 1, 0, 0], [176, 107, 12, 5, 0, 0], [177, 102, 58, 0, 0, 0], [178, 108, 13, 2, 0, 0], [179, 108, 13, 2, 0, 0], [180, 6, 4, 0, 0, 0], [181, 109, 59, 0, 0, 0], [182, 8, 6, 0, 0, 0], [183, 13, 10, 0, 0, 0], [184, 110, 60, 22, 4, 1], [185, 111, 61, 13, 0, 0], [186, 112, 62, 2, 0, 0], [187, 113, 28, 1, 0, 0], [188, 114, 63, 0, 0, 0], [189, 115, 58, 0, 0, 0], [190, 116, 26, 9, 5, 0], [191, 117, 64, 0, 1, 0], [192, 34, 24, 0, 1, 0], [193, 59, 28, 1, 0, 0], [194, 118, 8, 2, 0, 0], [195, 119, 65, 5, 0, 0], [196, 120, 13, 2, 0, 0], [197, 6, 4, 0, 1, 0], [198, 121, 6, 0, 0, 0], [199, 122, 66, 0, 0, 0]]
def get_tree_target(pair_1,pair_2):
tree_target_list = []
for i in range(pair_1.size(0)):
if trees[pair_1[i]][0] == trees[pair_2[i]][0]:
tree_target_list.append(0)
elif trees[pair_1[i]][1] == trees[pair_2[i]][1]:
tree_target_list.append(1)
elif trees[pair_1[i]][2] == trees[pair_2[i]][2]:
tree_target_list.append(2)
elif trees[pair_1[i]][3] == trees[pair_2[i]][3]:
tree_target_list.append(3)
elif trees[pair_1[i]][4] == trees[pair_2[i]][4]:
tree_target_list.append(4)
elif trees[pair_1[i]][5] == trees[pair_2[i]][5]:
tree_target_list.append(5)
else:
tree_target_list.append(6)
tree_target_list = Variable(torch.from_numpy(np.array(tree_target_list)).cuda())
return tree_target_list
```

`avg_line_length` 128.590909, `max_line_length` 4,682, `alphanum_fraction` 0.433369
Quality signals (`qsc_*_quality_signal`, schema order): 1,355 | 5,658 | 1.780074 | 0.168266 | 0.223881 | 0.123134 | 0.047264 | 0.305141 | 0 | 0 | 0 | 0 | 0 | 0 | 0.455283 | 0.258925 | 5,658 | 43 | 4,683 | 131.581395 | 0.119962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.166667 | 0 | 0.25 | 0
Flag columns (`qsc_*`, schema order): 0 | 0 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
`effective` 0, `hits` 6
---

**Record 7:** `hexsha` 4aa37bec4d0337ab49fba6294aa68c30b82b9c8e, `size` 145, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | zhengjiwen/cherrypie | cherrypie/utils/auth.py | 6d73dc728918a444808a1915c069a06d4426adb7 | ["Apache-2.0"] | 1 | 2018-09-02T03:14:14.000Z | 2018-09-02T03:14:14.000Z |
| max_issues | zhengjiwen/cherrypie | cherrypie/utils/auth.py | 6d73dc728918a444808a1915c069a06d4426adb7 | ["Apache-2.0"] | null | null | null |
| max_forks | zhengjiwen/cherrypie | cherrypie/utils/auth.py | 6d73dc728918a444808a1915c069a06d4426adb7 | ["Apache-2.0"] | null | null | null |

`content`:

```python
import hmac
def hash_passwd(passwd):
hash_passwd = hmac.new(passwd)
hash_passwd.update(passwd[1:5])
return hash_passwd.hexdigest()
```

`avg_line_length` 18.125, `max_line_length` 35, `alphanum_fraction` 0.724138
Quality signals (`qsc_*_quality_signal`, schema order): 21 | 145 | 4.809524 | 0.52381 | 0.39604 | 0.316832 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016529 | 0.165517 | 145 | 7 | 36 | 20.714286 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0.8 | 0.2 | 0 | 0.6 | 0
Flag columns (`qsc_*`, schema order): 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0
`effective` 0, `hits` 6
---

**Record 8:** `hexsha` 4aaa9d34c5badee0177831e7aa78d3f6d5eca15b, `size` 578, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | StevenTang1998/TextBox | textbox/data/dataloader/__init__.py | acd8298c7e6618384d585146f799d02cc475520c | ["MIT"] | 347 | 2021-01-09T07:55:55.000Z | 2022-03-27T00:46:36.000Z |
| max_issues | StevenTang1998/TextBox | textbox/data/dataloader/__init__.py | acd8298c7e6618384d585146f799d02cc475520c | ["MIT"] | 18 | 2021-01-12T07:37:06.000Z | 2022-01-11T02:26:49.000Z |
| max_forks | StevenTang1998/TextBox | textbox/data/dataloader/__init__.py | acd8298c7e6618384d585146f799d02cc475520c | ["MIT"] | 67 | 2021-01-09T07:23:52.000Z | 2022-03-27T12:02:12.000Z |

`content`:

```python
from textbox.data.dataloader.abstract_dataloader import AbstractDataLoader
from textbox.data.dataloader.single_sent_dataloader import SingleSentenceDataLoader
from textbox.data.dataloader.paired_sent_dataloader import PairedSentenceDataLoader
from textbox.data.dataloader.attr_sent_dataloader import AttributedSentenceDataLoader
from textbox.data.dataloader.kg_sent_dataloader import KGSentenceDataLoader
from textbox.data.dataloader.wikibio_sent_dataloader import WikiBioSentenceDataLoader
from textbox.data.dataloader.rotowire_sent_dataloader import RotoWireSentenceDataLoader
```

`avg_line_length` 82.571429, `max_line_length` 87, `alphanum_fraction` 0.916955
Quality signals (`qsc_*_quality_signal`, schema order): 62 | 578 | 8.33871 | 0.322581 | 0.148936 | 0.203095 | 0.338491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046713 | 578 | 7 | 87 | 82.571429 | 0.938294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
Flag columns (`qsc_*`, schema order): 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
`effective` 0, `hits` 6
---

**Record 9:** `hexsha` 43ac3e9010cefc332cc92e1268d0c2130ae2eb8d, `size` 38, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | sbu-nuclear-astro/test-repo | hello.py | 1a2222327da6e5a11f21cb9d9316ede34c778d72 | ["BSD-3-Clause"] | null | null | null |
| max_issues | sbu-nuclear-astro/test-repo | hello.py | 1a2222327da6e5a11f21cb9d9316ede34c778d72 | ["BSD-3-Clause"] | 3 | 2019-05-03T16:32:55.000Z | 2019-05-03T16:55:44.000Z |
| max_forks | sbu-nuclear-astro/test-repo | hello.py | 1a2222327da6e5a11f21cb9d9316ede34c778d72 | ["BSD-3-Clause"] | 18 | 2019-05-03T16:08:30.000Z | 2019-05-03T16:20:12.000Z |

`content`:

```python
print("hello darkness my old friend")
```

`avg_line_length` 19, `max_line_length` 37, `alphanum_fraction` 0.763158
Remaining signal and flag columns (schema order): 6 | 38 | 4.833333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.131579 | 38 | 1 | 38 | 38 | 0.878788 | 0 | 0 | 0 | 0 | 0 | 0.736842 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1
`effective` 0, `hits` 6
---

**Record 10:** `hexsha` 78f927ab59c49ec2cfad87118ec4d30e66a408bb, `size` 96, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Retraces/UkraineBot | venv/lib/python3.8/site-packages/poetry/console/commands/env/remove.py | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | ["MIT"] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z |
| max_issues | DesmoSearch/Desmobot | venv/lib/python3.8/site-packages/poetry/console/commands/env/remove.py | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z |
| max_forks | DesmoSearch/Desmobot | venv/lib/python3.8/site-packages/poetry/console/commands/env/remove.py | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | null | null | null |

`content` (a pip cache pool path, not Python source):

```
/home/runner/.cache/pip/pool/54/10/11/1f3ccc887810e21c2d1ef9a7066e207e08448f16097bdcfd3a38e5f6d6
```

`avg_line_length` 96, `max_line_length` 96, `alphanum_fraction` 0.895833
Quality signals (`qsc_*_quality_signal`, schema order): 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.4375 | 0 | 96 | 1 | 96 | 96 | 0.458333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0
Flag columns (`qsc_*`, schema order): 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0
`effective` 0, `hits` 6
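Unlike the other records, this row's `content` is a pip cache pool path rather than Python source, and several of its Python-specific `qsc_*` columns come back null. A defensive pre-filter might drop such rows before use; the column choice here is an illustrative assumption, not a rule prescribed by the dataset:

```python
# Sketch: drop rows whose Python-specific signals are null or unset,
# e.g. rows whose content is not actually Python source.
import pandas as pd

def looks_like_python(df: pd.DataFrame) -> pd.Series:
    ast_flag = df["qsc_codepython_cate_ast_quality_signal"]
    return ast_flag.notna() & (ast_flag == 1.0)

# usage: clean = df[looks_like_python(df)]
```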
---

**Record 11:** `hexsha` 6020fe141a8139845e7c06fa33b3d0bfe340b8ba, `size` 2,565, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | hscspring/TheAlgorithms-Python | CodingInterview2/48_LongestSubstringWithoutDup/test_longest_substring_without_dup.py | 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | ["MIT"] | 10 | 2020-07-06T11:00:58.000Z | 2022-01-29T09:25:24.000Z |
| max_issues | hscspring/TheAlgorithms-Python | CodingInterview2/48_LongestSubstringWithoutDup/test_longest_substring_without_dup.py | 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | ["MIT"] | null | null | null |
| max_forks | hscspring/TheAlgorithms-Python | CodingInterview2/48_LongestSubstringWithoutDup/test_longest_substring_without_dup.py | 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | ["MIT"] | 3 | 2020-07-13T06:39:23.000Z | 2020-08-15T16:29:48.000Z |

`content`:

```python
from longest_substring_without_dup import find_sub_string_length_set
from longest_substring_without_dup import find_sub_string_length_dict
from longest_substring_without_dup import find_sub_string_length_dp1
from longest_substring_without_dup import find_sub_string_length_dp2
def test1():
assert find_sub_string_length_set("abcacfrar") == 4
assert find_sub_string_length_dict("abcacfrar") == 4
assert find_sub_string_length_dp1("abcacfrar") == 4
assert find_sub_string_length_dp2("abcacfrar") == 4
def test2():
assert find_sub_string_length_set("acfrarabc") == 4
assert find_sub_string_length_dict("acfrarabc") == 4
assert find_sub_string_length_dp1("acfrarabc") == 4
assert find_sub_string_length_dp2("acfrarabc") == 4
def test3():
assert find_sub_string_length_set("arabcacfr") == 4
assert find_sub_string_length_dict("arabcacfr") == 4
assert find_sub_string_length_dp1("arabcacfr") == 4
assert find_sub_string_length_dp2("arabcacfr") == 4
def test4():
assert find_sub_string_length_set("aaaa") == 1
assert find_sub_string_length_dict("aaaa") == 1
assert find_sub_string_length_dp1("aaaa") == 1
assert find_sub_string_length_dp2("aaaa") == 1
def test5():
assert find_sub_string_length_set("abcdefg") == 7
assert find_sub_string_length_dict("abcdefg") == 7
assert find_sub_string_length_dp1("abcdefg") == 7
assert find_sub_string_length_dp2("abcdefg") == 7
def test6():
assert find_sub_string_length_set("aaabbbccc") == 2
assert find_sub_string_length_dict("aaabbbccc") == 2
assert find_sub_string_length_dp1("aaabbbccc") == 2
assert find_sub_string_length_dp2("aaabbbccc") == 2
def test7():
assert find_sub_string_length_set("abcdcba") == 4
assert find_sub_string_length_dict("abcdcba") == 4
assert find_sub_string_length_dp1("abcdcba") == 4
assert find_sub_string_length_dp2("abcdcba") == 4
def test8():
assert find_sub_string_length_set("abcdaef") == 6
assert find_sub_string_length_dict("abcdaef") == 6
assert find_sub_string_length_dp1("abcdaef") == 6
assert find_sub_string_length_dp2("abcdaef") == 6
def test9():
assert find_sub_string_length_set("a") == 1
assert find_sub_string_length_dict("a") == 1
assert find_sub_string_length_dp1("a") == 1
assert find_sub_string_length_dp2("a") == 1
def test10():
assert find_sub_string_length_set("") == 0
assert find_sub_string_length_dict("") == 0
assert find_sub_string_length_dp1("") == 0
assert find_sub_string_length_dp2("") == 0
```

`avg_line_length` 34.662162, `max_line_length` 69, `alphanum_fraction` 0.748538
Quality signals (`qsc_*_quality_signal`, schema order): 380 | 2,565 | 4.557895 | 0.107895 | 0.177829 | 0.330254 | 0.482679 | 0.911663 | 0.904157 | 0.674365 | 0.127021 | 0.127021 | 0.127021 | 0 | 0.033333 | 0.146199 | 2,565 | 73 | 70 | 35.136986 | 0.757534 | 0 | 0 | 0 | 0 | 0 | 0.096686 | 0 | 0 | 0 | 0 | 0 | 0.740741 | 1 | 0.185185 | true | 0 | 0.074074 | 0 | 0.259259 | 0
Flag columns (`qsc_*`, schema order): 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0
`effective` 0, `hits` 6
---

**Record 12:** `hexsha` 60337dca29e8e691455f118723cf582e68978de5, `size` 1,713, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | arogozhnikov/OBDT | tests/test_greedy.py | 1efbcdce55e21262a720f7e885879c660b8b4873 | ["MIT"] | 4 | 2015-03-12T12:39:09.000Z | 2015-07-08T03:27:56.000Z |
| max_issues | arogozhnikov/OBDT | tests/test_greedy.py | 1efbcdce55e21262a720f7e885879c660b8b4873 | ["MIT"] | null | null | null |
| max_forks | arogozhnikov/OBDT | tests/test_greedy.py | 1efbcdce55e21262a720f7e885879c660b8b4873 | ["MIT"] | null | null | null |

`content`:

```python
from __future__ import division, print_function, absolute_import
__author__ = 'Alex Rogozhnikov'
import numpy
from hep_ml.losses import CompositeLossFunction, MSELossFunction
from pruning import greedy, utils
def test_pruner(mx_filename='../data/formula.mx', higgs_filename='../data/training.csv'):
with open(mx_filename, 'rb') as mx:
formula_mx = mx.read()
X, y, w = utils.get_higgs_data(higgs_filename)
X = numpy.array(X, dtype='float32')
pruner = greedy.GreedyPruner(loss_function=CompositeLossFunction(), iterations=5, n_kept_best=0)
pruner.prune(formula_mx, X, y, w, verbose=True)
pruner = greedy.GreedyPruner(loss_function=CompositeLossFunction(), iterations=5, n_kept_best=5)
pruner.prune(formula_mx, X, y, w, verbose=True)
pruner = greedy.GreedyPruner(loss_function=MSELossFunction(), iterations=5, n_kept_best=5)
pruner.prune(formula_mx, X, y, w, verbose=True)
def test_nesterov_pruner(mx_filename='../data/formula.mx', higgs_filename='../data/training.csv', iterations=30):
with open(mx_filename, 'rb') as mx:
formula_mx = mx.read()
X, y, w = utils.get_higgs_data(higgs_filename)
X = numpy.array(X, dtype='float32')
pruner = greedy.NesterovPruner(loss_function=MSELossFunction(), iterations=iterations, n_nesterov_steps=0)
pruner.prune(formula_mx, X, y, w, verbose=True)
pruner = greedy.NesterovPruner(loss_function=MSELossFunction(), iterations=iterations, n_nesterov_steps=1)
pruner.prune(formula_mx, X, y, w, verbose=True)
pruner = greedy.NesterovPruner(loss_function=MSELossFunction(), iterations=iterations, n_nesterov_steps=2)
pruner.prune(formula_mx, X, y, w, verbose=True)
assert 0 == 1
```

`avg_line_length` 38.066667, `max_line_length` 113, `alphanum_fraction` 0.737887
Quality signals (`qsc_*_quality_signal`, schema order): 235 | 1,713 | 5.157447 | 0.251064 | 0.074257 | 0.019802 | 0.09901 | 0.812706 | 0.812706 | 0.812706 | 0.812706 | 0.812706 | 0.784653 | 0 | 0.011502 | 0.137186 | 1,713 | 44 | 114 | 38.931818 | 0.808525 | 0 | 0 | 0.5 | 0 | 0 | 0.064215 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.214286 | 0.035714
Flag columns (`qsc_*`, schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
`effective` 0, `hits` 6
---

**Record 13:** `hexsha` 60a3ae2bbf640d06c7973f327eac6d535a9f51e2, `size` 14,320, `ext` py, `lang` Python

| | repo_name | repo_path | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | NAL-SupportTeam/NECCS-NO-Automation | function_tests/selenium_test/operation/resource.py | d55df831dcfcec792f7d48392eea3bda3157db21 | ["Apache-2.0"] | null | null | null |
| max_issues | NAL-SupportTeam/NECCS-NO-Automation | function_tests/selenium_test/operation/resource.py | d55df831dcfcec792f7d48392eea3bda3157db21 | ["Apache-2.0"] | null | null | null |
| max_forks | NAL-SupportTeam/NECCS-NO-Automation | function_tests/selenium_test/operation/resource.py | d55df831dcfcec792f7d48392eea3bda3157db21 | ["Apache-2.0"] | 1 | 2018-09-19T07:36:49.000Z | 2018-09-19T07:36:49.000Z |

`content`:

```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# COPYRIGHT (C) NEC CORPORATION 2017
#
import inspect
import base
from conf import config
from selenium.common import exceptions as selenium_except
from selenium import webdriver
from selenium.webdriver.support.ui import Select
SET_BASE_URL = getattr(config, 'SET_BASE_URL')
RESOURCE_LIST = {
"Global IP": "Global IP",
'InterSecVM/SG(Ext)': 'InterSecVM/SG',
'FortiGateVM(5.2.4)': 'FortiGateVM',
'PaloAltoVM': 'PaloAltoVM',
'InterSecVM/SG(Pub)': 'InterSecVM/SG',
'FortiGateVM(5.4.1)': 'FortiGateVM',
'InterSecVM/LB': 'InterSecVM/LB',
'BIG-IP VE': 'BIG-IP VE',
'vThunder(4.0.1)': 'vThunder',
'vThunder(4.1.1)': 'vThunder',
'Firefly': 'Firefly',
'CSR1000v': 'Cisco',
'CSR1000v (Encrypted)': 'Cisco',
'CSR1000v (Unencrypted)': 'Cisco',
}
class ResourceOperations(base.SeleniumBase):
def __init__(self, driver, evidence):
super(ResourceOperations, self).__init__(driver, evidence)
self.driver = driver
self.evidence = evidence
def check_list_resource(self):
driver = self.driver
# Show resource list
self.list_resource()
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"check_data")
def check_list_resource_before(self, input_params):
driver = self.driver
# Get filter key
search_key = self._get_search_key(input_params)
# Show resource list
self.list_resource()
driver.find_element_by_name("resource__filter__q").clear()
driver.find_element_by_name("resource__filter__q").send_keys(search_key)
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"resource_list", "before")
def check_list_resource_after(self, input_params):
driver = self.driver
# Get filter key
search_key = self._get_search_key(input_params)
# Show resource list
self.list_resource()
driver.find_element_by_name("resource__filter__q").clear()
driver.find_element_by_name("resource__filter__q").send_keys(search_key)
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"resource_list", "after")
def check_detail_resource(self, input_params):
driver = self.driver
# Show resource list
self.detail_resource(input_params)
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"check_data")
def check_list_resource_admin(self):
driver = self.driver
# Show resource list
self.list_resource_admin()
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"check_data")
def check_list_resource_admin_before(self, input_params):
driver = self.driver
# Get filter key
search_key = self._get_search_key(input_params)
# Show resource list
self.list_resource_admin()
driver.find_element_by_name("resource__filter__q").clear()
driver.find_element_by_name("resource__filter__q").send_keys(search_key)
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"admin_resource_list", "before")
def check_list_resource_admin_after(self, input_params):
driver = self.driver
# Get filter key
search_key = self._get_search_key(input_params)
# Show resource list
self.list_resource_admin()
driver.find_element_by_name("resource__filter__q").clear()
driver.find_element_by_name("resource__filter__q").send_keys(search_key)
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"admin_resource_list", "after")
def check_detail_resource_admin(self, input_params):
driver = self.driver
# Show resource list
self.detail_resource_admin(input_params)
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"check_data")
def check_create_resource(self):
driver = self.driver
# Check before change globalip status
globalip_line = 0
for num in range(1, 10):
try:
status = self.get_data_from_line(str(num), "1")
except selenium_except.NoSuchElementException:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"not_exist_globalip")
raise Exception("There is no global IP that can be payout")
if self.check_status(status, "Unacquired"):
globalip_line = num
break
else:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"not_exist_globalip")
raise Exception("There is no global IP that can be payout")
# Payout globalip operation
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list", "before")
driver.find_element_by_id("resource_globalip__action_create").click()
self.sleep_time()
driver.find_element_by_id("id_count").clear()
driver.find_element_by_id("id_count").send_keys(1)
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"input_params")
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.sleep_time()
# Check result globalip list
status = self.get_data_from_line(str(globalip_line), "1")
if self.check_status(status, "Unused"):
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list", "after")
else:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list", "after")
raise Exception("Status is invalid after payout of global IP")
def check_update_resource_used(self):
driver = self.driver
# Check before change globalip status
globalip_line = 0
for num in range(1, 10):
try:
status = self.get_data_from_line(str(num), "1")
except selenium_except.NoSuchElementException:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"not_exist_globalip")
raise Exception("There is no global IP that can be update status")
if self.check_status(status, "Unused"):
globalip_line = num
break
else:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"not_exist_globalip")
raise Exception("There is no global IP that can be update status")
# Update globalip operation
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list_unused_to_used", "before")
driver.find_element_by_xpath("//tr[" + str(globalip_line) + "]/td[4]/div/a").click()
self.sleep_time()
Select(driver.find_element_by_id("status")).select_by_value("2")
try:
Select(driver.find_element_by_id("node_id")).select_by_index(0)
except Exception:  # e.g. no selectable node in the "node_id" dropdown
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"input_params_unused_to_used_error")
raise Exception("Parameter of change global IP is incorrect")
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"input_params_unused_to_used")
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.sleep_time()
# Check result globalip list
status = self.get_data_from_line(str(globalip_line), "1")
if self.check_status(status, "Used"):
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list_unused_to_used", "after")
else:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list_unused_to_used", "after")
raise Exception("Status is invalid after change global IP")
def check_update_resource_unused(self):
driver = self.driver
# Check before change globalip status
globalip_line = 0
for num in range(1, 10):
try:
status = self.get_data_from_line(str(num), "1")
except selenium_except.NoSuchElementException:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"not_exist_globalip")
raise Exception("There is no global IP that can be update status")
if self.check_status(status, "Used"):
globalip_line = num
break
else:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"not_exist_globalip")
raise Exception("There is no global IP that can be update status")
# Update globalip operation
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list_used_to_unused", "before")
driver.find_element_by_xpath("//tr[" + str(globalip_line) + "]/td[4]/a").click()
self.sleep_time()
Select(driver.find_element_by_id("status")).select_by_value("0")
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"input_params_used_to_unused")
driver.find_element_by_css_selector("input.btn.btn-primary").click()
self.sleep_time()
# Check result globalip list
status = self.get_data_from_line(str(globalip_line), "1")
if self.check_status(status, "Unused"):
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list_used_to_unused", "after")
else:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list_used_to_unused", "after")
raise Exception("Status is invalid after change global IP")
def check_delete_resource(self):
driver = self.driver
# Check before change globalip status
globalip_line = 0
for num in range(1, 10):
try:
status = self.get_data_from_line(str(num), "1")
except selenium_except.NoSuchElementException:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"not_exist_globalip")
raise Exception("There is no global IP that can be refund")
if self.check_status(status, "Unused"):
globalip_line = num
break
else:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"not_exist_globalip")
raise Exception("There is no global IP that can be refund")
# Return globalip operation
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list", "before")
driver.find_element_by_xpath("//tr[" + str(globalip_line) + "]/td[4]/div/a[2]").click()
driver.find_element_by_xpath("//li/button").click()
self.sleep_time()
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"input_params")
driver.find_element_by_link_text("Delete Resource").click()
self.sleep_time(10)
driver.refresh()
# Check result globalip list
status = self.get_data_from_line(str(globalip_line), "1")
if self.check_status(status, "Unacquired"):
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list", "after")
else:
self.get_screenshot(inspect.currentframe().f_back.f_code.co_name,
"globalip_list", "after")
raise Exception("Status is invalid after refund global IP")
def list_resource(self):
# Show resource list
driver = self.driver
driver.get(SET_BASE_URL + "/dashboard/project/resource/")
self.sleep_time()
def list_resource_admin(self):
# Show resource list for admin
driver = self.driver
driver.get(SET_BASE_URL + "/dashboard/admin/resource/")
self.sleep_time()
def detail_resource(self, input_params):
# Show resource detail
driver = self.driver
self.list_resource()
driver.find_element_by_link_text(input_params["resource_name"]).click()
self.sleep_time()
def detail_resource_admin(self, input_params):
# Show resource detail for admin
driver = self.driver
self.list_resource_admin()
driver.find_element_by_link_text(input_params["resource_name"]).click()
self.sleep_time()
def _get_search_key(self, input_params):
# Get keywords to search in resource list
search_key = ""
if "device_type" in input_params:
search_key = input_params["device_type"]
elif "service_type" in input_params:
search_key = input_params["service_type"]
return RESOURCE_LIST.get(search_key, "")
| 41.627907 | 95 | 0.623673 | 1,728 | 14,320 | 4.864005 | 0.117477 | 0.037478 | 0.066746 | 0.09423 | 0.807615 | 0.796669 | 0.763117 | 0.741344 | 0.730042 | 0.719572 | 0 | 0.006398 | 0.279609 | 14,320 | 343 | 96 | 41.749271 | 0.808356 | 0.089804 | 0 | 0.673307 | 0 | 0 | 0.165756 | 0.03272 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071713 | false | 0.003984 | 0.023904 | 0 | 0.103586 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
60c36c64787448a491fc11c4779dfb32c448b557 | 2,056 | py | Python | src/scml/tests/test_fillna.py | seahrh/sgcharts-ml | de864a28e8ddca1738ca1155a4ad583b4a0fda6b | ["MIT"] | null | null | null | src/scml/tests/test_fillna.py | seahrh/sgcharts-ml | de864a28e8ddca1738ca1155a4ad583b4a0fda6b | ["MIT"] | null | null | null | src/scml/tests/test_fillna.py | seahrh/sgcharts-ml | de864a28e8ddca1738ca1155a4ad583b4a0fda6b | ["MIT"] | null | null | null |
import numpy as np

from scml import fillna


class TestFillna:
    def test_when_nan_is_not_present_then_do_not_fill_1d_array(self):
        np.testing.assert_allclose(
            fillna(np.array([1.2, 1.2]), values=np.array([0, 0]), add_flag=False),
            [1.2, 1.2],
        )
        np.testing.assert_allclose(
            fillna(np.array([1.2, 1.2]), values=np.array([0, 0]), add_flag=True),
            [1.2, 1.2, 0, 0],
        )

    def test_when_nan_is_not_present_then_do_not_fill_2d_array(self):
        np.testing.assert_allclose(
            fillna(
                np.array([[1.2, 1.2], [1.2, 1.2]]),
                values=np.array([[0, 0], [0, 0]]),
                add_flag=False,
            ),
            [[1.2, 1.2], [1.2, 1.2]],
        )
        np.testing.assert_allclose(
            fillna(
                np.array([[1.2, 1.2], [1.2, 1.2]]),
                values=np.array([[0, 0], [0, 0]]),
                add_flag=True,
            ),
            [[1.2, 1.2, 0, 0], [1.2, 1.2, 0, 0]],
        )

    def test_when_nan_is_present_then_do_fill_1d_array(self):
        np.testing.assert_allclose(
            fillna(np.array([1.2, np.nan]), values=np.array([0, 0]), add_flag=False),
            [1.2, 0],
        )
        np.testing.assert_allclose(
            fillna(np.array([1.2, np.nan]), values=np.array([0, 0]), add_flag=True),
            [1.2, 0, 0, 1],
        )

    def test_when_nan_is_present_then_do_fill_2d_array(self):
        np.testing.assert_allclose(
            fillna(
                np.array([[1.2, np.nan], [np.nan, 1.2]]),
                values=np.array([[0, 0], [0, 0]]),
                add_flag=False,
            ),
            [[1.2, 0], [0, 1.2]],
        )
        np.testing.assert_allclose(
            fillna(
                np.array([[1.2, np.nan], [np.nan, 1.2]]),
                values=np.array([[0, 0], [0, 0]]),
                add_flag=True,
            ),
            [[1.2, 0, 0, 1], [0, 1.2, 1, 0]],
        )
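# A minimal sketch of a fillna() consistent with the tests above; the signature
# fillna(a, values, add_flag) is an assumption here, not the actual scml code.
import numpy as np


def fillna_sketch(a, values, add_flag=False):
    mask = np.isnan(a)
    filled = np.where(mask, values, a)  # replace NaNs element-wise
    if not add_flag:
        return filled
    # Append one indicator column per input column (1 where a value was filled).
    return np.concatenate([filled, mask.astype(filled.dtype)], axis=-1)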
| 33.704918 | 86 | 0.448444 | 292 | 2,056 | 2.965753 | 0.116438 | 0.083141 | 0.055427 | 0.069284 | 0.93649 | 0.93649 | 0.935335 | 0.935335 | 0.933025 | 0.882217 | 0 | 0.092116 | 0.376946 | 2,056 | 60 | 87 | 34.266667 | 0.583919 | 0 | 0 | 0.509091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145455 | 1 | 0.072727 | false | 0 | 0.036364 | 0 | 0.127273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
60e521597d31b0001965a3ddec6d15cbe7aa03e0 | 185 | py | Python | mongoengine_simple/database/models.py | LordGhostX/flask-mongo-starter | 10a0ef949e8626cf466e2c9410085069eb5dba36 | ["MIT"] | 7 | 2020-03-26T20:23:44.000Z | 2020-04-11T21:10:14.000Z | mongoengine_simple/database/models.py | LordGhostX/flask-mongo-starter | 10a0ef949e8626cf466e2c9410085069eb5dba36 | ["MIT"] | null | null | null | mongoengine_simple/database/models.py | LordGhostX/flask-mongo-starter | 10a0ef949e8626cf466e2c9410085069eb5dba36 | ["MIT"] | 3 | 2020-04-10T17:59:54.000Z | 2022-01-04T01:52:53.000Z |
from .db import db


class Movie(db.Document):
    name = db.StringField(required=True, unique=True)
    casts = db.StringField(required=True)
    genres = db.StringField(required=True)
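# A usage sketch, assuming a MongoEngine connection has been registered on the
# shared `db` object (e.g. by the app factory); the document values are made up.
movie = Movie(name="Alien", casts="Sigourney Weaver", genres="sci-fi")
movie.save()  # unique=True on `name` raises NotUniqueError on a duplicate insert
print(Movie.objects(name="Alien").count())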
| 26.428571 | 53 | 0.724324 | 25 | 185 | 5.36 | 0.52 | 0.291045 | 0.470149 | 0.559701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156757 | 185 | 6 | 54 | 30.833333 | 0.858974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
60fd30a6418259683171c86d237b5c410e8a44f9 | 29 | py | Python | plugin/src/test/resources/testData/set_declaration.py | ElenaErratic/bug-finder | dba6de2cde12b4b75f8f36668f5d785b460d6641 | ["Apache-2.0"] | 3 | 2020-08-31T12:39:53.000Z | 2021-05-12T10:04:54.000Z | plugin/src/test/resources/testData/set_declaration.py | ElenaErratic/bug-finder | dba6de2cde12b4b75f8f36668f5d785b460d6641 | ["Apache-2.0"] | 1 | 2020-11-27T11:28:47.000Z | 2020-11-27T11:28:47.000Z | plugin/src/test/resources/testData/set_declaration.py | ElenaErratic/bug-finder | dba6de2cde12b4b75f8f36668f5d785b460d6641 | ["Apache-2.0"] | 1 | 2021-06-03T12:45:50.000Z | 2021-06-03T12:45:50.000Z |
def func():
    s = {1, 1, 2}
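# For reference: duplicate elements in a set literal collapse, so the set
# declared above equals {1, 2} — likely the pattern this test data targets.
assert {1, 1, 2} == {1, 2}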
| 14.5 | 17 | 0.37931 | 6 | 29 | 1.833333 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.157895 | 0.344828 | 29 | 2 | 17 | 14.5 | 0.421053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0 | 0.5 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7161a589cf3e4d9dbcfd9d8f0d55bd0294a49cf4 | 40,604 | py | Python | falco/model/jacobians.py | kian1377/falco-python | a9666629845fc72957cd89339f924b9cfb7ce6f5 | ["Apache-2.0"] | 4 | 2019-05-22T22:24:01.000Z | 2021-07-21T13:32:36.000Z | falco/model/jacobians.py | kian1377/falco-python | a9666629845fc72957cd89339f924b9cfb7ce6f5 | ["Apache-2.0"] | 11 | 2018-06-22T01:05:07.000Z | 2021-11-03T13:46:25.000Z | falco/model/jacobians.py | kian1377/falco-python | a9666629845fc72957cd89339f924b9cfb7ce6f5 | ["Apache-2.0"] | 2 | 2018-06-21T23:58:06.000Z | 2021-07-13T21:25:23.000Z |
# pylint: disable=E501
"""Functions to compute the Jacobian for EFC."""
import numpy as np
from numpy.fft import fftshift, fft2
import falco
from falco.util import pad_crop
import falco.prop as fp
def lyot(mp, im, idm):
"""
Differential model used to compute the ctrl Jacobian for Lyot coronagraph.
Specialized compact model used to compute the DM response matrix, aka the
control Jacobian for a Lyot coronagraph. Can include an apodizer, making it
an apodized pupil Lyot coronagraph (APLC). Does not include unknown
aberrations of the full, "truth" model. This model propagates the
first-order Taylor expansion of the phase from the poke of each actuator
of the deformable mirror.
Parameters
----------
mp : ModelParameters
Structure containing optical model parameters
Returns
-------
Gzdl : numpy ndarray
Complex-valued, 2-D array containing the Jacobian for the
specified Zernike mode, DM number, and wavelength.
"""
modvar = falco.config.Object() # Initialize the new structure
modvar.sbpIndex = mp.jac.sbp_inds[im]
modvar.zernIndex = mp.jac.zern_inds[im]
wvl = mp.sbp_centers[modvar.sbpIndex]
mirrorFac = 2. # Phase change is twice the DM surface height.
NdmPad = int(mp.compact.NdmPad)
if mp.flagRotation:
NrelayFactor = 1
else:
NrelayFactor = 0 # zero out the number of relays
if mp.coro.upper() in ('LC', 'APLC', 'FLC', 'SPLC'):
fpm = mp.F3.compact.mask
transOuterFPM = 1. # transmission of points outside the FPM.
elif mp.coro.upper() in ('HLC',):
fpm = np.squeeze(mp.compact.fpmCube[:, :, modvar.sbpIndex]) # complex
# Complex transmission of the points outside the FPM (just fused silica
# with optional dielectric and no metal).
transOuterFPM = fpm[0, 0]
"""Input E-fields"""
Ein = np.squeeze(mp.P1.compact.E[:, :, modvar.sbpIndex])
# Apply a Zernike (in amplitude) at input pupil
# Used only for Zernike sensitivity control, which requires the perfect
# E-field of the differential Zernike term.
if not (modvar.zernIndex == 1):
indsZnoll = modvar.zernIndex # Just send in 1 Zernike mode
zernMat = np.squeeze(falco.zern.gen_norm_zern_maps(mp.P1.compact.Nbeam,
mp.centering, indsZnoll))
zernMat = pad_crop(zernMat, mp.P1.compact.Narr)
Ein = Ein*zernMat*(2*np.pi/wvl)*mp.jac.Zcoef[mp.jac.zerns ==
modvar.zernIndex]
""" Masks and DM surfaces """
pupil = pad_crop(mp.P1.compact.mask, NdmPad)
Ein = pad_crop(Ein, NdmPad)
# Re-image the apodizer from pupil P3 back to pupil P2.
if(mp.flagApod):
apodReimaged = pad_crop(mp.P3.compact.mask, NdmPad)
apodReimaged = fp.relay(apodReimaged, NrelayFactor*mp.Nrelay2to3, mp.centering)
else:
apodReimaged = np.ones((NdmPad, NdmPad))
# Compute the DM surfaces for the current DM commands
if any(mp.dm_ind == 1):
DM1surf = pad_crop(mp.dm1.compact.surfM, NdmPad)
# DM1surf = falco.dm.gen_surf_from_act(mp.dm1, mp.dm1.compact.dx, NdmPad)
else:
DM1surf = np.zeros((NdmPad, NdmPad))
if any(mp.dm_ind == 2):
DM2surf = pad_crop(mp.dm2.compact.surfM, NdmPad)
# DM2surf = falco.dm.gen_surf_from_act(mp.dm2, mp.dm2.compact.dx, NdmPad)
else:
DM2surf = np.zeros((NdmPad, NdmPad))
if mp.flagDM1stop:
DM1stop = pad_crop(mp.dm1.compact.mask, NdmPad)
else:
DM1stop = np.ones((NdmPad, NdmPad))
if(mp.flagDM2stop):
DM2stop = pad_crop(mp.dm2.compact.mask, NdmPad)
else:
DM2stop = np.ones((NdmPad, NdmPad))
# This block is for BMC surface error testing
if mp.flagDMwfe: # if(mp.flagDMwfe && (mp.P1.full.Nbeam==mp.P1.compact.Nbeam))
if any(mp.dm_ind == 1):
Edm1WFE = np.exp(2*np.pi*1j/wvl*pad_crop(mp.dm1.compact.wfe, NdmPad, 'extrapval', 0))
else:
Edm1WFE = np.ones((NdmPad, NdmPad))
if any(mp.dm_ind == 2):
Edm2WFE = np.exp(2*np.pi*1j/wvl*pad_crop(mp.dm2.compact.wfe, NdmPad, 'extrapval', 0))
else:
Edm2WFE = np.ones((NdmPad, NdmPad))
else:
Edm1WFE = np.ones((NdmPad, NdmPad))
Edm2WFE = np.ones((NdmPad, NdmPad))
"""Propagation"""
# Define pupil P1 and Propagate to pupil P2
EP1 = pupil*Ein # E-field at pupil plane P1
EP2 = fp.relay(EP1, NrelayFactor*mp.Nrelay1to2, mp.centering)
# Propagate from P2 to DM1, and apply DM1 surface and aperture stop
if not abs(mp.d_P2_dm1) == 0:
Edm1 = fp.ptp(EP2, mp.P2.compact.dx*NdmPad, wvl, mp.d_P2_dm1)
else:
Edm1 = EP2
Edm1out = Edm1*Edm1WFE*DM1stop*np.exp(mirrorFac*2*np.pi*1j*DM1surf/wvl)
""" ---------- DM1 ---------- """
if idm == 1:
Gzdl = np.zeros((mp.Fend.corr.Npix, mp.dm1.Nele), dtype=complex)
# Two array sizes (at same resolution) of influence functions for MFT
# and angular spectrum
NboxPad1AS = int(mp.dm1.compact.NboxAS) # array size for FFT-AS propagations from DM1->DM2->DM1
# Adjust the sub-array location of the influence function for the added zero padding
mp.dm1.compact.xy_box_lowerLeft_AS = mp.dm1.compact.xy_box_lowerLeft -\
(mp.dm1.compact.NboxAS-mp.dm1.compact.Nbox)/2.
if any(mp.dm_ind == 2):
DM2surf = pad_crop(DM2surf, mp.dm1.compact.NdmPad)
else:
DM2surf = np.zeros((mp.dm1.compact.NdmPad, mp.dm1.compact.NdmPad))
if(mp.flagDM2stop):
DM2stop = pad_crop(DM2stop, mp.dm1.compact.NdmPad)
else:
DM2stop = np.ones((mp.dm1.compact.NdmPad, mp.dm1.compact.NdmPad))
apodReimaged = pad_crop(apodReimaged, mp.dm1.compact.NdmPad)
Edm1pad = pad_crop(Edm1out, mp.dm1.compact.NdmPad) # Pad or crop for expected sub-array indexing
Edm2WFEpad = pad_crop(Edm2WFE, mp.dm1.compact.NdmPad) # Pad or crop for expected sub-array indexing
# Propagate each actuator from DM1 through the optical system
Gindex = 0 # initialize index counter
for iact in mp.dm1.act_ele:
# Compute only for influence functions that are not zeroed out
if np.sum(np.abs(mp.dm1.compact.inf_datacube[:, :, iact])) > 1e-12:
# x- and y- coordinate indices of the padded influence function in the full padded pupil
x_box_AS_ind = np.arange(mp.dm1.compact.xy_box_lowerLeft_AS[0,iact], mp.dm1.compact.xy_box_lowerLeft_AS[0, iact]+NboxPad1AS, dtype=int) # x-indices in pupil arrays for the box
y_box_AS_ind = np.arange(mp.dm1.compact.xy_box_lowerLeft_AS[1,iact], mp.dm1.compact.xy_box_lowerLeft_AS[1, iact]+NboxPad1AS, dtype=int) # y-indices in pupil arrays for the box
indBoxAS = np.ix_(y_box_AS_ind, x_box_AS_ind)
# x- and y- coordinates of the UN-padded influence function in the full padded pupil
x_box = mp.dm1.compact.x_pupPad[x_box_AS_ind] # full pupil x-coordinates of the box
y_box = mp.dm1.compact.y_pupPad[y_box_AS_ind] # full pupil y-coordinates of the box
# Propagate from DM1 to DM2, and then back to P2
dEbox = (mirrorFac*2*np.pi*1j/wvl)*pad_crop((mp.dm1.VtoH.reshape(mp.dm1.Nact**2)[iact])*np.squeeze(mp.dm1.compact.inf_datacube[:,:,iact]),NboxPad1AS) # Pad influence function at DM1 for angular spectrum propagation.
dEbox = fp.ptp(dEbox*Edm1pad[np.ix_(y_box_AS_ind,x_box_AS_ind)], mp.P2.compact.dx*NboxPad1AS,wvl, mp.d_dm1_dm2) # forward propagate to DM2 and apply DM2 E-field
dEP2box = fp.ptp(dEbox*Edm2WFEpad[np.ix_(y_box_AS_ind,x_box_AS_ind)]*DM2stop[np.ix_(y_box_AS_ind,x_box_AS_ind)]*np.exp(mirrorFac*2*np.pi*1j/wvl*DM2surf[np.ix_(y_box_AS_ind,x_box_AS_ind)]), mp.P2.compact.dx*NboxPad1AS,wvl,-1*(mp.d_dm1_dm2 + mp.d_P2_dm1) ) # back-propagate to DM1
# dEbox = fp.ptp_inf_func(dEbox*Edm1pad[np.ix_(y_box_AS_ind,x_box_AS_ind)], mp.P2.compact.dx*NboxPad1AS,wvl, mp.d_dm1_dm2, mp.dm1.dm_spacing, mp.propMethodPTP) # forward propagate to DM2 and apply DM2 E-field
# dEP2box = fp.ptp_inf_func(dEbox.*Edm2WFEpad[np.ix_(y_box_AS_ind,x_box_AS_ind)]*DM2stop(y_box_AS_ind,x_box_AS_ind).*exp(mirrorFac*2*np.pi*1j/wvl*DM2surf(y_box_AS_ind,x_box_AS_ind)), mp.P2.compact.dx*NboxPad1AS,wvl,-1*(mp.d_dm1_dm2 + mp.d_P2_dm1), mp.dm1.dm_spacing, mp.propMethodPTP ) # back-propagate to DM1
#
# To simulate going forward to the next pupil plane (with the apodizer) most efficiently,
# First, back-propagate the apodizer (by rotating 180-degrees) to the previous pupil.
# Second, negate the coordinates of the box used.
dEP2box = apodReimaged[indBoxAS]*dEP2box # Apply 180deg-rotated SP mask.
dEP3box = np.rot90(dEP2box, k=NrelayFactor*2*mp.Nrelay2to3) # Forward propagate the cropped box by rotating 180 degrees mp.Nrelay2to3 times.
# Negate and reverse coordinate values to effectively rotate by 180 degrees. No change if 360 degree rotation.
if np.mod(NrelayFactor*mp.Nrelay2to3, 2) == 1:
x_box = -1*x_box[::-1]
y_box = -1*y_box[::-1]
# Matrices for the MFT from the pupil P3 to the focal plane mask
rect_mat_pre = (np.exp(-2*np.pi*1j*np.outer(mp.F3.compact.etas,y_box)/(wvl*mp.fl)))*np.sqrt(mp.P2.compact.dx*mp.P2.compact.dx)*np.sqrt(mp.F3.compact.dxi*mp.F3.compact.deta)/(wvl*mp.fl)
rect_mat_post = (np.exp(-2*np.pi*1j*np.outer(x_box, mp.F3.compact.xis)/(wvl*mp.fl)))
EF3inc = rect_mat_pre @ dEP3box @ rect_mat_post # MFT to FPM
if mp.coro.upper() in ('LC', 'APLC', 'HLC'):
# Propagate through (1 - FPM) for Babinet's principle
EF3 = (transOuterFPM-fpm) * EF3inc
# MFT to LS ("Sub" name for Subtrahend part of the Lyot-plane E-field)
EP4sub = fp.mft_f2p(EF3, mp.fl, wvl, mp.F3.compact.dxi, mp.F3.compact.deta, mp.P4.compact.dx, mp.P4.compact.Narr, mp.centering)
EP4sub = fp.relay(EP4sub, NrelayFactor*mp.Nrelay3to4-1, mp.centering)
# Full Lyot plane pupil (for Babinet)
EP4noFPM = np.zeros((mp.dm1.compact.NdmPad, mp.dm1.compact.NdmPad),dtype=complex)
EP4noFPM[indBoxAS] = dEP2box # Propagating the E-field from P2 to P4 without masks gives the same E-field.
EP4noFPM = fp.relay(EP4noFPM, NrelayFactor*(mp.Nrelay2to3+mp.Nrelay3to4), mp.centering) # Get the correct orientation
EP4noFPM = pad_crop(EP4noFPM, mp.P4.compact.Narr) # Crop down to the size of the Lyot stop opening
EP4 = transOuterFPM*EP4noFPM - EP4sub # Babinet's principle to get E-field at Lyot plane
elif mp.coro.upper() in ('FLC', 'SPLC'):
EF3 = fpm * EF3inc # Apply FPM
# MFT to Lyot plane
EP4 = fp.mft_f2p(EF3, mp.fl,wvl, mp.F3.compact.dxi, mp.F3.compact.deta, mp.P4.compact.dx, mp.P4.compact.Narr, mp.centering)
EP4 = fp.relay(EP4, NrelayFactor*mp.Nrelay3to4-1, mp.centering) # Get the correct orientation
EP4 *= mp.P4.compact.croppedMask # Apply Lyot stop
# MFT to camera
EP4 = fp.relay(EP4, NrelayFactor*mp.NrelayFend, mp.centering) # Rotate the final image 180 degrees if necessary
EFend = fp.mft_p2f(EP4, mp.fl,wvl, mp.P4.compact.dx, mp.Fend.dxi, mp.Fend.Nxi, mp.Fend.deta, mp.Fend.Neta, mp.centering)
Gzdl[:, Gindex] = EFend[mp.Fend.corr.maskBool]/np.sqrt(mp.Fend.compact.I00[modvar.sbpIndex])
Gindex += 1
""" ---------- DM2 ---------- """
if idm == 2:
Gzdl = np.zeros((mp.Fend.corr.Npix, mp.dm2.Nele), dtype=complex)
# Two array sizes (at same resolution) of influence functions for MFT and angular spectrum
NboxPad2AS = int(mp.dm2.compact.NboxAS)
mp.dm2.compact.xy_box_lowerLeft_AS = mp.dm2.compact.xy_box_lowerLeft - (NboxPad2AS-mp.dm2.compact.Nbox)/2 # Account for the padding of the influence function boxes
apodReimaged = pad_crop(apodReimaged, mp.dm2.compact.NdmPad)
DM2stopPad = pad_crop(DM2stop, mp.dm2.compact.NdmPad)
Edm2WFEpad = pad_crop(Edm2WFE, mp.dm2.compact.NdmPad)
# Propagate full field to DM2 before back-propagating in small boxes
Edm2inc = pad_crop(fp.ptp(Edm1out, mp.compact.NdmPad*mp.P2.compact.dx, wvl, mp.d_dm1_dm2), mp.dm2.compact.NdmPad) # E-field incident upon DM2
Edm2inc = pad_crop(Edm2inc, mp.dm2.compact.NdmPad)
Edm2 = DM2stopPad*Edm2WFEpad*Edm2inc*np.exp(mirrorFac*2*np.pi*1j/wvl*pad_crop(DM2surf, mp.dm2.compact.NdmPad)) # Initial E-field at DM2 including its own phase contribution
# Propagate each actuator from DM2 through the rest of the optical system
Gindex = 0 # initialize index counter
for iact in mp.dm2.act_ele:
if np.sum(np.abs(mp.dm2.compact.inf_datacube[:, :, iact])) > 1e-12: # Only compute for acutators specified for use or for influence functions that are not zeroed out
# x- and y- coordinates of the padded influence function in the full padded pupil
x_box_AS_ind = np.arange(mp.dm2.compact.xy_box_lowerLeft_AS[0, iact], mp.dm2.compact.xy_box_lowerLeft_AS[0, iact]+NboxPad2AS, dtype=int) # x-indices in pupil arrays for the box
y_box_AS_ind = np.arange(mp.dm2.compact.xy_box_lowerLeft_AS[1, iact], mp.dm2.compact.xy_box_lowerLeft_AS[1, iact]+NboxPad2AS, dtype=int) # y-indices in pupil arrays for the box
indBoxAS = np.ix_(y_box_AS_ind, x_box_AS_ind)
# x- and y- coordinates of the UN-padded influence function in the full padded pupil
x_box = mp.dm2.compact.x_pupPad[x_box_AS_ind] # full pupil x-coordinates of the box
y_box = mp.dm2.compact.y_pupPad[y_box_AS_ind] # full pupil y-coordinates of the box
dEbox = (mp.dm2.VtoH.reshape(mp.dm2.Nact**2)[iact])*(mirrorFac*2*np.pi*1j/wvl)*pad_crop(np.squeeze(mp.dm2.compact.inf_datacube[:, :, iact]), NboxPad2AS) # the padded influence function at DM2
dEP2box = fp.ptp(dEbox*Edm2[indBoxAS], mp.P2.compact.dx*NboxPad2AS, wvl, -1*(mp.d_dm1_dm2 + mp.d_P2_dm1)) # back-propagate to pupil P2
# dEP2box = ptp_inf_func(dEbox.*Edm2(y_box_AS_ind,x_box_AS_ind), mp.P2.compact.dx*NboxPad2AS,wvl,-1*(mp.d_dm1_dm2 + mp.d_P2_dm1), mp.dm2.dm_spacing, mp.propMethodPTP); # back-propagate to pupil P2
# To simulate going forward to the next pupil plane (with the apodizer) most efficiently,
# First, back-propagate the apodizer (by rotating 180-degrees) to the previous pupil.
# Second, negate the coordinates of the box used.
dEP2box = apodReimaged[indBoxAS]*dEP2box # Apply 180deg-rotated SP mask.
dEP3box = np.rot90(dEP2box, k=2*NrelayFactor*mp.Nrelay2to3) # Forward propagate the cropped box by rotating 180 degrees mp.Nrelay2to3 times.
# Negate and rotate coordinates to effectively rotate by 180 degrees. No change if 360 degree rotation.
if np.mod(NrelayFactor*mp.Nrelay2to3, 2) == 1:
x_box = -1*x_box[::-1]
y_box = -1*y_box[::-1]
# Matrices for the MFT from the pupil P3 to the focal plane mask
rect_mat_pre = np.exp(-2*np.pi*1j*np.outer(mp.F3.compact.etas, y_box)/(wvl*mp.fl))*np.sqrt(mp.P2.compact.dx*mp.P2.compact.dx)*np.sqrt(mp.F3.compact.dxi*mp.F3.compact.deta)/(wvl*mp.fl)
rect_mat_post = np.exp(-2*np.pi*1j*np.outer(x_box, mp.F3.compact.xis)/(wvl*mp.fl))
EF3inc = rect_mat_pre @ dEP3box @ rect_mat_post # MFT to FPM
if mp.coro.upper() in ('LC', 'APLC', 'HLC'):
# Propagate through (1 - fpm) for Babinet's principle
EF3 = (transOuterFPM-fpm) * EF3inc
# MFT to LS ("Sub" name for Subtrahend part of the Lyot-plane E-field)
EP4sub = fp.mft_f2p(EF3, mp.fl, wvl, mp.F3.compact.dxi, mp.F3.compact.deta, mp.P4.compact.dx, mp.P4.compact.Narr, mp.centering) # Subtrahend term for the Lyot plane E-field
EP4sub = fp.relay(EP4sub, NrelayFactor*mp.Nrelay3to4-1, mp.centering) # Get the correct orientation
EP4noFPM = np.zeros((mp.dm2.compact.NdmPad, mp.dm2.compact.NdmPad), dtype=complex)
EP4noFPM[indBoxAS] = dEP2box # Propagating the E-field from P2 to P4 without masks gives the same E-field.
EP4noFPM = fp.relay(EP4noFPM, NrelayFactor*(mp.Nrelay2to3+mp.Nrelay3to4), mp.centering) # Get the number or re-imaging relays between pupils P3 and P4.
EP4noFPM = pad_crop(EP4noFPM, mp.P4.compact.Narr) # Crop down to the size of the Lyot stop opening
EP4 = transOuterFPM*EP4noFPM - EP4sub # Babinet's principle to get E-field at Lyot plane
elif mp.coro.upper() in ('FLC', 'SPLC'):
EF3 = fpm * EF3inc # Apply FPM
# MFT to LS ("Sub" name for Subtrahend part of the Lyot-plane E-field)
EP4 = fp.mft_f2p(EF3, mp.fl, wvl, mp.F3.compact.dxi, mp.F3.compact.deta, mp.P4.compact.dx, mp.P4.compact.Narr, mp.centering)
EP4 = fp.relay(EP4, NrelayFactor*mp.Nrelay3to4-1, mp.centering)
EP4 *= mp.P4.compact.croppedMask # Apply Lyot stop
# MFT to detector
EP4 = fp.relay(EP4, NrelayFactor*mp.NrelayFend, mp.centering) # Rotate the final image 180 degrees if necessary
EFend = fp.mft_p2f(EP4, mp.fl, wvl, mp.P4.compact.dx, mp.Fend.dxi, mp.Fend.Nxi, mp.Fend.deta, mp.Fend.Neta, mp.centering)
Gzdl[:, Gindex] = EFend[mp.Fend.corr.maskBool]/np.sqrt(mp.Fend.compact.I00[modvar.sbpIndex])
Gindex += 1
""" ---------- DM9 (HLC only) ---------- """
if idm == 9:
Gzdl = np.zeros((mp.Fend.corr.Npix, mp.dm9.Nele), dtype=complex)
Nbox9 = int(mp.dm9.compact.Nbox)
# Adjust the step size in the Jacobian, then divide back out. Used for
# helping counteract effect of discretization.
if not hasattr(mp.dm9, 'stepFac'):
stepFac = 20
else:
stepFac = mp.dm9.stepFac
# Propagate from DM1 to DM2, and apply DM2 surface and aperture stop
Edm2 = Edm2WFE * DM2stop * np.exp(mirrorFac*2*np.pi*1j*DM2surf/wvl) * \
fp.ptp(Edm1out, mp.P2.compact.dx*NdmPad, wvl, mp.d_dm1_dm2)
# Back-propagate to pupil P2
dz2 = mp.d_P2_dm1 + mp.d_dm1_dm2
if dz2 < 10*wvl:
EP2eff = Edm2
else:
EP2eff = fp.ptp(Edm2, mp.P2.compact.dx*NdmPad, wvl, -dz2)
# Rotate 180 degrees mp.Nrelay2to3 times to go from pupil P2 to P3
EP3 = fp.relay(EP2eff, NrelayFactor*mp.Nrelay2to3, mp.centering)
# Apply apodizer mask
if mp.flagApod:
EP3 = mp.P3.compact.mask * pad_crop(EP3, mp.P1.compact.Narr)
# MFT from pupil P3 to FPM (at focus F3)
EF3inc = fp.mft_p2f(EP3, mp.fl, wvl, mp.P2.compact.dx, mp.F3.compact.dxi, mp.F3.compact.Nxi, mp.F3.compact.deta, mp.F3.compact.Neta, mp.centering)
EF3inc = pad_crop(EF3inc, mp.dm9.compact.NdmPad)
# Coordinates for metal thickness and dielectric thickness
DM8transIndAll = falco.hlc.discretize_fpm_surf(mp.dm8.surf, mp.t_metal_nm_vec, mp.dt_metal_nm) # All of the mask
# Propagate each actuator from DM2 through the rest of the optical system
Gindex = 0 # initialize index counter
for iact in mp.dm9.act_ele:
if np.sum(np.abs(mp.dm9.compact.inf_datacube[:, :, iact])) > 1e-12: # Only compute for acutators specified for use or for influence functions that are not zeroed out
# xi- and eta- coordinates in the full FPM portion of the focal plane
xyLL = mp.dm9.compact.xy_box_lowerLeft[:, iact]
xi_box_ind = np.arange(xyLL[0], xyLL[0]+Nbox9, dtype=int) # xi-indices in focal arrays for the box
eta_box_ind = np.arange(xyLL[1], xyLL[1]+Nbox9, dtype=int) # eta-indices in focal arrays for the box
indBox = np.ix_(eta_box_ind, xi_box_ind)
xi_box = mp.dm9.compact.x_pupPad[xi_box_ind]
eta_box = mp.dm9.compact.y_pupPad[eta_box_ind]
# Obtain values for the "poked" FPM's complex transmission (only in the sub-array where poked)
Nxi = Nbox9
Neta = Nbox9
DM9surfCropNew = stepFac*mp.dm9.VtoH[iact]*mp.dm9.compact.inf_datacube[:, :, iact] + mp.dm9.surf[indBox] # New DM9 surface profile in the poked region (meters)
DM9transInd = falco.hlc.discretize_fpm_surf(DM9surfCropNew, mp.t_diel_nm_vec, mp.dt_diel_nm)
DM8transInd = DM8transIndAll[indBox] # Cropped region of the FPM.
# Look up table to compute complex transmission coefficient of the FPM at each pixel
fpmPoked = np.zeros((Neta, Nxi), dtype=complex) # Initialize output array of FPM's complex transmission
for ix in range(Nxi):
for iy in range(Neta):
ind_metal = DM8transInd[iy, ix]
ind_diel = DM9transInd[iy, ix]
fpmPoked[iy, ix] = mp.complexTransCompact[ind_diel, ind_metal, modvar.sbpIndex]
dEF3box = ((transOuterFPM-fpmPoked) - (transOuterFPM-fpm[indBox])) * EF3inc[indBox] # Delta field (in a small region) at the FPM
# Matrices for the MFT from the FPM stamp to the Lyot stop
rect_mat_pre = np.exp(-2*np.pi*1j*np.outer(mp.P4.compact.ys, eta_box)/(wvl*mp.fl)) *\
np.sqrt(mp.P4.compact.dx*mp.P4.compact.dx)*np.sqrt(mp.F3.compact.dxi*mp.F3.compact.deta)/(wvl*mp.fl)
rect_mat_post = np.exp(-2*np.pi*1j*np.outer(xi_box, mp.P4.compact.xs)/(wvl*mp.fl))
# MFT from FPM to Lyot stop (Nominal term transOuterFPM*EP4noFPM subtracts out to 0 since it ignores the FPM change).
EP4 = 0 - rect_mat_pre @ dEF3box @ rect_mat_post # MFT from FPM (F3) to Lyot stop plane (P4)
EP4 = fp.relay(EP4, NrelayFactor*mp.Nrelay3to4-1, mp.centering)
EP4 = mp.P4.compact.croppedMask * EP4 # Apply Lyot stop
# MFT to final focal plane
EP4 = fp.relay(EP4, NrelayFactor*mp.NrelayFend, mp.centering)
EFend = fp.mft_p2f(EP4, mp.fl, wvl, mp.P4.compact.dx, mp.Fend.dxi, mp.Fend.Nxi, mp.Fend.deta, mp.Fend.Neta, mp.centering)
Gzdl[:, Gindex] = mp.dm9.act_sens / stepFac * mp.dm9.weight*EFend[mp.Fend.corr.maskBool] / np.sqrt(mp.Fend.compact.I00[modvar.sbpIndex])
Gindex += 1
return Gzdl
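# Note on the Babinet branches above: for 'LC'/'APLC'/'HLC' the Lyot-plane field
# is assembled as EP4 = transOuterFPM*EP4noFPM - EP4sub, i.e. propagation through
# (1 - FPM) is obtained by subtracting the FPM-diffracted term from the
# unocculted beam (Babinet's principle), which keeps the MFT confined to the
# small poked sub-array instead of the full pupil.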
def vortex(mp, im, idm):
"""
Differential model used to compute ctrl Jacobian for vortex coronagraph.
Specialized compact model used to compute the DM response matrix, aka the
control Jacobian for a vortex coronagraph. Can include an apodizer, making
it an apodized vortex coronagraph (AVC). Does not include unknown
aberrations of the full, "truth" model. This model propagates the
first-order Taylor expansion of the phase from the poke of each actuator
of the deformable mirror.
Parameters
----------
mp : ModelParameters
Structure containing optical model parameters
Returns
-------
Gzdl : numpy ndarray
Complex-valued, 2-D array containing the Jacobian for the
specified Zernike mode, DM number, and wavelength.
"""
modvar = falco.config.Object() # Initialize the new structure
modvar.sbpIndex = mp.jac.sbp_inds[im]
modvar.zernIndex = mp.jac.zern_inds[im]
wvl = mp.sbp_centers[modvar.sbpIndex]
mirrorFac = 2. # Phase change is twice the DM surface height.
NdmPad = int(mp.compact.NdmPad)
if mp.flagRotation:
NrelayFactor = 1
else:
NrelayFactor = 0 # zero out the number of relays
# Minimum FPM resolution for Jacobian calculations (in pixels per lambda/D)
minPadFacVortex = 8
# Get FPM charge
if type(mp.F3.VortexCharge) == np.ndarray:
# Passing an array for mp.F3.VortexCharge with
# corresponding wavelengths mp.F3.VortexCharge_lambdas
# represents a chromatic vortex FPM
if mp.F3.VortexCharge.size == 1:
charge = mp.F3.VortexCharge
else:
charge = np.interp(wvl, mp.F3.VortexCharge_lambdas,
mp.F3.VortexCharge)  # linear interpolation; np.interp clamps at the endpoints rather than extrapolating
elif type(mp.F3.VortexCharge) == int or type(mp.F3.VortexCharge) == float:
# single value indicates fully achromatic mask
charge = mp.F3.VortexCharge
else:
raise TypeError("mp.F3.VortexCharge must be an int, float, or numpy ndarray.")
"""Input E-fields"""
Ein = np.squeeze(mp.P1.compact.E[:, :, modvar.sbpIndex])
# Apply a Zernike (in amplitude) at input pupil
# Used only for Zernike sensitivity control, which requires the perfect
# E-field of the differential Zernike term.
if not modvar.zernIndex == 1:
indsZnoll = modvar.zernIndex # Just send in 1 Zernike mode
zernMat = np.squeeze(falco.zern.gen_norm_zern_maps(mp.P1.compact.Nbeam,
mp.centering, indsZnoll))
zernMat = pad_crop(zernMat, mp.P1.compact.Narr)
Ein = Ein*zernMat*(2*np.pi/wvl) * \
mp.jac.Zcoef[mp.jac.zerns == modvar.zernIndex]
""" Masks and DM surfaces """
pupil = pad_crop(mp.P1.compact.mask, NdmPad)
Ein = pad_crop(Ein, NdmPad)
# Re-image the apodizer from pupil P3 back to pupil P2.
if(mp.flagApod):
apodReimaged = pad_crop(mp.P3.compact.mask, NdmPad)
apodReimaged = fp.relay(apodReimaged, NrelayFactor*mp.Nrelay2to3, mp.centering)
else:
apodReimaged = np.ones((NdmPad, NdmPad))
# Compute the DM surfaces for the current DM commands
if any(mp.dm_ind == 1):
DM1surf = pad_crop(mp.dm1.compact.surfM, NdmPad)
# DM1surf = falco.dm.gen_surf_from_act(mp.dm1, mp.dm1.compact.dx, NdmPad)
else:
DM1surf = np.zeros((NdmPad, NdmPad))
if any(mp.dm_ind == 2):
DM2surf = pad_crop(mp.dm2.compact.surfM, NdmPad)
# DM2surf = falco.dm.gen_surf_from_act(mp.dm2, mp.dm2.compact.dx, NdmPad)
else:
DM2surf = np.zeros((NdmPad, NdmPad))
if(mp.flagDM1stop):
DM1stop = pad_crop(mp.dm1.compact.mask, NdmPad)
else:
DM1stop = np.ones((NdmPad, NdmPad))
if(mp.flagDM2stop):
DM2stop = pad_crop(mp.dm2.compact.mask, NdmPad)
else:
DM2stop = np.ones((NdmPad, NdmPad))
# This block is for BMC surface error testing
if(mp.flagDMwfe):
if any(mp.dm_ind == 1):
Edm1WFE = np.exp(2*np.pi*1j/wvl*pad_crop(mp.dm1.compact.wfe,
NdmPad, 'extrapval', 0))
else:
Edm1WFE = np.ones((NdmPad, NdmPad))
if any(mp.dm_ind == 2):
Edm2WFE = np.exp(2*np.pi*1j/wvl*pad_crop(mp.dm2.compact.wfe,
NdmPad, 'extrapval', 0))
else:
Edm2WFE = np.ones((NdmPad, NdmPad))
else:
Edm1WFE = np.ones((NdmPad, NdmPad))
Edm2WFE = np.ones((NdmPad, NdmPad))
"""Propagation"""
# Define pupil P1 and Propagate to pupil P2
EP1 = pupil*Ein # E-field at pupil plane P1
EP2 = fp.relay(EP1, NrelayFactor*mp.Nrelay1to2, mp.centering)
# Propagate from P2 to DM1, and apply DM1 surface and aperture stop
if not (abs(mp.d_P2_dm1) == 0): # E-field arriving at DM1
Edm1 = fp.ptp(EP2, mp.P2.compact.dx*NdmPad, wvl, mp.d_P2_dm1)
else:
Edm1 = EP2
Edm1out = Edm1*Edm1WFE*DM1stop*np.exp(mirrorFac*2*np.pi*1j*DM1surf/wvl)
""" ---------- DM1 ---------- """
if idm == 1:
Gzdl = np.zeros((mp.Fend.corr.Npix, mp.dm1.Nele), dtype=complex)
# Array size for planes P3, F3, and P4
Nfft1 = int(2**falco.util.nextpow2(np.max(np.array([mp.dm1.compact.NdmPad, minPadFacVortex*mp.dm1.compact.Nbox])))) # Don't crop--but do pad if necessary.
# Generate vortex FPM with fftshift already applied
fftshiftVortex = fftshift(falco.mask.falco_gen_vortex_mask(charge, Nfft1))
# Two array sizes (at same resolution) of influence functions for MFT and angular spectrum
NboxPad1AS = int(mp.dm1.compact.NboxAS) # array size for FFT-AS propagations from DM1->DM2->DM1
mp.dm1.compact.xy_box_lowerLeft_AS = mp.dm1.compact.xy_box_lowerLeft - (mp.dm1.compact.NboxAS-mp.dm1.compact.Nbox)/2. # Adjust the sub-array location of the influence function for the added zero padding
if any(mp.dm_ind == 2):
DM2surf = pad_crop(DM2surf, mp.dm1.compact.NdmPad)
else:
DM2surf = np.zeros((mp.dm1.compact.NdmPad, mp.dm1.compact.NdmPad))
if(mp.flagDM2stop):
DM2stop = pad_crop(DM2stop, mp.dm1.compact.NdmPad)
else:
DM2stop = np.ones((mp.dm1.compact.NdmPad, mp.dm1.compact.NdmPad))
apodReimaged = pad_crop(apodReimaged, mp.dm1.compact.NdmPad)
Edm1pad = pad_crop(Edm1out, mp.dm1.compact.NdmPad) # Pad or crop for expected sub-array indexing
Edm2WFEpad = pad_crop(Edm2WFE, mp.dm1.compact.NdmPad) # Pad or crop for expected sub-array indexing
# Propagate each actuator from DM1 through the optical system
Gindex = 0 # initialize index counter
for iact in mp.dm1.act_ele:
# Compute only for influence functions that are not zeroed out
if np.sum(np.abs(mp.dm1.compact.inf_datacube[:, :, iact])) > 1e-12:
# x- and y- coordinate indices of the padded influence function in the full padded pupil
x_box_AS_ind = np.arange(mp.dm1.compact.xy_box_lowerLeft_AS[0, iact], mp.dm1.compact.xy_box_lowerLeft_AS[0, iact]+NboxPad1AS, dtype=int) # x-indices in pupil arrays for the box
y_box_AS_ind = np.arange(mp.dm1.compact.xy_box_lowerLeft_AS[1, iact], mp.dm1.compact.xy_box_lowerLeft_AS[1 ,iact]+NboxPad1AS, dtype=int) # y-indices in pupil arrays for the box
indBoxAS = np.ix_(y_box_AS_ind, x_box_AS_ind)
# x- and y- coordinates of the UN-padded influence function in the full padded pupil
x_box = mp.dm1.compact.x_pupPad[x_box_AS_ind] # full pupil x-coordinates of the box
y_box = mp.dm1.compact.y_pupPad[y_box_AS_ind] # full pupil y-coordinates of the box
# Propagate from DM1 to DM2, and then back to P2
dEbox = (mirrorFac*2*np.pi*1j/wvl)*pad_crop((mp.dm1.VtoH.reshape(mp.dm1.Nact**2)[iact])*np.squeeze(mp.dm1.compact.inf_datacube[:, :, iact]), NboxPad1AS) # Pad influence function at DM1 for angular spectrum propagation.
dEbox = fp.ptp(dEbox*Edm1pad[indBoxAS], mp.P2.compact.dx*NboxPad1AS,wvl, mp.d_dm1_dm2) # forward propagate to DM2 and apply DM2 E-field
dEP2box = fp.ptp(dEbox*Edm2WFEpad[indBoxAS]*DM2stop[indBoxAS]*np.exp(mirrorFac*2*np.pi*1j/wvl*DM2surf[indBoxAS]), mp.P2.compact.dx*NboxPad1AS,wvl,-1*(mp.d_dm1_dm2 + mp.d_P2_dm1)) # back-propagate to DM1
# dEbox = fp.ptp_inf_func(dEbox*Edm1pad[np.ix_(y_box_AS_ind,x_box_AS_ind)], mp.P2.compact.dx*NboxPad1AS,wvl, mp.d_dm1_dm2, mp.dm1.dm_spacing, mp.propMethodPTP) # forward propagate to DM2 and apply DM2 E-field
# dEP2box = fp.ptp_inf_func(dEbox.*Edm2WFEpad[np.ix_(y_box_AS_ind,x_box_AS_ind)]*DM2stop(y_box_AS_ind,x_box_AS_ind).*exp(mirrorFac*2*np.pi*1j/wvl*DM2surf(y_box_AS_ind,x_box_AS_ind)), mp.P2.compact.dx*NboxPad1AS,wvl,-1*(mp.d_dm1_dm2 + mp.d_P2_dm1), mp.dm1.dm_spacing, mp.propMethodPTP ) # back-propagate to DM1
#
# To simulate going forward to the next pupil plane (with the apodizer) most efficiently,
# First, back-propagate the apodizer (by rotating 180-degrees) to the previous pupil.
# Second, negate the coordinates of the box used.
dEP2boxEff = apodReimaged[indBoxAS]*dEP2box # Apply 180deg-rotated apodizer mask.
# dEP3box = np.rot90(dEP2box,k=2*mp.Nrelay2to3) # Forward propagate the cropped box by rotating 180 degrees mp.Nrelay2to3 times.
# # Negate and reverse coordinate values to effectively rotate by 180 degrees. No change if 360 degree rotation.
# Re-insert the window around the influence function back into the full beam array.
EP2eff = np.zeros((mp.dm1.compact.NdmPad, mp.dm1.compact.NdmPad), dtype=complex)
EP2eff[indBoxAS] = dEP2boxEff
# Forward propagate from P2 (effective) to P3
EP3 = fp.relay(EP2eff, NrelayFactor*mp.Nrelay2to3, mp.centering)
# Pad pupil P3 for FFT
EP3pad = pad_crop(EP3, Nfft1)
# FFT from P3 to F3 and apply the vortex mask
EF3 = fftshiftVortex*fft2(fftshift(EP3pad))/Nfft1
# FFT from Vortex FPM to Lyot Plane
EP4 = fftshift(fft2(EF3))/Nfft1
EP4 = fp.relay(EP4, NrelayFactor*mp.Nrelay3to4-1, mp.centering) # Add more re-imaging relays if necessary
if(Nfft1 > mp.P4.compact.Narr):
EP4 = mp.P4.compact.croppedMask*pad_crop(EP4, mp.P4.compact.Narr) # Crop EP4 and then apply Lyot stop
else:
EP4 = pad_crop(mp.P4.compact.croppedMask, Nfft1)*EP4 # Crop the Lyot stop and then apply it.
# MFT to camera
EP4 = fp.relay(EP4, NrelayFactor*mp.NrelayFend, mp.centering) # Rotate the final image 180 degrees if necessary
EFend = fp.mft_p2f(EP4, mp.fl, wvl, mp.P4.compact.dx, mp.Fend.dxi, mp.Fend.Nxi, mp.Fend.deta, mp.Fend.Neta, mp.centering)
Gzdl[:, Gindex] = EFend[mp.Fend.corr.maskBool]/np.sqrt(mp.Fend.compact.I00[modvar.sbpIndex])
Gindex += 1
""" ---------- DM2 ---------- """
if idm == 2:
Gzdl = np.zeros((mp.Fend.corr.Npix, mp.dm2.Nele), dtype=complex)
# Array size for planes P3, F3, and P4
Nfft2 = int(2**falco.util.nextpow2(np.max(np.array([mp.dm2.compact.NdmPad, minPadFacVortex*mp.dm2.compact.Nbox])))) # Don't crop--but do pad if necessary.
# Generate vortex FPM with fftshift already applied
fftshiftVortex = fftshift(falco.mask.falco_gen_vortex_mask(charge, Nfft2))
# Two array sizes (at same resolution) of influence functions for MFT and angular spectrum
NboxPad2AS = int(mp.dm2.compact.NboxAS)
mp.dm2.compact.xy_box_lowerLeft_AS = mp.dm2.compact.xy_box_lowerLeft - (NboxPad2AS-mp.dm2.compact.Nbox)/2 # Account for the padding of the influence function boxes
apodReimaged = pad_crop(apodReimaged, mp.dm2.compact.NdmPad)
DM2stopPad = pad_crop(DM2stop, mp.dm2.compact.NdmPad)
Edm2WFEpad = pad_crop(Edm2WFE, mp.dm2.compact.NdmPad)
# Propagate full field to DM2 before back-propagating in small boxes
Edm2inc = pad_crop(fp.ptp(Edm1out, mp.compact.NdmPad*mp.P2.compact.dx,wvl, mp.d_dm1_dm2), mp.dm2.compact.NdmPad) # E-field incident upon DM2
Edm2inc = pad_crop(Edm2inc, mp.dm2.compact.NdmPad);
Edm2 = DM2stopPad * Edm2WFEpad * Edm2inc * np.exp(mirrorFac*2*np.pi*1j/wvl * pad_crop(DM2surf, mp.dm2.compact.NdmPad)) # Initial E-field at DM2 including its own phase contribution
# Propagate each actuator from DM2 through the rest of the optical system
Gindex = 0 # initialize index counter
for iact in mp.dm2.act_ele:
# Only compute for actuators specified for use or for influence functions that are not zeroed out
if np.sum(np.abs(mp.dm2.compact.inf_datacube[:, :, iact])) > 1e-12:
# x- and y- coordinates of the padded influence function in the full padded pupil
x_box_AS_ind = np.arange(mp.dm2.compact.xy_box_lowerLeft_AS[0, iact], mp.dm2.compact.xy_box_lowerLeft_AS[0, iact]+NboxPad2AS, dtype=int) # x-indices in pupil arrays for the box
y_box_AS_ind = np.arange(mp.dm2.compact.xy_box_lowerLeft_AS[1, iact], mp.dm2.compact.xy_box_lowerLeft_AS[1, iact]+NboxPad2AS, dtype=int) # y-indices in pupil arrays for the box
indBoxAS = np.ix_(y_box_AS_ind, x_box_AS_ind)
# # x- and y- coordinates of the UN-padded influence function in the full padded pupil
# x_box = mp.dm2.compact.x_pupPad[x_box_AS_ind] # full pupil x-coordinates of the box
# y_box = mp.dm2.compact.y_pupPad[y_box_AS_ind] # full pupil y-coordinates of the box
dEbox = (mp.dm2.VtoH.reshape(mp.dm2.Nact**2)[iact])*(mirrorFac*2*np.pi*1j/wvl)*pad_crop(np.squeeze(mp.dm2.compact.inf_datacube[:, :, iact]), NboxPad2AS) # the padded influence function at DM2
dEP2box = fp.ptp(dEbox*Edm2[indBoxAS], mp.P2.compact.dx*NboxPad2AS, wvl, -1*(mp.d_dm1_dm2 + mp.d_P2_dm1)) # back-propagate to pupil P2
# dEP2box = ptp_inf_func(dEbox.*Edm2(y_box_AS_ind,x_box_AS_ind), mp.P2.compact.dx*NboxPad2AS,wvl,-1*(mp.d_dm1_dm2 + mp.d_P2_dm1), mp.dm2.dm_spacing, mp.propMethodPTP); # back-propagate to pupil P2
# To simulate going forward to the next pupil plane (with the apodizer) most efficiently,
# First, back-propagate the apodizer (by rotating 180-degrees) to the previous pupil.
# Second, negate the coordinates of the box used.
dEP2boxEff = apodReimaged[indBoxAS]*dEP2box
# dEP3box = np.rot90(dEP2box,k=2*mp.Nrelay2to3) # Forward propagate the cropped box by rotating 180 degrees mp.Nrelay2to3 times.
# # Negate and rotate coordinates to effectively rotate by 180 degrees. No change if 360 degree rotation.
# if np.mod(mp.Nrelay2to3,2)==1:
# x_box = -1*x_box[::-1]
# y_box = -1*y_box[::-1]
EP2eff = np.zeros((mp.dm2.compact.NdmPad, mp.dm2.compact.NdmPad), dtype=complex)
EP2eff[indBoxAS] = dEP2boxEff
# Forward propagate from P2 (effective) to P3
EP3 = fp.relay(EP2eff, NrelayFactor*mp.Nrelay2to3, mp.centering)
# Pad pupil P3 for FFT
EP3pad = pad_crop(EP3, Nfft2)
# FFT from P3 to F3 and apply the vortex mask
EF3 = fftshiftVortex*fft2(fftshift(EP3pad))/Nfft2
# FFT from Vortex FPM to Lyot Plane
EP4 = fftshift(fft2(EF3))/Nfft2
EP4 = fp.relay(EP4, NrelayFactor*mp.Nrelay3to4-1, mp.centering)
if(Nfft2 > mp.P4.compact.Narr):
EP4 = mp.P4.compact.croppedMask * pad_crop(EP4, mp.P4.compact.Narr)
else:
EP4 = pad_crop(mp.P4.compact.croppedMask, Nfft2) * EP4
# MFT to detector
EP4 = fp.relay(EP4, NrelayFactor*mp.NrelayFend, mp.centering)
EFend = fp.mft_p2f(EP4, mp.fl, wvl, mp.P4.compact.dx, mp.Fend.dxi, mp.Fend.Nxi, mp.Fend.deta, mp.Fend.Neta, mp.centering)
Gzdl[:, Gindex] = EFend[mp.Fend.corr.maskBool] / \
np.sqrt(mp.Fend.compact.I00[modvar.sbpIndex])
Gindex += 1
return Gzdl
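# A minimal sketch of how these kernels are typically driven, assuming a fully
# populated ModelParameters object `mp`; the dispatch flag is an illustrative
# assumption, not the actual falco-python API.
def build_jacobian_cube(mp, idm, use_vortex=False):
    """Stack the per-mode control Jacobians for one DM into a 3-D cube."""
    kernel = vortex if use_vortex else lyot
    cols = [kernel(mp, im, idm) for im in range(len(mp.jac.sbp_inds))]
    return np.stack(cols, axis=-1)  # shape: (Fend.corr.Npix, Nele, n_modes)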
| 59.189504 | 324 | 0.622796 | 5,807 | 40,604 | 4.263131 | 0.087653 | 0.014542 | 0.028114 | 0.009452 | 0.866739 | 0.855308 | 0.850541 | 0.841533 | 0.832727 | 0.826264 | 0 | 0.03699 | 0.268939 | 40,604 | 685 | 325 | 59.275912 | 0.796995 | 0.346419 | 0 | 0.692105 | 0 | 0 | 0.00627 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005263 | false | 0.002632 | 0.013158 | 0 | 0.023684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
71a1d852e88a668f8f745531172305fcdc58e251 | 20 | py | Python | src/openue/models/__init__.py | ikutalilas/OpenUE | 098bbbef3225970d7cd9d099675f1c723345fd66 | ["MIT"] | 461 | 2021-08-02T04:14:12.000Z | 2022-03-26T15:48:42.000Z | src/openue/models/__init__.py | southerndog/OpenUE | 52bf8f0aff43d9a83727777228b523be6f4fd3d4 | ["MIT"] | 21 | 2020-12-26T05:53:56.000Z | 2022-01-26T06:47:18.000Z | src/openue/models/__init__.py | southerndog/OpenUE | 52bf8f0aff43d9a83727777228b523be6f4fd3d4 | ["MIT"] | 39 | 2021-09-07T08:04:35.000Z | 2022-01-17T06:34:59.000Z |
from .model import *
| 20 | 20 | 0.75 | 3 | 20 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15 | 20 | 1 | 20 | 20 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
71b156d57942da47bff91f4e2363a227bdbdc673 | 2,507 | py | Python | test.py | max-andr/Joint-Training-of-a-Convolutional-Network-and-a-Graphical-Model-for-Human-Pose-Estimation | b0a071b45e14dc2f098203127a1f55b83022ae79 | ["MIT"] | 55 | 2018-02-21T00:16:15.000Z | 2022-03-05T02:12:32.000Z | test.py | max-andr/Joint-Training-of-a-Convolutional-Network-and-a-Graphical-Model-for-Human-Pose-Estimation | b0a071b45e14dc2f098203127a1f55b83022ae79 | ["MIT"] | 1 | 2021-06-28T07:11:58.000Z | 2021-06-28T07:11:58.000Z | test.py | max-andr/Joint-Training-of-a-Convolutional-Network-and-a-Graphical-Model-for-Human-Pose-Estimation | b0a071b45e14dc2f098203127a1f55b83022ae79 | ["MIT"] | 15 | 2018-06-14T11:29:18.000Z | 2022-03-01T13:56:36.000Z |
import numpy as np
import pickle

"""
The first part of this file tests whether data.py prepares the data correctly.
The second part tests whether data_FlIC_plus.py prepares the data correctly.
"""

### The first part
n_joint = 9  # the number of joints to display
y_test = np.load('y_test_flic.npy')
x_test = np.load('x_test_flic.npy')
print('x_test shape is', x_test.shape)
i = np.random.randint(0, high=x_test.shape[0])
print('Show the %dth image and the heat map for n_joint:' % i)
y_test = y_test.astype(np.float32)
y_test = y_test / 256
coords = np.zeros([2, n_joint])
img = x_test[i, :, :, :]
img = np.reshape(img, (x_test.shape[1], x_test.shape[2], x_test.shape[3]))
for joint in range(n_joint):
    print(joint)
    hmap = y_test[i, :, :, joint]
    hmap = np.reshape(hmap, (y_test.shape[1], y_test.shape[2]))
    print(hmap.shape)
    x, y = np.where(hmap == np.max(hmap))
    print(x, y)
    coords[:, joint] = [x, y]
coords = coords * 8
print('coords:', coords)
with open('pairwise_distribution.pickle', 'rb') as handle:
    pairwise_distribution = pickle.load(handle)
import matplotlib.pyplot as plt
# plt.figure(1)
# plt.imshow((img))
# plt.figure(2)
# plt.imshow((hmap))
for name in ['nose_torso', 'rsho_torso', 'relb_torso', 'rwri_torso', 'rhip_torso']:
    plt.imshow(pairwise_distribution[name])
    plt.savefig('img/0epoch_' + name + '.png', dpi=300)
    plt.clf()

### The second part
n_joint = 9  # the number of joints to display
y_test = np.load('y_test_flic_plus.npy')
x_test = np.load('x_test_flic_plus.npy')
print('x_test shape is', x_test.shape)
i = np.random.randint(0, high=x_test.shape[0])
print('Show the %dth image and the heat map for n_joint:' % i)
y_test = y_test.astype(np.float32)
y_test = y_test / 256
coords = np.zeros([2, n_joint])
img = x_test[i, :, :, :]
img = np.reshape(img, (x_test.shape[1], x_test.shape[2], x_test.shape[3]))
for joint in range(n_joint):
    print(joint)
    hmap = y_test[i, :, :, joint]
    hmap = np.reshape(hmap, (y_test.shape[1], y_test.shape[2]))
    print(hmap.shape)
    x, y = np.where(hmap == np.max(hmap))
    print(x, y)
    coords[:, joint] = [x, y]
coords = coords * 8
print('coords:', coords)
with open('pairwise_distribution_plus.pickle', 'rb') as handle:
    pairwise_distribution = pickle.load(handle)
import matplotlib.pyplot as plt
plt.figure(1)
plt.imshow((img))
plt.figure(2)
plt.imshow((hmap))
plt.figure(3)
plt.imshow((pairwise_distribution['lwri_torso']))
plt.show()
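# The two parts above are near-verbatim duplicates; a hedged refactoring sketch
# (the function name and the [0] indexing for tied maxima are illustrative
# choices, relying on the numpy import above):
def peak_coords(x_path, y_path, n_joint=9):
    y = np.load(y_path).astype(np.float32) / 256
    x = np.load(x_path)
    i = np.random.randint(0, high=x.shape[0])
    coords = np.zeros([2, n_joint])
    for joint in range(n_joint):
        hmap = y[i, :, :, joint]
        r, c = np.where(hmap == np.max(hmap))
        coords[:, joint] = [r[0], c[0]]  # take the first maximum if tied
    return coords * 8  # matches the *8 scaling used above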
| 29.845238 | 91 | 0.680096 | 442 | 2,507 | 3.708145 | 0.201357 | 0.054912 | 0.073215 | 0.024405 | 0.844417 | 0.844417 | 0.81025 | 0.81025 | 0.782184 | 0.782184 | 0 | 0.018448 | 0.156761 | 2,507 | 83 | 92 | 30.204819 | 0.756859 | 0.073793 | 0 | 0.688525 | 0 | 0 | 0.165414 | 0.028665 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.065574 | 0 | 0.065574 | 0.196721 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
71c8c113b1df92f47f371a737b0ac2cea0719ba7 | 24 | py | Python | astpretty.py | davemus/flake8-custom-trailing-commas | 2933be503370cafb20d2d27b1aed5d7135b0020e | ["MIT"] | 1 | 2021-04-20T09:01:40.000Z | 2021-04-20T09:01:40.000Z | astpretty.py | davemus/flake8-custom-trailing-commas | 2933be503370cafb20d2d27b1aed5d7135b0020e | ["MIT"] | null | null | null | astpretty.py | davemus/flake8-custom-trailing-commas | 2933be503370cafb20d2d27b1aed5d7135b0020e | ["MIT"] | null | null | null |
yield (a, b)
yield a, b
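# The two yield statements above differ only in redundant parentheses and parse
# to identical ASTs; a quick self-contained check (wrapping in a function body,
# since `yield` is only legal inside one):
import ast

d1 = ast.dump(ast.parse("def f():\n    yield (a, b)"))
d2 = ast.dump(ast.parse("def f():\n    yield a, b"))
assert d1 == d2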
| 8 | 12 | 0.583333 | 6 | 24 | 2.333333 | 0.5 | 0.857143 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 24 | 2 | 13 | 12 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
71df04e167183d3e94397ef8156ffca2ecb84fe0 | 36 | py | Python | lib/datasets/__init__.py | Razerl/TRN.pytorch | f6b9054f0ed80693b45a61066f9ab9a20cf0884e | ["MIT"] | 63 | 2019-11-20T00:28:43.000Z | 2022-03-23T03:45:13.000Z | lib/datasets/__init__.py | yuminko/TRN.pytorch | f2a8a1ff59679c6af58360066512e3e0b6926880 | ["MIT"] | 17 | 2019-12-11T11:23:36.000Z | 2022-03-13T08:13:31.000Z | lib/datasets/__init__.py | yuminko/TRN.pytorch | f2a8a1ff59679c6af58360066512e3e0b6926880 | ["MIT"] | 18 | 2019-12-24T06:49:54.000Z | 2022-03-23T09:14:41.000Z |
from .datasets import build_dataset
| 18 | 35 | 0.861111 | 5 | 36 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 36 | 1 | 36 | 36 | 0.9375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
71e08aa8ae8c2dc923ec149148b401d21021c1ab | 14,330 | py | Python | pybind/slxos/v17r_2_00/mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | ["Apache-2.0"] | null | null | null | pybind/slxos/v17r_2_00/mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | ["Apache-2.0"] | null | null | null | pybind/slxos/v17r_2_00/mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit/__init__.py | extremenetworks/pybind | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | ["Apache-2.0"] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class implicit_commit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-mpls - based on the path /mpls-config/router/mpls/mpls-cmds-holder/policy/implicit-commit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__implicit_commit_all','__implicit_commit_autobw_adjustment','__implicit_commit_lsp_reoptimize_timer',)
_yang_name = 'implicit-commit'
_rest_name = 'implicit-commit'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__implicit_commit_all = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="implicit-commit-all", rest_name="all", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-all'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for all triggers', u'alt-name': u'all'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
self.__implicit_commit_autobw_adjustment = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="implicit-commit-autobw-adjustment", rest_name="auto-bandwidth-adjustment", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-selective'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for auto-bandwidth adjustments', u'alt-name': u'auto-bandwidth-adjustment'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
self.__implicit_commit_lsp_reoptimize_timer = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="implicit-commit-lsp-reoptimize-timer", rest_name="lsp-reoptimize-timer", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-selective'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for reoptimizations', u'alt-name': u'lsp-reoptimize-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'mpls-config', u'router', u'mpls', u'mpls-cmds-holder', u'policy', u'implicit-commit']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'router', u'mpls', u'policy', u'implicit-commit']
def _get_implicit_commit_all(self):
"""
Getter method for implicit_commit_all, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit/implicit_commit_all (empty)
"""
return self.__implicit_commit_all
def _set_implicit_commit_all(self, v, load=False):
"""
Setter method for implicit_commit_all, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit/implicit_commit_all (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_implicit_commit_all is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_implicit_commit_all() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="implicit-commit-all", rest_name="all", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-all'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for all triggers', u'alt-name': u'all'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """implicit_commit_all must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="implicit-commit-all", rest_name="all", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-all'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for all triggers', u'alt-name': u'all'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)""",
})
self.__implicit_commit_all = t
if hasattr(self, '_set'):
self._set()
def _unset_implicit_commit_all(self):
self.__implicit_commit_all = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="implicit-commit-all", rest_name="all", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-all'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for all triggers', u'alt-name': u'all'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
def _get_implicit_commit_autobw_adjustment(self):
"""
Getter method for implicit_commit_autobw_adjustment, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit/implicit_commit_autobw_adjustment (empty)
"""
return self.__implicit_commit_autobw_adjustment
def _set_implicit_commit_autobw_adjustment(self, v, load=False):
"""
Setter method for implicit_commit_autobw_adjustment, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit/implicit_commit_autobw_adjustment (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_implicit_commit_autobw_adjustment is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_implicit_commit_autobw_adjustment() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="implicit-commit-autobw-adjustment", rest_name="auto-bandwidth-adjustment", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-selective'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for auto-bandwidth adjustments', u'alt-name': u'auto-bandwidth-adjustment'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """implicit_commit_autobw_adjustment must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="implicit-commit-autobw-adjustment", rest_name="auto-bandwidth-adjustment", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-selective'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for auto-bandwidth adjustments', u'alt-name': u'auto-bandwidth-adjustment'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)""",
})
self.__implicit_commit_autobw_adjustment = t
if hasattr(self, '_set'):
self._set()
def _unset_implicit_commit_autobw_adjustment(self):
self.__implicit_commit_autobw_adjustment = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="implicit-commit-autobw-adjustment", rest_name="auto-bandwidth-adjustment", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-selective'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for auto-bandwidth adjustments', u'alt-name': u'auto-bandwidth-adjustment'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
def _get_implicit_commit_lsp_reoptimize_timer(self):
"""
Getter method for implicit_commit_lsp_reoptimize_timer, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit/implicit_commit_lsp_reoptimize_timer (empty)
"""
return self.__implicit_commit_lsp_reoptimize_timer
def _set_implicit_commit_lsp_reoptimize_timer(self, v, load=False):
"""
Setter method for implicit_commit_lsp_reoptimize_timer, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit/implicit_commit_lsp_reoptimize_timer (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_implicit_commit_lsp_reoptimize_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_implicit_commit_lsp_reoptimize_timer() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="implicit-commit-lsp-reoptimize-timer", rest_name="lsp-reoptimize-timer", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-selective'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for reoptimizations', u'alt-name': u'lsp-reoptimize-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """implicit_commit_lsp_reoptimize_timer must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="implicit-commit-lsp-reoptimize-timer", rest_name="lsp-reoptimize-timer", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-selective'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for reoptimizations', u'alt-name': u'lsp-reoptimize-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)""",
})
self.__implicit_commit_lsp_reoptimize_timer = t
if hasattr(self, '_set'):
self._set()
def _unset_implicit_commit_lsp_reoptimize_timer(self):
self.__implicit_commit_lsp_reoptimize_timer = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="implicit-commit-lsp-reoptimize-timer", rest_name="lsp-reoptimize-timer", parent=self, choice=(u'implicit-commit-options', u'implicit-commit-case-selective'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable implicit commit for reoptimizations', u'alt-name': u'lsp-reoptimize-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
implicit_commit_all = __builtin__.property(_get_implicit_commit_all, _set_implicit_commit_all)
implicit_commit_autobw_adjustment = __builtin__.property(_get_implicit_commit_autobw_adjustment, _set_implicit_commit_autobw_adjustment)
implicit_commit_lsp_reoptimize_timer = __builtin__.property(_get_implicit_commit_lsp_reoptimize_timer, _set_implicit_commit_lsp_reoptimize_timer)
__choices__ = {u'implicit-commit-options': {u'implicit-commit-case-all': [u'implicit_commit_all'], u'implicit-commit-case-selective': [u'implicit_commit_autobw_adjustment', u'implicit_commit_lsp_reoptimize_timer']}}
_pyangbind_elements = {'implicit_commit_all': implicit_commit_all, 'implicit_commit_autobw_adjustment': implicit_commit_autobw_adjustment, 'implicit_commit_lsp_reoptimize_timer': implicit_commit_lsp_reoptimize_timer, }
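# Illustrative usage sketch (not part of the generated module; assumes the
# Python 2 pyangbind runtime that this auto-generated class targets):
#   obj = implicit_commit()
#   obj.implicit_commit_all = True   # selects the 'implicit-commit-case-all' case
#   obj._path()  # -> [u'mpls-config', u'router', u'mpls', u'mpls-cmds-holder',
#                #     u'policy', u'implicit-commit']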
| 74.248705
| 596
| 0.757502
| 1,951
| 14,330
| 5.287032
| 0.091748
| 0.171013
| 0.057586
| 0.07271
| 0.838488
| 0.773825
| 0.734561
| 0.730005
| 0.730005
| 0.695686
| 0
| 0.000476
| 0.120447
| 14,330
| 192
| 597
| 74.635417
| 0.817915
| 0.150942
| 0
| 0.410853
| 0
| 0.023256
| 0.392738
| 0.20986
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.062016
| 0
| 0.294574
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71e400337fa1de42a59fde9c92722c780c20ad3c
| 48
|
py
|
Python
|
opbasm/__init__.py
|
1Maxnet1/opbasm
|
bef9e446f089a6bc6cfc21f6c8e799010572daf5
|
[
"MIT"
] | 50
|
2015-06-02T11:32:11.000Z
|
2022-03-28T19:12:00.000Z
|
opbasm/__init__.py
|
1Maxnet1/opbasm
|
bef9e446f089a6bc6cfc21f6c8e799010572daf5
|
[
"MIT"
] | 22
|
2015-06-15T15:21:45.000Z
|
2022-01-19T09:18:00.000Z
|
opbasm/__init__.py
|
1Maxnet1/opbasm
|
bef9e446f089a6bc6cfc21f6c8e799010572daf5
|
[
"MIT"
] | 13
|
2015-06-02T11:51:03.000Z
|
2022-01-19T10:16:24.000Z
|
'''Main Opbasm package'''
from opbasm import *
| 12
| 25
| 0.6875
| 6
| 48
| 5.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 3
| 26
| 16
| 0.825
| 0.395833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e0965f0d2664bc8b4fd51a9d2a7ca836d26c3064
| 65,382
|
py
|
Python
|
miri/datamodels/operations.py
|
JWST-MIRI/MiriTE
|
6c2f26dce506260b548c73bd33ab9e8f9f6c629d
|
[
"CNRI-Python"
] | null | null | null |
miri/datamodels/operations.py
|
JWST-MIRI/MiriTE
|
6c2f26dce506260b548c73bd33ab9e8f9f6c629d
|
[
"CNRI-Python"
] | 24
|
2019-08-09T15:03:20.000Z
|
2022-03-04T10:04:48.000Z
|
miri/datamodels/operations.py
|
JWST-MIRI/MiriTE
|
6c2f26dce506260b548c73bd33ab9e8f9f6c629d
|
[
"CNRI-Python"
] | 4
|
2019-06-16T15:03:23.000Z
|
2020-12-02T19:51:52.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Arithmetic and binary operator functions for the MIRI data model.
:Reference:
The STScI jwst.datamodels documentation.
https://jwst-pipeline.readthedocs.io/en/latest/jwst/datamodels/index.html
:History:
24 Jan 2011: Created
22 Feb 2013: Zero dimensional arrays cannot be masked.
08 Oct 2013: _shrink_dq function added, to allow masking when a DQ array
is larger than a data array.
31 Oct 2013: Improved memory management by starting a mathematical
operation with an empty object rather than a copy.
Corrected the formula used by _combine_errors_divisive.
11 Dec 2013: Mask an array only when its data quality value contains
an odd number. Corrected a typo in _generate_mask().
21 May 2014: Make sure data quality arrays are integer before using
bitwise operations.
25 Sep 2014: reserved_flags replaced by master_flags.
30 Nov 2015: Tightened up a few data type conversions, to ensure that
bit masks have the same data type before being combined.
28 Jan 2016: Changed HasMask to use the .dq attribute instead of .mask
(which is now defined as an alias).
23 Mar 2016: Documentation correction.
06 Apr 2016: Replaced throughout the use of _real_cls() by __class__(),
following changes to jwst.datamodels.model_base.DataModel.
04 May 2016: noerr option added to HasDataErrAndDq.
12 Jul 2017: set_data_fill and set_err_fill options added to HasDataErrAndDq.
27 Jun 2018: Added HasDataErrAndGroups class to be used with ramp data.
12 Mar 2019: Removed use of astropy.extern.six (since Python 2 no longer used).
12 Feb 2020: Added _check_broadcastable() methods.
02 Dec 2020: Update import of jwst base model class to JwstDataModel.
28 Sep 2021: Replaced np.bool with np.bool_
@author: Steven Beard (UKATC), Vincent Geers (UKATC)
"""
import sys
import numpy as np
import numpy.ma as ma
from miri.datamodels.dqflags import master_flags, combine_quality
# Import the STScI image model and utilities
import jwst.datamodels.util as jmutil
from jwst.datamodels import JwstDataModel
# List all classes and global functions here.
__all__ = ['are_broadcastable', 'HasMask', 'HasData', 'HasDataErrAndDq', 'HasDataErrAndGroups']
def are_broadcastable( *shapes ):
"""
Check whether an arbitrary list of array shapes are broadcastable.
:Parameters:
*shapes: tuple or list
A set of array shapes.
:Returns:
broadcastable: bool
True if all the shapes are broadcastable.
False if they are not broadcastable.
"""
if len(shapes) < 2:
# A single shape is always broadcastable against itself.
return True
else:
# Extract the dimensions and check they are either
# equal to each other or equal to 1.
for dim in zip(*[shape[::-1] for shape in shapes]):
if len(set(dim).union({1})) <= 2:
# Dimensions match or are 1. Try the next one.
pass
else:
# Dimensions do not match. Not broadcastable.
return False
# All dimensions are broadcastable.
return True
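# Illustrative examples (a sketch, not part of the original module): shapes
# are compared right-to-left, NumPy-style, and leading dimensions present in
# only one shape are ignored.
#   are_broadcastable((3, 3), (3, 3))     -> True
#   are_broadcastable((4, 3, 3), (3, 3))  -> True  (extra leading dim ignored)
#   are_broadcastable((3, 3), (1, 3))     -> True  (a dimension of 1 broadcasts)
#   are_broadcastable((3, 3), (2, 3))     -> False (3 vs 2 cannot broadcast)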
class HasMask(object):
"""
An abstract class which provides the binary operations relevant for
data models containing a primary mask array.
The primary mask array is assumed to be stored in an attribute
called dq.
"""
def __init__(self, dq):
if dq is not None:
self.dq = dq
# "mask" is an alias for the "dq" attribute.
@property
def mask(self):
if hasattr(self, 'dq'):
return self.dq
else:
return None
@mask.setter
def mask(self, dq):
self.dq = dq
def _check_broadcastable(self):
"""
Helper function which raises an exception if the
linked data arrays are not broadcastable.
"""
# A single data array is always broadcastable
pass
def _check_for_mask(self):
"""
Helper function which raises an exception if the object
does not contain a valid data array.
"""
if not self._isvalid(self.dq):
strg = "%s object does not contain a valid mask array" % \
self.__class__.__name__
raise AttributeError(strg)
def _isvalid(self, data):
"""
Helper function to verify that a given array, tuple or list is
not empty and has valid content.
"""
if data is None:
return False
elif isinstance(data, (list,tuple)):
if len(data) <= 0:
return False
else:
return True
elif isinstance(data, (np.ndarray)):
if data.size <= 0:
return False
else:
return True
elif not data:
return False
else:
return True
def __or__(self, other):
"""
Bitwise OR operation between this mask and another
data product or scalar.
"""
# Check this object is capable of binary operation.
self._check_for_mask()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being operated with.
# This is only a sensible operation when the scalar
# quantity is converted to an integer.
newobject.dq = self.dq | int(other)
elif isinstance(other, (np.ndarray,list,tuple)):
# A data array is being combined with this product. This should
# work provided the two arrays are broadcastable.
newobject.dq = self.dq | np.asarray(other, dtype=self.dq.dtype)
elif isinstance(other, JwstDataModel) and \
hasattr(other, 'dq') and self._isvalid(other.dq):
# Two mask data products are being combined together.
newobject.dq = self.dq | other.dq
else:
strg = "Cannot bitwise combine " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __xor__(self, other):
"""
Bitwise EXCLUSIVE OR operation between this mask and another
data product or scalar.
"""
# Check this object is capable of binary operation.
self._check_for_mask()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being operated with.
# This is only a sensible operation when the scalar
# quantity is converted to an integer.
newobject.dq = self.dq ^ int(other)
elif isinstance(other, (np.ndarray,list,tuple)):
# A data array is being combined with this product. This should
# work provided the two arrays are broadcastable.
newobject.dq = self.dq ^ np.asarray(other, dtype=self.dq.dtype)
elif isinstance(other, JwstDataModel) and \
hasattr(other, 'dq') and self._isvalid(other.dq):
# Two mask data products are being combined together.
newobject.dq = self.dq ^ other.dq
else:
strg = "Cannot bitwise combine " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __and__(self, other):
"""
Bitwise AND operation between this mask and another
data product or scalar.
"""
# Check this object is capable of binary operation.
self._check_for_mask()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being operated with.
# This is only a sensible operation when the scalar
# quantity is converted to an integer.
newobject.dq = self.dq & int(other)
elif isinstance(other, (np.ndarray,list,tuple)):
# A data array is being combined with this product. This should
# work provided the two arrays are broadcastable.
newobject.dq = self.dq & np.asarray(other, dtype=self.dq.dtype)
elif isinstance(other, JwstDataModel) and \
hasattr(other, 'dq') and self._isvalid(other.dq):
# Two mask data products are being combined together.
newobject.dq = self.dq & other.dq
else:
strg = "Cannot bitwise combine " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
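# Illustrative sketch (assumes a hypothetical concrete subclass, here called
# MaskModel, that mixes HasMask into a JwstDataModel):
#   m1 = MaskModel(dq=np.array([0, 1, 2, 3], dtype=np.uint32))
#   (m1 | 1).dq   -> [1, 1, 3, 3]   (set bit 0 everywhere)
#   (m1 & 2).dq   -> [0, 0, 2, 2]   (keep only bit 1)
#   (m1 ^ m1).dq  -> [0, 0, 0, 0]   (XOR with itself clears every flag)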
class HasData(object):
"""
An abstract class which provides the arithmetic operations
relevant for data models containing a primary data array.
The primary data array is assumed to be stored in an attribute
called data.
"""
def __init__(self, data):
if data is not None:
self.data = data
def _check_broadcastable(self):
"""
Helper function which raises an exception if the
linked data arrays are not broadcastable.
"""
# A single data array is always broadcastable
pass
def _check_for_data(self):
"""
Helper function which raises an exception if the object
does not contain a valid data array.
"""
if not self._isvalid(self.data):
strg = "%s object does not contain a valid data array" % \
self.__class__.__name__
raise AttributeError(strg)
def _isvalid(self, data):
"""
Helper function to verify that a given array, tuple or list is
not empty and has valid content.
"""
if data is None:
return False
elif isinstance(data, (list,tuple)):
if len(data) <= 0:
return False
else:
return True
elif isinstance(data, (ma.masked_array,np.ndarray)):
if data.size <= 0:
return False
else:
return True
elif not data:
return False
else:
return True
def __add__(self, other):
"""
Add a scalar, an array or another MiriMeasuredModel object to
this MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being added.
newobject.data = self.data + other
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# A data array is being added to this product. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data + np.asarray(other)
elif isinstance(other, JwstDataModel):
# Two data products are being added together. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data + other.data
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot add " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __sub__(self, other):
"""
Subtract a scalar, an array or another MiriMeasuredModel object
from this MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being subtracted.
newobject.data = self.data - other
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# A data array is being subtracted from this product. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data - np.asarray(other)
elif isinstance(other, JwstDataModel):
# One data product is being subtracted from another. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data - other.data
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot subtract " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __mul__(self, other):
"""
Multiply this MiriMeasuredModel object by a scalar, an array or
another MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being multiplied.
newobject.data = self.data * other
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# This product is being multiplied by a data array. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data * np.asarray(other)
elif isinstance(other, JwstDataModel):
# Two data products are being multiplied together. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data * other.data
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot multiply " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __truediv__(self, other):
"""
Divide this MiriMeasuredModel object by a scalar, an array or
another MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being divided.
if np.abs(other) <= sys.float_info.epsilon:
strg = "%s: Divide by scalar zero!" % self.__class__.__name__
del newobject
raise ValueError(strg)
newobject.data = self.data / other
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# This product is being divided by a data array. This should
# work provided the two arrays are broadcastable.
# NOTE: Any divide by zero operations will be trapped by numpy.
newobject.data = self.data / np.asarray(other)
elif isinstance(other, JwstDataModel):
# The data product is being divided by another. Ensure they
# both have a valid primary data array.
# NOTE: Any divide by zero operations will be trapped by numpy.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data / other.data
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot divide " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
# In Python 3, division is the same as true division.
def __div__(self, other):
return self.__truediv__(other)
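# Illustrative sketch (assumes a hypothetical concrete subclass, here called
# SimpleDataModel, that mixes HasData into a JwstDataModel):
#   dm = SimpleDataModel(data=np.array([1.0, 2.0, 3.0]))
#   (dm + 1.0).data        -> [2.0, 3.0, 4.0]  (scalar is broadcast)
#   (dm * [2, 2, 2]).data  -> [2.0, 4.0, 6.0]  (array must be broadcastable)
#   dm / 0.0               -> raises ValueError (scalar zero is trapped)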
class HasDataErrAndDq(HasData):
"""
An abstract class which provides the arithmetic operations and
masking functions relevant for data models containing a data array,
error array and data quality array.
The primary, error and quality arrays are assumed to be stored in
attributes called data, err and dq.
"""
def __init__(self, data, err, dq, noerr=False):
super(HasDataErrAndDq, self).__init__(data=data)
self._data_mask = None
self._data_fill = 0.0
self._data_fill_value = None
self.noerr = noerr
if not self.noerr:
if err is not None:
self.err = err
self._err_mask = None
self._err_fill = 'max'
self._err_fill_value = None
if dq is not None:
self.dq = dq
def _check_broadcastable(self):
"""
Helper function which raises an exception if the
linked data arrays are not broadcastable.
"""
if self._isvalid(self.data):
if hasattr(self, 'err') and self._isvalid(self.err) and \
hasattr(self, 'dq') and self._isvalid(self.dq):
if not are_broadcastable( self.data.shape, self.err.shape, self.dq.shape ):
strg = "%s object does not contain broadcastable data arrays." % \
self.__class__.__name__
strg += "\n\tdata.shape=%s, err.shape=%s and dq=shape=%s" % \
(str(self.data.shape), str(self.err.shape), str(self.dq.shape))
raise TypeError(strg)
elif hasattr(self, 'err') and self._isvalid(self.err):
if not are_broadcastable( self.data.shape, self.err.shape ):
strg = "%s object does not contain broadcastable data arrays." % \
self.__class__.__name__
strg += "\n\tdata.shape=%s and err.shape=%s" % \
(str(self.data.shape), str(self.err.shape))
raise TypeError(strg)
elif hasattr(self, 'dq') and self._isvalid(self.dq):
if not are_broadcastable( self.data.shape, self.dq.shape ):
strg = "%s object does not contain broadcastable data arrays." % \
self.__class__.__name__
strg += "\n\tdata.shape=%s, and dq=shape=%s" % \
(str(self.data.shape), str(self.dq.shape))
raise TypeError(strg)
def set_data_fill(self, data_fill):
"""
Set the data fill instruction to something other than the default
of 0.0.
:Parameters:
data_fill: str or number
An instruction for how to fill the missing values within
a masked array:
* 'min': Fill with the minimum value.
* 'max': Fill with the maximum value.
* 'mean': Fill with the mean value.
* 'median': Fill with the median value.
* '': Fill with the default numpy value.
* Any other value is assumed to be the fill value.
"""
self._data_fill = data_fill
def set_err_fill(self, err_fill):
"""
Set the error fill instruction to something other than the default
of 'max'.
:Parameters:
err_fill: str or number
An instruction for how to fill the missing values within
a masked array:
* 'min': Fill with the minimum value.
* 'max': Fill with the maximum value.
* 'mean': Fill with the mean value.
* 'median': Fill with the median value.
* '': Fill with the default numpy value.
* Any other value is assumed to be the fill value.
"""
self._err_fill = err_fill
def _shrink_dq(self, dqarray):
"""
Helper function which shrinks a data quality array along
its highest axis to generate a new array of smaller size.
For example, a 3-D array of shape (3 x 3 x 2) is shrunk to
a 2-D array of shape (3 x 3). Quality flags are combined
in a bitwise manner.
"""
# Ensure the input array is of unsigned integer type
dqarray = np.asarray(dqarray, dtype=np.uint)
# The new shape has the highest dimension removed
newshape = dqarray.shape[1:]
# Start with a DQ array full of zeros
newdq = np.zeros( newshape, dtype=np.uint)
# Split the data quality array along the highest
# axis into a list of pieces.
npieces = dqarray.shape[0]
for piece in np.split(dqarray, npieces, 0):
# Convert each piece into an N-1 dimensional array of integers.
# Each should be the same size and shape as the new DQ array.
npiece = np.asarray( np.squeeze( piece ), dtype=np.uint)
# Merge each new piece into the new DQ array with a bitwise OR
newdq |= npiece
# The result should be a new mask with reduced dimensionality
return newdq
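# Worked example (sketch): a (2 x 2 x 2) DQ array is shrunk to (2 x 2) by
# combining the planes along axis 0 with a bitwise OR:
#   dq = np.array([[[1, 0], [2, 0]],
#                  [[4, 0], [2, 1]]])
#   self._shrink_dq(dq)  ->  [[5, 0], [2, 1]]   (1|4=5, 0|0=0, 2|2=2, 0|1=1)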
def _generate_mask(self, data, dq, bitmask=1):
"""
Use the contents of the dq array to generate a numpy mask of the
same shape as the data array.
:Parameters:
data: numpy array
The data array to be masked
dq: numpy array
The data quality array to be used to generate the mask
bitmask: unsigned int
If specified, a mask for selecting particular bits
from the data quality values.
The default of 1 will match only bit zero.
None will match any non-zero data quality value.
:Returns:
mask: numpy mask
A mask which can be used with the data array.
"""
# print("+++ Generating mask from", data, "\nand", dq,
# "\nwith bitmask", bitmask)
# A mask can only be generated when both arrays exist and
# are not empty. The DATA array and DQ array must also be
# broadcastable.
if self._isvalid(data) and dq is not None:
# Ensure the data quality array is of unsigned integer type
# so bitwise operations are possible.
dq = np.asarray(dq, dtype=np.uint)
if data.ndim < dq.ndim and jmutil.can_broadcast(dq.shape, data.shape):
# The DQ array is larger than the array being masked.
# This is a special case.
# Shrink down the DQ array until the dimensions match.
shrunk_dq = self._shrink_dq(dq)
while (shrunk_dq.ndim > data.ndim):
shrunk_dq = self._shrink_dq(shrunk_dq)
# Start with a zero (False) mask and mask off (set to True)
# all the pixels indicated by the DQ array.
maskdq = np.zeros(data.shape, dtype=np.bool_)
if bitmask is None:
# None means all bits set.
bad = np.where(shrunk_dq != 0)
else:
bad = np.where((shrunk_dq & bitmask) != 0)
maskdq[bad] = True
return maskdq
elif data.size >= dq.size and jmutil.can_broadcast(data.shape, dq.shape):
# Broadcast the DQ array onto something the same shape
# as the data array.
datadq = np.zeros(data.shape, dtype=np.uint) + dq
# Start with a zero (False) mask and mask off (set to True)
# all the pixels indicated by the DQ array.
maskdq = np.zeros(data.shape, dtype=np.bool_)
if bitmask is None:
# None means all bits set.
bad = np.where(datadq != 0)
else:
bad = np.where((datadq & bitmask) != 0)
maskdq[bad] = True
return maskdq
else:
return ma.nomask # or None
else:
return ma.nomask # or None
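# Worked example (sketch) of the bitmask semantics:
#   data = np.zeros(4); dq = np.array([0, 1, 2, 3])
#   self._generate_mask(data, dq, bitmask=1)    -> [False, True, False, True]
#       (only DQ values with bit 0 set are masked)
#   self._generate_mask(data, dq, bitmask=None) -> [False, True, True, True]
#       (any non-zero DQ value is masked)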
def _generate_fill(self, data, fill_descr):
"""
Generate a fill value for a data array based on the masked array
plus a fill description.
:Parameters:
data: numpy array
The data array to be examined.
fill_descr: str or number
An instruction for how to fill the missing values within
a masked array:
* 'min': Fill with the minimum value.
* 'max': Fill with the maximum value.
* 'mean': Fill with the mean value.
* 'median': Fill with the median value.
* '': Fill with the default numpy value.
* Any other value is assumed to be the fill value.
:Returns:
fill_value: number
The fill value
"""
# The data array must exist and must not be empty.
if self._isvalid(data):
if isinstance(fill_descr, str):
if fill_descr == 'min':
# Use the minimum unmasked value as the fill value
fill_value = data.min()
elif fill_descr == 'max':
# Use the maximum unmasked value as the fill value
fill_value = data.max()
elif fill_descr == 'mean':
# Use the mean unmasked value as the fill value
fill_value = data.mean()
elif fill_descr == 'median':
# Use the median unmasked value as the fill value
# np.ndarray has no .median() method; np.ma.median works for
# both plain and masked arrays.
fill_value = np.ma.median(data)
else:
# Use the default numpy fill value
fill_value = None
else:
# Assume the fill description is a number or None
fill_value = fill_descr
else:
fill_value = None
return fill_value
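# Worked example (sketch):
#   data = np.array([1.0, 5.0, 3.0])
#   self._generate_fill(data, 'min')  -> 1.0
#   self._generate_fill(data, 'max')  -> 5.0
#   self._generate_fill(data, 7.5)    -> 7.5  (a number is used verbatim)
#   self._generate_fill(data, '')     -> None (fall back to the numpy default)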
def _mask_array(self, data, dq, fill_value=None):
"""
Return a masked version of the given array.
NOTE: This function might introduce small rounding errors into
floating point data, so a value displayed as 3.00000005 before
masking might display as 3.000000048 afterwards. The difference
is insignificant, but it looks worse when displayed.
:Parameters:
data: numpy array
The data array to be masked
dq: numpy array
The data quality array to be used to generate the mask
fill_value: number
If specified, the value used to fill missing entries in the
data array. If not specified, a numpy default value will be
used.
:Returns:
masked_data: numpy masked array
A masked version of the original data array.
"""
maskdq = self._generate_mask(data, dq)
return ma.array(data, mask=maskdq, fill_value=fill_value)
def _combine_errors_maximum(self, error1, error2):
"""
Helper function to combine two error arrays and return the maximum.
Can be used when two data arrays are combined with a min or max
function, or are combined by resampling.
NOTE: This function is valid only when both error arrays are sampling
the same error source and you prefer to believe the most pessimistic
estimate. Use with care.
"""
# The end product will have an ERR unit only if both products
# started with an ERR unit.
if error1 is not None and error2 is not None:
newerr = np.maximum(error1, error2)
else:
newerr = None
return newerr
def _combine_errors_quadrature(self, error1, error2):
"""
Helper function to combine two error arrays in quadrature.
Can be used when two data arrays are added or subtracted.
NOTE: This function is valid only when combining two sets
of data with independent errors. This assumption might not
be valid in all circumstances, so use with care.
"""
# The end product will have an ERR unit only if both products
# started with an ERR unit.
if error1 is not None and error2 is not None:
# NOTE: These operations might cause an overflow
# for some data types.
err1sq = np.square(error1)
err2sq = np.square(error2)
sumsq = err1sq + err2sq
newerr = np.sqrt(sumsq)
else:
newerr = None
return newerr
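# Worked example (sketch): independent errors add in quadrature,
# newerr = sqrt(error1**2 + error2**2); combining errors of 3.0 and 4.0
# gives sqrt(9.0 + 16.0) = 5.0.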
def _combine_errors_multiplicative(self, error1, error2, data1, data2):
"""
Helper function to combine two error arrays in quadrature,
where each error array is weighted by a sensitivity
coefficient.
This function can be used when two data arrays are multiplied,
so the sensitivity coefficient is proportional to the other
array's measurement data.
NOTE: This function is valid only when combining two sets
of data with independent errors. This assumption might not
be valid in all circumstances, so use with care.
"""
# The end product will have an ERR unit only if both products
# started with an ERR unit.
if error1 is not None and error2 is not None:
if data1 is not None and data2 is not None:
# NOTE: These operations might cause an overflow
# for some data types.
data1sq = np.square(data1)
data2sq = np.square(data2)
err1sq = np.square(error1)
err2sq = np.square(error2)
sumsq = (data2sq * err1sq) + (data1sq * err2sq)
#newerr = np.sqrt(sumsq) / (data1sq+data2sq) ???
newerr = np.sqrt(sumsq)
else:
# Without the data arrays the weighting is unknown.
return self._combine_errors_quadrature(error1, error2)
else:
newerr = None
return newerr
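# Worked example (sketch): for a product p = a * b the propagated error is
# sigma_p = sqrt(b**2 * sigma_a**2 + a**2 * sigma_b**2), which is the sumsq
# expression above. With a=2, sigma_a=0.1, b=3, sigma_b=0.2:
#   sigma_p = sqrt(9*0.01 + 4*0.04) = sqrt(0.25) = 0.5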
def _combine_errors_divisive(self, error1, error2, data1, data2):
"""
Helper function to combine two error arrays in quadrature,
where each error array is weighted by a sensitivity
coefficient.
This function is used when one data array is divided by
another, so the sensitivity coefficient for the first array
is proportional to the inverse of the second but the
sensitivity coefficient for the second array is proportional
to the first.
(The formula follows the standard Gaussian error propagation for a
quotient; see the worked example after this method.)
NOTE: This function is valid only when combining two sets
of data with independent errors. This assumption might not
be valid in all circumstances, so use with care.
"""
# The end product will have an ERR unit only if both products
# started with an ERR unit.
if error1 is not None and error2 is not None:
if data1 is not None and data2 is not None:
# NOTE: These operations might cause an overflow
# for some data types.
data1sq = np.square(data1)
data2sq = np.square(data2)
# NOTE: The errors will blow up if any of the data2sq values
# are close to zero. There might be a divide by zero.
err1sq = np.square(error1)
err2sq = np.square(error2)
sumsq = (err1sq / data2sq) + \
((err2sq * data1sq) / (data2sq * data2sq))
# NOTE: This is the Gaussian propagation suggested by Juergen Schreiber,
# since the partial derivative of a/b with respect to b is -a/(b*b).
newerr = np.sqrt(sumsq)
else:
# Without the data arrays the weighting is unknown.
return self._combine_errors_quadrature(error1, error2)
else:
newerr = None
return newerr
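# Worked example (sketch): for a quotient q = a / b the propagated error is
# sigma_q**2 = sigma_a**2/b**2 + a**2*sigma_b**2/b**4, which is the sumsq
# expression above (equivalently, sigma_q = |q|*sqrt((sigma_a/a)**2 +
# (sigma_b/b)**2)). With a=6, sigma_a=0.3, b=3, sigma_b=0.3:
#   sigma_q**2 = 0.09/9 + 36*0.09/81 = 0.01 + 0.04 = 0.05, so sigma_q ~ 0.224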
def _combine_quality(self, dq1, dq2):
"""
Helper function to combine the quality arrays of two
MiriMeasuredModel objects. Any point flagged as bad in
either of the two products is flagged as bad in the
result.
"""
return combine_quality(dq1, dq2)
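# Illustrative sketch (assuming, as the docstring above implies, that
# combine_quality behaves like a bitwise OR of the two flag arrays):
#   self._combine_quality([0, 1, 2], [2, 0, 2])  ->  [2, 1, 2]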
def __add__(self, other):
"""
Add a scalar, an array or another JwstDataModel object to
this MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being added. Add to the SCI array but
# leave the ERR and DQ arrays as they are.
newobject.data = self.data + other
if not self.noerr:
newobject.err = self.err
newobject.dq = self.dq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# A data array is being added to this product. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data + np.asarray(other)
# Adding a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.dq = self.dq
elif isinstance(other, JwstDataModel):
# Two data products are being added together. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data + other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = \
self._combine_errors_quadrature(self.err,
other.err)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'dq') and self._isvalid(other.dq):
newobject.dq = self._combine_quality(self.dq, other.dq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot add " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __sub__(self, other):
"""
Subtract a scalar, an array or another JwstDataModel object from
this MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being subtracted. Subtract from the SCI
# array but leave the ERR and DQ arrays as they are.
newobject.data = self.data - other
if not self.noerr:
newobject.err = self.err
newobject.dq = self.dq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# A data array is being subtracted from this product. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data - np.asarray(other)
# Subtracting a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.dq = self.dq
elif isinstance(other, JwstDataModel):
# Two data products are being subtracted. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data - other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = \
self._combine_errors_quadrature(self.err, other.err)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'dq') and self._isvalid(other.dq):
newobject.dq = self._combine_quality(self.dq, other.dq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot subtract " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __mul__(self, other):
"""
Multiply this MiriMeasuredModel object by a scalar, an array or
another JwstDataModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being multiplied. Multiply the SCI and ERR
# arrays but leave the DQ array as it is.
newobject.data = self.data * other
if not self.noerr:
newobject.err = self.err * other
newobject.dq = self.dq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# This product is being multiplied by a data array. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data * np.asarray(other)
# Multiplying by a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.dq = self.dq
elif isinstance(other, JwstDataModel):
# Two data products are being multiplied together. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data * other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = self._combine_errors_multiplicative( \
self.err, other.err, self.data,
other.data)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'dq') and self._isvalid(other.dq):
newobject.dq = self._combine_quality(self.dq, other.dq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot multiply " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __truediv__(self, other):
"""
Divide this MiriMeasuredModel object by a scalar, an array or
another JwstDataModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being divided. Divide the SCI and ERR
# arrays but leave the DQ array as it is.
# Trap a divide by zero.
if np.abs(other) <= sys.float_info.epsilon:
strg = "%s: Divide by scalar zero!" % self.__class__.__name__
del newobject
raise ValueError(strg)
newobject.data = self.data / other
if not self.noerr:
newobject.err = self.err / other
newobject.dq = self.dq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# This product is being divided by a data array. This should
# work provided the two arrays are broadcastable.
# NOTE: Any divide by zero operations will be trapped by numpy.
newobject.data = self.data / np.asarray(other)
# Dividing by a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.dq = self.dq
elif isinstance(other, JwstDataModel):
# The data product is being divided by another. Ensure they
# both have a valid primary data array.
# NOTE: Any divide by zero operations will be trapped by numpy.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data / other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = self._combine_errors_divisive( \
self.err, other.err, self.data,
other.data)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'dq') and self._isvalid(other.dq):
newobject.dq = self._combine_quality(self.dq, other.dq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot divide " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
# In Python 3, division is the same as true division.
def __div__(self, other):
return self.__truediv__(other)
@property
def data_masked(self):
# Generate the masked data on the fly. This ensures the
# masking is always up to date with the latest dq array.
# TODO: Can this result be cached and the cache invalidated
# when either the data or dq arrays change?
if self.data is not None and self.data.ndim > 0 and self.dq is not None:
if np.all(self.dq == 0):
# All data good.
return self.data
else:
self._data_mask = self._generate_mask(self.data, self.dq)
self._data_fill_value = self._generate_fill(self.data,
self._data_fill)
return ma.array(self.data, mask=self._data_mask,
fill_value=self._data_fill_value)
else:
return self.data
@property
def err_masked(self):
# Generate the masked error array on the fly. This ensures the
# masking is always up to date with the latest dq array.
# TODO: Can this result be cached and the cache invalidated
# when either the err or dq arrays change?
if self.noerr:
return None
if self.err is not None and self.err.ndim > 0 and self.dq is not None:
if np.all(self.dq == 0):
# All data good.
return self.err
else:
self._err_mask = self._generate_mask(self.err, self.dq)
self._err_fill_value = self._generate_fill(self.err,
self._err_fill)
return ma.array(self.err, mask=self._err_mask,
fill_value=self._err_fill_value)
else:
return self.err
@property
def data_filled(self):
masked = self.data_masked
if masked is not None and isinstance(masked, ma.masked_array):
return masked.filled(self._data_fill_value)
else:
return self.data
@property
def err_filled(self):
if self.noerr:
return None
masked = self.err_masked
if masked is not None and isinstance(masked, ma.masked_array):
return masked.filled(self._err_fill_value)
else:
return self.err
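# Illustrative sketch (assumes a hypothetical concrete model, here called
# MeasModel, built on HasDataErrAndDq):
#   mm = MeasModel(data=np.array([1.0, 2.0, 3.0]),
#                  err=np.array([0.1, 0.2, 0.3]),
#                  dq=np.array([0, 1, 0]))
#   mm.data_masked  -> masked array [1.0, --, 3.0]
#   mm.data_filled  -> [1.0, 0.0, 3.0]  (default data fill is 0.0)
#   mm.err_filled   -> [0.1, 0.3, 0.3]  (default err fill is 'max')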
class HasDataErrAndGroups(HasDataErrAndDq):
"""
An abstract class which overrides the data quality masking functions
of HasDataErrAndDq for ramp data, which contains PIXELDQ and GROUPDQ
arrays instead of DQ. The DQ array for ramp data is read-only.
"""
def __init__(self, data, err, noerr=False):
super(HasDataErrAndGroups, self).__init__(data=data, err=err, dq=None,
noerr=noerr )
def __add__(self, other):
"""
Add a scalar, an array or another JwstDataModel object to
this MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being added. Add to the SCI array but
# leave the ERR, PIXELDQ and GROUPDQ arrays as they are.
newobject.data = self.data + other
if not self.noerr:
newobject.err = self.err
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# A data array is being added to this product. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data + np.asarray(other)
# Adding a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, JwstDataModel):
# Two data products are being added together. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data + other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = \
self._combine_errors_quadrature(self.err,
other.err)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'pixeldq') and self._isvalid(other.pixeldq):
newobject.pixeldq = self._combine_quality(self.pixeldq, other.pixeldq)
if hasattr(other, 'groupdq') and self._isvalid(other.groupdq):
newobject.groupdq = self._combine_quality(self.groupdq, other.groupdq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot add " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __sub__(self, other):
"""
Subtract a scalar, an array or another JwstDataModel object from
this MiriMeasuredModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being subtracted. Subtract from the SCI
# array but leave the ERR, PIXELDQ and GROUPDQ arrays as they are.
newobject.data = self.data - other
if not self.noerr:
newobject.err = self.err
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# A data array is being subtracted from this product. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data - np.asarray(other)
# Subtracting a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, JwstDataModel):
# Two data products are being subtracted. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data - other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = \
self._combine_errors_quadrature(self.err, other.err)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'pixeldq') and self._isvalid(other.pixeldq):
newobject.pixeldq = self._combine_quality(self.pixeldq, other.pixeldq)
if hasattr(other, 'groupdq') and self._isvalid(other.groupdq):
newobject.groupdq = self._combine_quality(self.groupdq, other.groupdq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot subtract " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __mul__(self, other):
"""
Multiply this MiriMeasuredModel object by a scalar, an array or
another JwstDataModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being multiplied. Multiply the SCI and ERR
# arrays but leave the PIXELDQ and GROUPDQ arrays as they are.
newobject.data = self.data * other
if not self.noerr:
newobject.err = self.err * other
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# This product is being multiplied by a data array. This should
# work provided the two arrays are broadcastable.
newobject.data = self.data * np.asarray(other)
# Multiplying by a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, JwstDataModel):
# Two data products are being multiplied together. Ensure they
# both have a valid primary data array.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data * other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = self._combine_errors_multiplicative( \
self.err, other.err, self.data,
other.data)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'pixeldq') and self._isvalid(other.pixeldq):
newobject.pixeldq = self._combine_quality(self.pixeldq, other.pixeldq)
if hasattr(other, 'groupdq') and self._isvalid(other.groupdq):
newobject.groupdq = self._combine_quality(self.groupdq, other.groupdq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot multiply " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
def __truediv__(self, other):
"""
Divide this MiriMeasuredModel object by a scalar, an array or
another JwstDataModel object.
"""
# Check this object is capable of mathematical operation.
self._check_for_data()
# Start with an empty version of the current object and clone
# the metadata.
newobject = self.__class__()
newobject.update( self )
if isinstance(other,(float,int)):
# A scalar quantity is being divided. Divide the SCI and ERR
# arrays but leave the PIXELDQ and GROUPDQ arrays as they are.
# Trap a divide by zero.
if np.abs(other) <= sys.float_info.epsilon:
strg = "%s: Divide by scalar zero!" % self.__class__.__name__
del newobject
raise ValueError(strg)
newobject.data = self.data / other
if not self.noerr:
newobject.err = self.err / other
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, (ma.masked_array,np.ndarray,list,tuple)):
# This product is being divided by a data array. This should
# work provided the two arrays are broadcastable.
# NOTE: Any divide by zero operations will be trapped by numpy.
newobject.data = self.data / np.asarray(other)
# Dividing by a plain data array erases the error information.
if not self.noerr:
newobject.err = np.zeros_like(self.err)
newobject.pixeldq = self.pixeldq
newobject.groupdq = self.groupdq
elif isinstance(other, JwstDataModel):
# The data product is being divided by another. Ensure they
# both have a valid primary data array.
# NOTE: Any divide by zero operations will be trapped by numpy.
if hasattr(other, 'data') and self._isvalid(other.data):
newobject.data = self.data / other.data
if not self.noerr:
if hasattr(other, 'err') and self._isvalid(other.err):
newobject.err = self._combine_errors_divisive( \
self.err, other.err, self.data,
other.data)
else:
# If only one error array is known, the combined error
# becomes unknown.
newobject.err = np.zeros_like(self.err)
if hasattr(other, 'pixeldq') and self._isvalid(other.pixeldq):
newobject.pixeldq = self._combine_quality(self.pixeldq, other.pixeldq)
if hasattr(other, 'groupdq') and self._isvalid(other.groupdq):
newobject.groupdq = self._combine_quality(self.groupdq, other.groupdq)
else:
raise TypeError("Both data products must contain a " + \
"primary data array.")
else:
strg = "Cannot divide " + str(self.__class__.__name__)
strg += " and " + str(other.__class__.__name__) + "objects."
del newobject
raise TypeError(strg)
return newobject
# In Python 3, division is the same as true division.
def __div__(self, other):
return self.__truediv__(other)
#
# A minimal test is run when this file is run as a main program.
# For a more substantial test see miri/datamodels/tests.
#
if __name__ == '__main__':
print("Testing the operations module.")
import math
# Check that dqflags has been imported properly
print("Master data quality flags:")
for flags in master_flags:
print(flags)
data3x3 = np.array([[1.,2.,3.],[4.,5.,6.],[7.,8.,9.]])
err3x3 = np.array([[1.,1.,1.],[2.,2.,2.],[1.,1.,1.]])
dqtest = [[0,1,0],
[0,1,1],
[0,0,0]]
dqtest2 = np.array([dqtest,dqtest,dqtest,dqtest])
testobj = HasDataErrAndDq( data3x3, err3x3, dqtest2)
newdq1 = testobj._shrink_dq( dqtest2 )
print("\nData quality array:\n", dqtest2)
print("has shrunk to:\n", newdq1)
newdq2 = testobj._shrink_dq( newdq1 )
print("and has shrunk again to:\n", newdq2)
newdq3 = testobj._shrink_dq( newdq2 )
print("and has shrunk finally to:\n", newdq3)
del newdq1, newdq2, newdq3
print("Testing combination and masking of data quality arrays")
data3x3 = np.array([[1.,2.,3.],[4.,5.,6.],[7.,8.,9.]])
err3x3 = np.array([[1.,1.,1.],[2.,2.,2.],[1.,1.,1.]])
dqtest = np.array([[0,1,0], [4,2,1], [0,3,0]])
testobj = HasDataErrAndDq( data3x3, err3x3, dqtest2)
mask1 = testobj._generate_mask(data3x3, dqtest, bitmask=None)
print("\nGenerating mask from:\n", dqtest)
print("with no bitmask gives:\n", str(mask1))
mask2 = testobj._generate_mask(data3x3, dqtest, bitmask=1)
print("\nGenerating mask from:\n", dqtest)
print("with bitmask 1 gives:\n", str(mask2))
mask3 = testobj._generate_mask(data3x3, dqtest, bitmask=3)
print("\nGenerating mask from:\n", dqtest)
print("with bitmask 3 gives:\n", str(mask3))
del mask1, mask2, mask3
# Testing error combination functions
sq0 = 0.0
sq1 = 1.0
sq2 = math.sqrt(2.0)
sq3 = math.sqrt(3.0)
    sq4 = 2.0   # math.sqrt(4.0)
sq5 = math.sqrt(5.0)
sq6 = math.sqrt(6.0)
sq7 = math.sqrt(7.0)
sq8 = math.sqrt(8.0)
sq9 = 3.0
error1 = np.array([[sq0,sq1,sq2],[sq3,sq4,sq5],[sq7,sq8,sq9]])
error2 = np.array([[sq9,sq8,sq7],[sq5,sq4,sq3],[sq2,sq1,sq0]])
error0 = np.zeros_like(error1)
print("\nCombining error array with itself:\n", error1)
newerr = testobj._combine_errors_quadrature(error1, error1)
print("by quadrature:\n", newerr)
print("\nCombining error array:\n", error1)
print("with:\n", error0)
newerr = testobj._combine_errors_quadrature(error1, error0)
print("by quadrature:\n", newerr)
print("\nCombining error array:\n", error1)
print("with:\n", error2)
newerr = testobj._combine_errors_quadrature(error1, error2)
print("by quadrature:\n", newerr)
data0 = np.array([[0,0,0],[0,0,0],[0,0,0]])
data1 = np.array([[1,1,1],[1,1,1],[1,1,1]])
data2 = np.array([[2,2,2],[2,2,2],[2,2,2]])
data_bad = np.array([[1,1,1],[1,0,1],[1,1,1]])
print("\nCombining error array:\n", error1)
print("with:\n", error2)
print("weighted twice by:\n", data1)
newerr = testobj._combine_errors_multiplicative(error1, error2, data1, data1)
print("multiplicative:\n", newerr)
print("\nCombining error array:\n", error1)
print("with:\n", error2)
print("weighted by:\n", data1)
print("and:\n", data0)
newerr = testobj._combine_errors_multiplicative(error1, error2, data1, data0)
print("multiplicative:\n", newerr)
print("\nCombining error array:\n", error1)
print("with:\n", error2)
print("weighted by:\n", data1)
print("and:\n", data2)
newerr = testobj._combine_errors_multiplicative(error1, error2, data1, data2)
print("multiplicative:\n", newerr)
print("\nCombining error array:\n", error1)
print("with:\n", error2)
print("weighted twice by:\n", data1)
newerr = testobj._combine_errors_divisive(error1, error2, data1, data1)
print("divisive:\n", newerr)
print("\nCombining error array:\n", error1)
print("with:\n", error2)
print("weighted by:\n", data0)
print("and:\n", data1)
newerr = testobj._combine_errors_divisive(error1, error2, data0, data1)
print("divisive:\n", newerr)
print("\nCombining error array:\n", error1)
print("with:\n", error2)
print("weighted by:\n", data1)
print("and:\n", data2)
newerr = testobj._combine_errors_divisive(error1, error2, data1, data2)
print("divisive:\n", newerr)
print("\nCombining error array:\n", error1)
print("with:\n", error2)
print("weighted by:\n", data1)
print("and:\n", data_bad)
newerr = testobj._combine_errors_divisive(error1, error2, data1, data_bad)
print("divisive:\n", newerr)
print("Test finished.")
| 39.940134
| 91
| 0.567067
| 7,690
| 65,382
| 4.706112
| 0.078023
| 0.017021
| 0.0147
| 0.02089
| 0.77748
| 0.755347
| 0.740923
| 0.734015
| 0.708897
| 0.694612
| 0
| 0.011987
| 0.355633
| 65,382
| 1,636
| 92
| 39.964548
| 0.847021
| 0.326558
| 0
| 0.748481
| 0
| 0
| 0.066108
| 0
| 0
| 0
| 0
| 0.000611
| 0
| 1
| 0.057108
| false
| 0.003645
| 0.008505
| 0.003645
| 0.147023
| 0.068044
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e097f68cfacb8ad1ff46f8b327104de87cba33ad
| 128
|
py
|
Python
|
ezconfigparser/__init__.py
|
WangZesen/ezconfigparser
|
b94b21dd39cc810b6758386edbbc22a18c22f249
|
[
"MIT"
] | null | null | null |
ezconfigparser/__init__.py
|
WangZesen/ezconfigparser
|
b94b21dd39cc810b6758386edbbc22a18c22f249
|
[
"MIT"
] | null | null | null |
ezconfigparser/__init__.py
|
WangZesen/ezconfigparser
|
b94b21dd39cc810b6758386edbbc22a18c22f249
|
[
"MIT"
] | null | null | null |
import sys
VERSION = '0.2.8'
if sys.version_info < (3, 0):
from config import Config
else:
from .config import Config
| 14.222222
| 30
| 0.671875
| 21
| 128
| 4.047619
| 0.571429
| 0.235294
| 0.376471
| 0.517647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050505
| 0.226563
| 128
| 8
| 31
| 16
| 0.808081
| 0
| 0
| 0
| 0
| 0
| 0.039063
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
e09c68041d8a19afe477e67b533016d26070046d
| 7,385
|
py
|
Python
|
tests/tasks/sftp/test_sftp.py
|
vlad-mois/prefect
|
5427ddb2e49dc4732ad034c58ed2604ea1faa4a3
|
[
"Apache-2.0"
] | null | null | null |
tests/tasks/sftp/test_sftp.py
|
vlad-mois/prefect
|
5427ddb2e49dc4732ad034c58ed2604ea1faa4a3
|
[
"Apache-2.0"
] | null | null | null |
tests/tasks/sftp/test_sftp.py
|
vlad-mois/prefect
|
5427ddb2e49dc4732ad034c58ed2604ea1faa4a3
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import MagicMock
import pytest
import os
from paramiko import Transport, SFTPClient
from prefect.tasks.sftp.sftp import SftpDownload, SftpUpload
@pytest.fixture
def mock_conn(monkeypatch):
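    """Mock paramiko's Transport and SFTPClient so no real SFTP connection is opened."""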
sftp_conn = MagicMock()
transport = MagicMock(spec=Transport)
transport.connect = MagicMock(username="test", password="test")
sftp_client = MagicMock(spec=SFTPClient)
connection = MagicMock()
sftp_client.return_value = MagicMock(from_transport=connection)
monkeypatch.setattr("prefect.tasks.sftp.sftp.SFTPClient", sftp_client)
monkeypatch.setattr("prefect.tasks.sftp.sftp.Transport", transport)
return sftp_conn, sftp_client
class TestSftpDownload:
def test_construction(self):
"""
Tests that all required params are present for SftpDownload Task.
"""
task = SftpDownload(
host="test",
port_number=22,
password="test",
username="test",
remote_path="test",
)
assert task.host == "test"
assert task.username == "test"
assert task.password == "test"
assert task.port_number == 22
assert task.remote_path == "test"
def test_required_params(self):
"""
Tests to check if there are missing required parameters.
"""
# raises Value error if host name is not provided
with pytest.raises(ValueError, match="A host name must be provided"):
SftpDownload().run(
port_number=22,
password="test",
username="test",
remote_path="foo-home/sftp-test.csv",
)
# raises Value error if port_number name is not provided
with pytest.raises(ValueError, match="A port_number name must be provided"):
SftpDownload().run(
host="test",
password="test",
username="test",
remote_path="foo-home/sftp-test.csv",
)
# raises Value error if username is not provided
with pytest.raises(ValueError, match="User name must be provided"):
SftpDownload().run(
host="test",
port_number=22,
password="test",
remote_path="foo-home/sftp-test.csv",
)
# raises Value error if password is not provided
with pytest.raises(ValueError, match="A password must be provided"):
SftpDownload().run(
host="test",
port_number=22,
username="test",
remote_path="foo-home/sftp-test.csv",
)
# raises Value error if remote_path is not provided
with pytest.raises(ValueError, match="A remote_path must be provided"):
SftpDownload().run(
host="test",
port_number=22,
password="test",
username="test",
)
    # test to check that the download runs successfully against the mocked connection
    def test_execute_download(self, mock_conn):
        """
        Tests that the SftpDownload Task can download a file.
        """
        from prefect import Flow
remote_path = "foo-home/sftp-test.csv"
connection = mock_conn[0]
connection().__enter__().get.return_value = True
# init the SFTPDownload Task
sftp_download_task = SftpDownload(
host="test",
port_number=22,
password="test",
username="test",
remote_path=remote_path,
)
with Flow(name="test") as f:
sftp_download_task._connection = connection
out = f.run()
assert out.is_successful()
class TestSftpUpload:
def test_construction(self):
"""
Tests that all required params are present for SftpUpload Task.
"""
task = SftpUpload(
host="test",
port_number=22,
password="test",
username="test",
remote_path="test",
local_path="test",
)
assert task.host == "test"
assert task.username == "test"
assert task.password == "test"
assert task.port_number == 22
assert task.remote_path == "test"
assert task.local_path == "test"
def test_required_params(self):
"""
Tests to check if there are missing required parameters.
"""
# raises Value error if host name is not provided
with pytest.raises(ValueError, match="A host name must be provided"):
SftpUpload().run(
port_number=22,
password="test",
username="test",
remote_path="foo-home/sftp-test.csv",
local_path="foo-home/sftp-test.csv",
)
# raises Value error if port_number name is not provided
with pytest.raises(ValueError, match="A port_number name must be provided"):
SftpUpload().run(
host="test",
password="test",
username="test",
remote_path="foo-home/sftp-test.csv",
local_path="foo-home/sftp-test.csv",
)
# raises Value error if username is not provided
with pytest.raises(ValueError, match="User name must be provided"):
SftpUpload().run(
host="test",
port_number=22,
password="test",
remote_path="foo-home/sftp-test.csv",
local_path="foo-home/sftp-test.csv",
)
# raises Value error if password is not provided
with pytest.raises(ValueError, match="A password must be provided"):
SftpUpload().run(
host="test",
port_number=22,
username="test",
remote_path="foo-home/sftp-test.csv",
local_path="foo-home/sftp-test.csv",
)
# raises Value error if remote_path is not provided
with pytest.raises(ValueError, match="A remote_path must be provided"):
SftpUpload().run(
host="test",
port_number=22,
password="test",
username="test",
local_path="foo-home/sftp-test.csv",
)
# raises Value error if local_path is not provided
with pytest.raises(ValueError, match="A local_path must be provided"):
SftpUpload().run(
host="test",
port_number=22,
password="test",
username="test",
remote_path="foo-home/sftp-test.csv",
)
    # test to check that the upload runs successfully against the mocked connection
def test_execute_upload(self, mock_conn):
"""
        Tests that the SftpUpload Task can upload a file.
"""
connection = mock_conn[0]
connection().__enter__().put.return_value = True
sftp_upload_task = SftpUpload(
host="test",
port_number=22,
password="test",
username="test",
remote_path="foo-home/sftp-test.csv",
local_path="foo-home/sftp-test.csv",
)
sftp_upload_task._connection = connection
sftp_upload_task.run()
connection.assert_called_once()
| 33.568182
| 84
| 0.557752
| 798
| 7,385
| 5.038847
| 0.122807
| 0.052226
| 0.046506
| 0.063417
| 0.779906
| 0.767471
| 0.726188
| 0.719224
| 0.713753
| 0.713753
| 0
| 0.006607
| 0.344211
| 7,385
| 219
| 85
| 33.721461
| 0.823663
| 0.130264
| 0
| 0.664596
| 0
| 0
| 0.157311
| 0.070935
| 0
| 0
| 0
| 0
| 0.080745
| 1
| 0.043478
| false
| 0.111801
| 0.037267
| 0
| 0.099379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
e0d0625d2762d81e0113c8d4a465db51daa01fda
| 48
|
py
|
Python
|
model/__init__.py
|
a101269/Chinese_Semantic_Dependency_Parser_with_knowledge
|
ca9998045c7789bc3ea5ad6a8ce7fe0af8308669
|
[
"MIT"
] | 1
|
2020-11-06T01:39:44.000Z
|
2020-11-06T01:39:44.000Z
|
utils/__init__.py
|
a101269/Chinese_Semantic_Dependency_Parser_with_knowledge
|
ca9998045c7789bc3ea5ad6a8ce7fe0af8308669
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
a101269/Chinese_Semantic_Dependency_Parser_with_knowledge
|
ca9998045c7789bc3ea5ad6a8ce7fe0af8308669
|
[
"MIT"
] | null | null | null |
# Author: a101269
# Date : 2020/3/4
| 8
| 22
| 0.479167
| 6
| 48
| 3.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.413793
| 0.395833
| 48
| 5
| 23
| 9.6
| 0.37931
| 0.770833
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e0e059a4c1a950d9979c4eb6b1f6db1b5936c027
| 107
|
py
|
Python
|
lib/carbon/tests/util.py
|
hessu/carbon
|
db0ffa3dea0e8fffd5cd05c22b60c08d7e4ae799
|
[
"Apache-2.0"
] | 961
|
2015-01-01T14:20:35.000Z
|
2022-03-29T22:15:35.000Z
|
lib/carbon/tests/util.py
|
hessu/carbon
|
db0ffa3dea0e8fffd5cd05c22b60c08d7e4ae799
|
[
"Apache-2.0"
] | 611
|
2015-01-03T20:31:23.000Z
|
2022-03-31T21:30:23.000Z
|
lib/carbon/tests/util.py
|
hessu/carbon
|
db0ffa3dea0e8fffd5cd05c22b60c08d7e4ae799
|
[
"Apache-2.0"
] | 326
|
2015-01-03T14:55:33.000Z
|
2022-03-31T01:43:49.000Z
|
from carbon.conf import Settings
class TestSettings(Settings):
def readFrom(*args, **kwargs):
pass
| 15.285714
| 32
| 0.728972
| 13
| 107
| 6
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168224
| 107
| 6
| 33
| 17.833333
| 0.876404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
46090780bcd8a23b6fab52b5a1c614f6298469b8
| 1,399
|
py
|
Python
|
generateTrees.py
|
NathanWhelan/generateSequences
|
1848a42e28d9e22ec7614bda0be68916d7898b1d
|
[
"MIT"
] | 2
|
2015-08-30T01:13:54.000Z
|
2016-01-23T02:11:44.000Z
|
generateTrees.py
|
NathanWhelan/generateSequences
|
1848a42e28d9e22ec7614bda0be68916d7898b1d
|
[
"MIT"
] | null | null | null |
generateTrees.py
|
NathanWhelan/generateSequences
|
1848a42e28d9e22ec7614bda0be68916d7898b1d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from __future__ import division
import re
import sys
import numpy
#### This script generates a list of gamma-distributed values for gene fragment lengths, to be
#### used to simulate sequences with indel-seq-gen. It also creates a list of the partitions for partitionFinder.
gammaNumbers = numpy.random.gamma(2.5, scale=150, size=200)  # size needs to be the number of genes to create
length=len(gammaNumbers)
#print(gammaNumbers)
y=1
print(gammaNumbers[2])
for x in gammaNumbers:
number=int(x)
output=open(str(y) +".tre","w")
    print(number)
    ## Change indel probability and tree as needed
output.write("[" + str(number) +"]{5,0.1}((((n:0.11099999999999977,o:0.1349999999999998):0.24099999999999966,((p:0.4810000000000003,q:0.2410000000000001):0.12300000000000022,R:0.30100000000000016):0.023999999999999133):0.5570000000000004,(m:4.672,((l:0.4320000000000004,(k:0.25100000000000033,j:0.5670000000000002):0.13100000000000023):0.11499999999999932,(((e:0.1990000000000003,f:1.3410000000000002):0.21599999999999975,(g:0.2669999999999999,(h:0.14300000000000024,I:0.09100000000000019):0.022999999999999687):0.19799999999999995):0.5669999999999997,((c:0.23399999999999999,(a:0.01100000000000012,b:3.21):0.09699999999999998):0.19799999999999995,d:0.4990000000000001):0.21199999999999974):0.2869999999999999):0.06300000000000061):0.09199999999999964):0.0675,x:4.135);" + "\n")
output.close()
y=y+1
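# Hedged check (added): numpy.random.gamma(2.5, scale=150) has mean
# shape*scale = 375, so the fragment lengths written above should average
# roughly 375 bases over the 200 draws.
print(numpy.mean(gammaNumbers))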
| 53.807692
| 779
| 0.781987
| 200
| 1,399
| 5.45
| 0.625
| 0.009174
| 0.012844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.458109
| 0.07005
| 1,399
| 25
| 780
| 55.96
| 0.379708
| 0.219442
| 0
| 0
| 1
| 0.066667
| 0.689527
| 0.682113
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.266667
| null | null | 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4611a3aaf4174d6e04917af1b5d10b058d54f8ea
| 8,052
|
py
|
Python
|
xlsxwriter/test/worksheet/test_calcuate_spans.py
|
sontek/XlsxWriter
|
7f17a52f95be9ecfb9c7f213fc0a02e0f625c6ec
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2015-05-19T22:17:15.000Z
|
2015-05-19T22:17:15.000Z
|
xlsxwriter/test/worksheet/test_calcuate_spans.py
|
sontek/XlsxWriter
|
7f17a52f95be9ecfb9c7f213fc0a02e0f625c6ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
xlsxwriter/test/worksheet/test_calcuate_spans.py
|
sontek/XlsxWriter
|
7f17a52f95be9ecfb9c7f213fc0a02e0f625c6ec
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestCalculateSpans(unittest.TestCase):
"""
Test the _calculate_spans Worksheet method for different cell ranges.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_calculate_spans_0(self):
"""Test Worksheet _calculate_spans()"""
row = 0
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
exp = {0: '1:16', 1: '17:17'}
got = self.worksheet.row_spans
self.assertEqual(got, exp)
def test_calculate_spans_1(self):
"""Test Worksheet _calculate_spans()"""
row = 0
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:16', 1: '17:17'}
self.assertEqual(got, exp)
def test_calculate_spans_2(self):
"""Test Worksheet _calculate_spans()"""
row = 1
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:15', 1: '16:17'}
self.assertEqual(got, exp)
def test_calculate_spans_3(self):
"""Test Worksheet _calculate_spans()"""
row = 2
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:14', 1: '15:17'}
self.assertEqual(got, exp)
def test_calculate_spans_4(self):
"""Test Worksheet _calculate_spans()"""
row = 3
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:13', 1: '14:17'}
self.assertEqual(got, exp)
def test_calculate_spans_5(self):
"""Test Worksheet _calculate_spans()"""
row = 4
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:12', 1: '13:17'}
self.assertEqual(got, exp)
def test_calculate_spans_6(self):
"""Test Worksheet _calculate_spans()"""
row = 5
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:11', 1: '12:17'}
self.assertEqual(got, exp)
def test_calculate_spans_7(self):
"""Test Worksheet _calculate_spans()"""
row = 6
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:10', 1: '11:17'}
self.assertEqual(got, exp)
def test_calculate_spans_8(self):
"""Test Worksheet _calculate_spans()"""
row = 7
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:9', 1: '10:17'}
self.assertEqual(got, exp)
def test_calculate_spans_9(self):
"""Test Worksheet _calculate_spans()"""
row = 8
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:8', 1: '9:17'}
self.assertEqual(got, exp)
def test_calculate_spans_10(self):
"""Test Worksheet _calculate_spans()"""
row = 9
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:7', 1: '8:17'}
self.assertEqual(got, exp)
def test_calculate_spans_11(self):
"""Test Worksheet _calculate_spans()"""
row = 10
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:6', 1: '7:17'}
self.assertEqual(got, exp)
def test_calculate_spans_12(self):
"""Test Worksheet _calculate_spans()"""
row = 11
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:5', 1: '6:17'}
self.assertEqual(got, exp)
def test_calculate_spans_13(self):
"""Test Worksheet _calculate_spans()"""
row = 12
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:4', 1: '5:17'}
self.assertEqual(got, exp)
def test_calculate_spans_14(self):
"""Test Worksheet _calculate_spans()"""
row = 13
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:3', 1: '4:17'}
self.assertEqual(got, exp)
def test_calculate_spans_15(self):
"""Test Worksheet _calculate_spans()"""
row = 14
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:2', 1: '3:17'}
self.assertEqual(got, exp)
def test_calculate_spans_16(self):
"""Test Worksheet _calculate_spans()"""
row = 15
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {0: '1:1', 1: '2:17'}
self.assertEqual(got, exp)
def test_calculate_spans_17(self):
"""Test Worksheet _calculate_spans()"""
row = 16
col = 0
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {1: '1:16', 2: '17:17'}
self.assertEqual(got, exp)
def test_calculate_spans_18(self):
"""Test Worksheet _calculate_spans()"""
row = 16
col = 1
for i in range(row, row + 17):
self.worksheet.write_number(i, col, 1)
col = col + 1
self.worksheet._calculate_spans()
got = self.worksheet.row_spans
exp = {1: '2:17', 2: '18:18'}
self.assertEqual(got, exp)
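# Hedged sketch (added, not part of the original tests): the expected span
# dictionaries above all follow the same pattern -- rows are grouped into
# blocks of 16, and each block records the 1-based first:last columns written
# within it. A small helper reproducing the expectations for these diagonal
# writes (name and grouping rule are an assumption inferred from the test
# data, not taken from the XlsxWriter source):
def diagonal_expected_spans(row, col=0, n=17):
    """Expected row_spans for n diagonal writes starting at (row, col)."""
    spans = {}
    for i in range(n):
        r, c = row + i, col + i
        block = r // 16
        lo, hi = spans.get(block, (c + 1, c + 1))
        spans[block] = (min(lo, c + 1), max(hi, c + 1))
    return {k: '%d:%d' % v for k, v in spans.items()}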
if __name__ == '__main__':
unittest.main()
| 24.326284
| 79
| 0.536388
| 1,025
| 8,052
| 4.035122
| 0.065366
| 0.185445
| 0.211315
| 0.09647
| 0.879594
| 0.873791
| 0.750484
| 0.750484
| 0.718569
| 0.558994
| 0
| 0.057949
| 0.331346
| 8,052
| 330
| 80
| 24.4
| 0.710253
| 0.099106
| 0
| 0.69
| 0
| 0
| 0.022949
| 0
| 0
| 0
| 0
| 0
| 0.095
| 1
| 0.1
| false
| 0
| 0.015
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1cd31b4890f3b6fdc7244e05a71517bd89b89233
| 48
|
py
|
Python
|
napari_plot/layers/line/__init__.py
|
lukasz-migas/napari-1d
|
b0f081a8711ae941b3e4b5c58c3aea56bd0e3277
|
[
"BSD-3-Clause"
] | 13
|
2021-08-27T23:01:09.000Z
|
2022-03-22T13:51:35.000Z
|
napari_plot/layers/line/__init__.py
|
lukasz-migas/napari-1d
|
b0f081a8711ae941b3e4b5c58c3aea56bd0e3277
|
[
"BSD-3-Clause"
] | 71
|
2021-08-28T13:29:17.000Z
|
2022-03-28T21:22:12.000Z
|
napari_plot/layers/line/__init__.py
|
lukasz-migas/napari-1d
|
b0f081a8711ae941b3e4b5c58c3aea56bd0e3277
|
[
"BSD-3-Clause"
] | null | null | null |
"""Line"""
from .line import Line # noqa: F401
| 16
| 36
| 0.625
| 7
| 48
| 4.285714
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.1875
| 48
| 2
| 37
| 24
| 0.692308
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1cd4112b87a1b4de487e247394d64737a868735f
| 109
|
py
|
Python
|
TDD/doctests/main.py
|
raphael-d-cordeiro/Python_Public
|
56d0080393dab5f80ad650e27cb993006b17ac1b
|
[
"MIT"
] | null | null | null |
TDD/doctests/main.py
|
raphael-d-cordeiro/Python_Public
|
56d0080393dab5f80ad650e27cb993006b17ac1b
|
[
"MIT"
] | null | null | null |
TDD/doctests/main.py
|
raphael-d-cordeiro/Python_Public
|
56d0080393dab5f80ad650e27cb993006b17ac1b
|
[
"MIT"
] | null | null | null |
"""
Look in the operations module for doctests.
"""
from operations import multiply_num
print(multiply_num(2, 10))
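# Hedged sketch (added): the multiply_num helper imported above is assumed to
# carry a doctest of roughly this shape, runnable with
# `python -m doctest operations.py -v`:
#
#     def multiply_num(num1, num2):
#         """
#         >>> multiply_num(2, 10)
#         20
#         """
#         return num1 * num2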
| 13.625
| 35
| 0.761468
| 15
| 109
| 5.4
| 0.8
| 0.271605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031915
| 0.137615
| 109
| 7
| 36
| 15.571429
| 0.829787
| 0.321101
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
1ce0474f17341b1b36ec41689c2b2dca7dd36910
| 33
|
py
|
Python
|
goto_cloud/tracked_model/public.py
|
jdepoix/goto_cloud
|
59bb9923026e1b1dc6e8e08fb6b21300c8e8854a
|
[
"MIT"
] | 2
|
2018-02-04T23:22:17.000Z
|
2019-04-15T12:06:04.000Z
|
goto_cloud/tracked_model/public.py
|
jdepoix/goto_cloud
|
59bb9923026e1b1dc6e8e08fb6b21300c8e8854a
|
[
"MIT"
] | null | null | null |
goto_cloud/tracked_model/public.py
|
jdepoix/goto_cloud
|
59bb9923026e1b1dc6e8e08fb6b21300c8e8854a
|
[
"MIT"
] | null | null | null |
from .models import TrackedModel
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e82666fc86bb056aa267370cc3b244d809f626b0
| 2,927
|
py
|
Python
|
thenewboston_node/business_logic/tests/test_file_blockchain/test_add_block_validation.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | 30
|
2021-03-05T22:08:17.000Z
|
2021-09-23T02:45:45.000Z
|
thenewboston_node/business_logic/tests/test_file_blockchain/test_add_block_validation.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | 148
|
2021-03-05T23:37:50.000Z
|
2021-11-02T02:18:58.000Z
|
thenewboston_node/business_logic/tests/test_file_blockchain/test_add_block_validation.py
|
nishp77/thenewboston-node
|
158b1f1739b2c6c9c21c80e9da854ca141f1cf8f
|
[
"MIT"
] | 14
|
2021-03-05T21:58:46.000Z
|
2021-10-15T17:27:52.000Z
|
import pytest
from thenewboston_node.business_logic.exceptions import ValidationError
from thenewboston_node.business_logic.models import (
Block, NodeDeclarationSignedChangeRequest, PrimaryValidatorScheduleSignedChangeRequest
)
def test_pv_schedule_after_node_declaration_is_successful(
file_blockchain, another_node_key_pair, primary_validator_key_pair, preferred_node_network_address
):
nd_request = NodeDeclarationSignedChangeRequest.create(
network_addresses=[preferred_node_network_address],
fee_amount=3,
signing_key=another_node_key_pair.private,
)
nd_block = Block.create_from_signed_change_request(file_blockchain, nd_request, primary_validator_key_pair.private)
file_blockchain.add_block(nd_block)
pv_schedule_request = PrimaryValidatorScheduleSignedChangeRequest.create(
begin_block_number=100,
end_block_number=199,
signing_key=another_node_key_pair.private,
)
pv_schedule_block = Block.create_from_signed_change_request(
file_blockchain, pv_schedule_request, primary_validator_key_pair.private
)
file_blockchain.add_block(pv_schedule_block)
file_blockchain.validate()
def test_pv_schedule_without_node_declaration_fails(
file_blockchain, another_node_key_pair, primary_validator_key_pair
):
pv_schedule_request = PrimaryValidatorScheduleSignedChangeRequest.create(
begin_block_number=100,
end_block_number=199,
signing_key=another_node_key_pair.private,
)
pv_schedule_block = Block.create_from_signed_change_request(
file_blockchain, pv_schedule_request, primary_validator_key_pair.private
)
with pytest.raises(
ValidationError, match='Signer node must be declared in the blockchain before primary validator schedule'
):
file_blockchain.add_block(pv_schedule_block)
def test_pv_schedule_begin_block_number_must_be_less_than_end_block_number(
file_blockchain, another_node_key_pair, primary_validator_key_pair, preferred_node_network_address
):
nd_request = NodeDeclarationSignedChangeRequest.create(
network_addresses=[preferred_node_network_address],
fee_amount=3,
signing_key=another_node_key_pair.private,
)
nd_block = Block.create_from_signed_change_request(file_blockchain, nd_request, primary_validator_key_pair.private)
file_blockchain.add_block(nd_block)
pv_schedule_request = PrimaryValidatorScheduleSignedChangeRequest.create(
begin_block_number=100,
end_block_number=99,
signing_key=another_node_key_pair.private,
)
pv_schedule_block = Block.create_from_signed_change_request(
file_blockchain, pv_schedule_request, primary_validator_key_pair.private
)
with pytest.raises(ValidationError, match='Begin block number must be less or equal than end block number'):
file_blockchain.add_block(pv_schedule_block)
| 40.09589
| 119
| 0.800137
| 350
| 2,927
| 6.174286
| 0.188571
| 0.051828
| 0.064785
| 0.066636
| 0.839426
| 0.808885
| 0.768163
| 0.726978
| 0.726978
| 0.726978
| 0
| 0.007615
| 0.147591
| 2,927
| 72
| 120
| 40.652778
| 0.858517
| 0
| 0
| 0.633333
| 0
| 0
| 0.048514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1c04fb200ebab140684c246b9ae3a9ca2b98ac5b
| 2,698
|
py
|
Python
|
test/geometry/epipolar/test_epipolar_metrics.py
|
pmeier/kornia
|
57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-04-15T01:20:01.000Z
|
2022-01-12T14:12:54.000Z
|
test/geometry/epipolar/test_epipolar_metrics.py
|
pmeier/kornia
|
57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/geometry/epipolar/test_epipolar_metrics.py
|
pmeier/kornia
|
57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-05-15T03:22:24.000Z
|
2021-05-15T03:22:24.000Z
|
import pytest
import torch
from torch.autograd import gradcheck
from torch.testing import assert_allclose
import kornia.geometry.epipolar as epi
import kornia.testing as utils
class TestSymmetricalEpipolarDistance:
def test_smoke(self, device, dtype):
pts1 = torch.rand(1, 4, 3, device=device, dtype=dtype)
pts2 = torch.rand(1, 4, 3, device=device, dtype=dtype)
Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)
assert epi.symmetrical_epipolar_distance(pts1, pts2, Fm).shape == (1, 4)
def test_batch(self, device, dtype):
batch_size = 5
pts1 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)
pts2 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)
Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)
assert epi.symmetrical_epipolar_distance(pts1, pts2, Fm).shape == (5, 4)
def test_gradcheck(self, device):
# generate input data
batch_size, num_points, num_dims = 2, 3, 2
points1 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64, requires_grad=True)
points2 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64)
Fm = utils.create_random_fundamental_matrix(batch_size).type_as(points2)
assert gradcheck(epi.symmetrical_epipolar_distance, (points1, points2, Fm),
raise_exception=True)
class TestSampsonEpipolarDistance:
def test_smoke(self, device, dtype):
pts1 = torch.rand(1, 4, 3, device=device, dtype=dtype)
pts2 = torch.rand(1, 4, 3, device=device, dtype=dtype)
Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)
assert epi.sampson_epipolar_distance(pts1, pts2, Fm).shape == (1, 4)
def test_batch(self, device, dtype):
batch_size = 5
pts1 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)
pts2 = torch.rand(batch_size, 4, 3, device=device, dtype=dtype)
Fm = utils.create_random_fundamental_matrix(1).type_as(pts1)
assert epi.sampson_epipolar_distance(pts1, pts2, Fm).shape == (5, 4)
def test_gradcheck(self, device):
# generate input data
batch_size, num_points, num_dims = 2, 3, 2
points1 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64, requires_grad=True)
points2 = torch.rand(batch_size, num_points, num_dims, device=device, dtype=torch.float64)
Fm = utils.create_random_fundamental_matrix(batch_size).type_as(points2)
assert gradcheck(epi.sampson_epipolar_distance, (points1, points2, Fm),
raise_exception=True)
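# Hedged reference (added): for homogeneous points x1, x2 and fundamental
# matrix F, the symmetrical epipolar distance tested above is commonly
# defined (Hartley & Zisserman) as
#     (x2^T F x1)^2 * ( 1/((F x1)_1^2 + (F x1)_2^2)
#                     + 1/((F^T x2)_1^2 + (F^T x2)_2^2) )
# while the Sampson distance pools the terms into a single denominator:
#     (x2^T F x1)^2 / ((F x1)_1^2 + (F x1)_2^2 + (F^T x2)_1^2 + (F^T x2)_2^2)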
| 45.728814
| 118
| 0.693477
| 370
| 2,698
| 4.859459
| 0.156757
| 0.097887
| 0.113459
| 0.062291
| 0.868743
| 0.868743
| 0.868743
| 0.868743
| 0.813126
| 0.813126
| 0
| 0.036212
| 0.201631
| 2,698
| 58
| 119
| 46.517241
| 0.798514
| 0.014455
| 0
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1c20248231ef90a4718bcebfb19f64a4304246f8
| 31,755
|
py
|
Python
|
Packs/ExportIndicators/Integrations/ExportIndicators/ExportIndicators_test.py
|
smokescreen-akshay/content
|
780e0c57a3201e405d4416154c5d08a4fbb9384c
|
[
"MIT"
] | 1
|
2020-04-19T11:05:42.000Z
|
2020-04-19T11:05:42.000Z
|
Packs/ExportIndicators/Integrations/ExportIndicators/ExportIndicators_test.py
|
smokescreen-akshay/content
|
780e0c57a3201e405d4416154c5d08a4fbb9384c
|
[
"MIT"
] | null | null | null |
Packs/ExportIndicators/Integrations/ExportIndicators/ExportIndicators_test.py
|
smokescreen-akshay/content
|
780e0c57a3201e405d4416154c5d08a4fbb9384c
|
[
"MIT"
] | 1
|
2021-05-31T15:08:48.000Z
|
2021-05-31T15:08:48.000Z
|
"""Imports"""
import json
import pytest
import demistomock as demisto
from netaddr import IPAddress
IOC_RES_LEN = 38
'''Tests'''
@pytest.mark.helper_commands
class TestHelperFunctions:
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_1(self, mocker):
"""Test on_demand"""
from ExportIndicators import get_outbound_ioc_values, RequestArguments
with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as iocs_text_values_f:
iocs_text_dict = json.loads(iocs_text_values_f.read())
mocker.patch.object(demisto, 'getIntegrationContext', return_value={"last_output": iocs_text_dict})
request_args = RequestArguments(query='', out_format='text', limit=50, offset=0)
ioc_list = get_outbound_ioc_values(
on_demand=True,
request_args=request_args
)
for ioc_row in ioc_list:
assert ioc_row in iocs_text_dict
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_2(self, mocker):
"""Test update by not on_demand with no refresh"""
import CommonServerPython as CSP
mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383899, 1578383899))
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as iocs_text_values_f:
iocs_text_dict = json.loads(iocs_text_values_f.read())
mocker.patch.object(demisto, 'getIntegrationContext', return_value={"last_output": iocs_text_dict})
mocker.patch.object(ei, 'refresh_outbound_context', return_value=iocs_text_dict)
mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
request_args = ei.RequestArguments(query='', out_format='text', limit=50, offset=0)
ioc_list = ei.get_outbound_ioc_values(
on_demand=False,
request_args=request_args,
cache_refresh_rate='1 minute'
)
for ioc_row in ioc_list:
assert ioc_row in iocs_text_dict
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_3(self, mocker):
"""Test update by not on_demand with refresh"""
import CommonServerPython as CSP
mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383898, 1578383898))
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as iocs_text_values_f:
iocs_text_dict = json.loads(iocs_text_values_f.read())
mocker.patch.object(demisto, 'getIntegrationContext', return_value={"last_output": iocs_text_dict})
mocker.patch.object(ei, 'refresh_outbound_context', return_value=iocs_text_dict)
mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
request_args = ei.RequestArguments(query='', out_format='text', limit=50, offset=0)
ioc_list = ei.get_outbound_ioc_values(
on_demand=False,
request_args=request_args,
cache_refresh_rate='1 minute'
)
for ioc_row in ioc_list:
assert ioc_row in iocs_text_dict
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_4(self, mocker):
"""Test update by request params change - limit"""
import CommonServerPython as CSP
mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383898, 1578383898))
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as iocs_text_values_f:
iocs_text_dict = json.loads(iocs_text_values_f.read())
mocker.patch.object(demisto, 'getIntegrationContext', return_value={"last_output": iocs_text_dict,
"last_limit": 1, "last_offset": 0,
"last_query": "type:ip",
"last_format": "text"})
mocker.patch.object(ei, 'refresh_outbound_context', return_value=iocs_text_dict)
mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
request_args = ei.RequestArguments(query='type:ip', out_format='text', limit=50, offset=0)
ioc_list = ei.get_outbound_ioc_values(
on_demand=False,
request_args=request_args,
cache_refresh_rate='1 minute'
)
for ioc_row in ioc_list:
assert ioc_row in iocs_text_dict
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_5(self, mocker):
"""Test update by request params change - offset"""
import CommonServerPython as CSP
mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383898, 1578383898))
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as iocs_text_values_f:
iocs_text_dict = json.loads(iocs_text_values_f.read())
mocker.patch.object(demisto, 'getIntegrationContext', return_value={"last_output": iocs_text_dict,
"last_limit": 50, "last_offset": 1,
"last_query": "type:ip",
"last_format": "text"})
mocker.patch.object(ei, 'refresh_outbound_context', return_value=iocs_text_dict)
mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
request_args = ei.RequestArguments(query='type:ip', out_format='text', limit=50, offset=0)
ioc_list = ei.get_outbound_ioc_values(
on_demand=False,
request_args=request_args,
cache_refresh_rate='1 minute'
)
for ioc_row in ioc_list:
assert ioc_row in iocs_text_dict
@pytest.mark.get_outbound_ioc_values
def test_get_outbound_ioc_values_6(self, mocker):
"""Test update by request params change - query"""
import CommonServerPython as CSP
mocker.patch.object(CSP, 'parse_date_range', return_value=(1578383898, 1578383898))
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as iocs_text_values_f:
iocs_text_dict = json.loads(iocs_text_values_f.read())
mocker.patch.object(demisto, 'getIntegrationContext', return_value={"last_output": iocs_text_dict,
"last_limit": 50, "last_offset": 0,
"last_query": "type:URL",
"last_format": "text"})
mocker.patch.object(ei, 'refresh_outbound_context', return_value=iocs_text_dict)
mocker.patch.object(demisto, 'getLastRun', return_value={'last_run': 1578383898000})
request_args = ei.RequestArguments(query='type:ip', out_format='text', limit=50, offset=0)
ioc_list = ei.get_outbound_ioc_values(
on_demand=False,
request_args=request_args,
cache_refresh_rate='1 minute'
)
for ioc_row in ioc_list:
assert ioc_row in iocs_text_dict
@pytest.mark.list_to_str
def test_list_to_str_1(self):
"""Test invalid"""
from ExportIndicators import list_to_str
with pytest.raises(AttributeError):
invalid_list_value = 2
list_to_str(invalid_list_value)
with pytest.raises(AttributeError):
invalid_list_value = {'invalid': 'invalid'}
list_to_str(invalid_list_value)
@pytest.mark.list_to_str
def test_list_to_str_2(self):
"""Test empty"""
from ExportIndicators import list_to_str
assert list_to_str(None) == ''
assert list_to_str([]) == ''
assert list_to_str({}) == ''
@pytest.mark.list_to_str
def test_list_to_str_3(self):
"""Test non empty fields"""
from ExportIndicators import list_to_str
valid_list_value = [1, 2, 3, 4]
assert list_to_str(valid_list_value) == '1,2,3,4'
assert list_to_str(valid_list_value, '.') == '1.2.3.4'
assert list_to_str(valid_list_value, map_func=lambda x: f'{x}a') == '1a,2a,3a,4a'
@pytest.mark.get_params_port
def test_get_params_port_1(self):
"""Test invalid"""
from CommonServerPython import DemistoException
from ExportIndicators import get_params_port
params = {'longRunningPort': 'invalid'}
with pytest.raises(DemistoException):
get_params_port(params)
@pytest.mark.get_params_port
def test_get_params_port_2(self):
"""Test empty"""
from ExportIndicators import get_params_port
params = {'longRunningPort': ''}
with pytest.raises(ValueError):
get_params_port(params)
@pytest.mark.get_params_port
def test_get_params_port_3(self):
"""Test valid"""
from ExportIndicators import get_params_port
params = {'longRunningPort': '80'}
assert get_params_port(params) == 80
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_1(self, mocker):
"""Test out_format=text"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
mocker.patch.object(ei, 'find_indicators_with_limit', return_value=iocs_json)
request_args = ei.RequestArguments(query='', out_format='text', limit=38)
ei_vals = ei.refresh_outbound_context(request_args)
for ioc in iocs_json:
ip = ioc.get('value')
assert ip in ei_vals
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_2(self, mocker):
"""Test out_format= XSOAR json"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
mocker.patch.object(ei, 'find_indicators_with_limit', return_value=iocs_json)
request_args = ei.RequestArguments(query='', out_format='XSOAR json', limit=38)
ei_vals = ei.refresh_outbound_context(request_args)
assert isinstance(ei_vals, str)
ei_vals = json.loads(ei_vals)
assert iocs_json == ei_vals
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_3(self, mocker):
"""Test out_format=xsoar-csv"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
mocker.patch.object(ei, 'find_indicators_with_limit', return_value=iocs_json)
request_args = ei.RequestArguments(query='', out_format='XSOAR csv', limit=38)
ei_vals = ei.refresh_outbound_context(request_args)
with open('ExportIndicators_test/TestHelperFunctions/iocs_out_csv.txt', 'r') as iocs_out_f:
iocs_out = iocs_out_f.read()
for ioc in iocs_out.split('\n'):
assert ioc in ei_vals
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_4(self, mocker):
"""Test out_format=XSOAR json-seq"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
mocker.patch.object(ei, 'find_indicators_with_limit', return_value=iocs_json)
request_args = ei.RequestArguments(query='', out_format='XSOAR json-seq', limit=38)
ei_vals = ei.refresh_outbound_context(request_args)
with open('ExportIndicators_test/TestHelperFunctions/iocs_out_json_seq.txt', 'r') as iocs_out_f:
iocs_out = iocs_out_f.read()
assert iocs_out == ei_vals
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_5(self, mocker):
"""Test out_format=json"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
mocker.patch.object(ei, 'find_indicators_with_limit', return_value=iocs_json)
request_args = ei.RequestArguments(query='', out_format='json', limit=2)
ei_vals = ei.refresh_outbound_context(request_args)
ei_vals = json.loads(ei_vals)
with open('ExportIndicators_test/TestHelperFunctions/iocs_out_json.json', 'r') as iocs_json_out_f:
iocs_json_out = json.loads(iocs_json_out_f.read())
assert iocs_json_out == ei_vals
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_6(self, mocker):
"""Test out_format=json-seq"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
mocker.patch.object(ei, 'find_indicators_with_limit', return_value=iocs_json)
request_args = ei.RequestArguments(query='', out_format='json-seq', limit=38)
ei_vals = ei.refresh_outbound_context(request_args)
with open('ExportIndicators_test/TestHelperFunctions/iocs_out_json_seq_old.txt', 'r') as iocs_out_f:
iocs_out = iocs_out_f.read()
for iocs_out_line in iocs_out.split('\n'):
assert iocs_out_line in ei_vals
@pytest.mark.refresh_outbound_context
def test_refresh_outbound_context_7(self, mocker):
"""Test out_format=csv"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
mocker.patch.object(ei, 'find_indicators_with_limit', return_value=iocs_json)
request_args = ei.RequestArguments(query='', out_format='csv', limit=38)
ei_vals = ei.refresh_outbound_context(request_args)
with open('ExportIndicators_test/TestHelperFunctions/iocs_out_csv_old.txt', 'r') as iocs_out_f:
iocs_out = iocs_out_f.read()
for ioc in iocs_out.split('\n'):
assert ioc in ei_vals
@pytest.mark.find_indicators_with_limit
def test_find_indicators_with_limit_1(self, mocker):
"""Test find indicators limit"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
limit = 30
mocker.patch.object(ei, 'find_indicators_with_limit_loop', return_value=(iocs_json, 1))
ei_vals = ei.find_indicators_with_limit(indicator_query='', limit=limit, offset=0)
assert len(ei_vals) == limit
@pytest.mark.find_indicators_with_limit
def test_find_indicators_with_limit_and_offset_1(self, mocker):
"""Test find indicators limit and offset"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
limit = 30
offset = 1
mocker.patch.object(ei, 'find_indicators_with_limit_loop', return_value=(iocs_json, 1))
ei_vals = ei.find_indicators_with_limit(indicator_query='', limit=limit, offset=offset)
assert len(ei_vals) == limit
# check that the first value is the second on the list
assert ei_vals[0].get('value') == '212.115.110.19'
@pytest.mark.find_indicators_with_limit_loop
def test_find_indicators_with_limit_loop_1(self, mocker):
"""Test find indicators stops when reached last page"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_dict = {'iocs': json.loads(iocs_json_f.read())}
limit = 50
mocker.patch.object(demisto, 'searchIndicators', return_value=iocs_dict)
ei_vals, nxt_pg = ei.find_indicators_with_limit_loop(indicator_query='', limit=limit)
assert nxt_pg == 1 # assert entered into loop
@pytest.mark.find_indicators_with_limit_loop
def test_find_indicators_with_limit_loop_2(self, mocker):
"""Test find indicators stops when reached limit"""
import ExportIndicators as ei
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_dict = {'iocs': json.loads(iocs_json_f.read())}
limit = 30
mocker.patch.object(demisto, 'searchIndicators', return_value=iocs_dict)
ei.PAGE_SIZE = IOC_RES_LEN
ei_vals, nxt_pg = ei.find_indicators_with_limit_loop(indicator_query='', limit=limit,
last_found_len=IOC_RES_LEN)
assert nxt_pg == 1 # assert entered into loop
@pytest.mark.create_values_for_returned_dict
def test_create_values_for_returned_dict_1(self):
"""Test XSOAR CSV out"""
from ExportIndicators import create_values_for_returned_dict, FORMAT_XSOAR_CSV, RequestArguments, CTX_VALUES_KEY
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
request_args = RequestArguments(query='', out_format=FORMAT_XSOAR_CSV)
returned_dict, _ = create_values_for_returned_dict(iocs_json, request_args)
csv_out = returned_dict.get(CTX_VALUES_KEY)
# assert len(csv_out) == IOC_RES_LEN + 1
with open('ExportIndicators_test/TestHelperFunctions/iocs_out_csv.txt', 'r') as iocs_out_f:
expected_csv_out = iocs_out_f.read()
for csv_line in csv_out.split('\n'):
assert csv_line in expected_csv_out
@pytest.mark.create_values_for_returned_dict
def test_create_values_for_returned_dict_2(self):
"""Test XSOAR JSON out"""
from ExportIndicators import create_values_for_returned_dict, FORMAT_XSOAR_JSON, CTX_VALUES_KEY, RequestArguments
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.load(iocs_json_f)
request_args = RequestArguments(query='', out_format=FORMAT_XSOAR_JSON)
returned_dict, _ = create_values_for_returned_dict(iocs_json, request_args)
json_out = json.loads(returned_dict.get(CTX_VALUES_KEY))
assert json_out == iocs_json
@pytest.mark.create_values_for_returned_dict
def test_create_values_for_returned_dict_3(self):
"""Test XSOAR JSON_SEQ out"""
from ExportIndicators import create_values_for_returned_dict, FORMAT_XSOAR_JSON_SEQ, CTX_VALUES_KEY, RequestArguments
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
request_args = RequestArguments(query='', out_format=FORMAT_XSOAR_JSON_SEQ)
returned_dict, _ = create_values_for_returned_dict(iocs_json, request_args)
json_seq_out = returned_dict.get(CTX_VALUES_KEY)
for seq_line in json_seq_out.split('\n'):
assert json.loads(seq_line) in iocs_json
@pytest.mark.create_values_for_returned_dict
def test_create_values_for_returned_dict_4(self):
"""Test TEXT out"""
from ExportIndicators import create_values_for_returned_dict, FORMAT_TEXT, CTX_VALUES_KEY, RequestArguments
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
request_args = RequestArguments(query='', out_format=FORMAT_TEXT)
returned_dict, _ = create_values_for_returned_dict(iocs_json, request_args)
text_out = returned_dict.get(CTX_VALUES_KEY)
with open('ExportIndicators_test/TestHelperFunctions/iocs_cache_values_text.json', 'r') as iocs_txt_f:
iocs_txt_json = json.load(iocs_txt_f)
for line in text_out.split('\n'):
assert line in iocs_txt_json
@pytest.mark.create_values_out_dict
def test_create_values_for_returned_dict_5(self):
"""Test JSON out"""
from ExportIndicators import create_values_for_returned_dict, FORMAT_JSON, CTX_VALUES_KEY, RequestArguments
with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
request_args = RequestArguments(query='', out_format=FORMAT_JSON)
returned_dict, _ = create_values_for_returned_dict(iocs_json, request_args)
json_out = json.loads(returned_dict.get(CTX_VALUES_KEY))
with open('ExportIndicators_test/TestHelperFunctions/iocs_out_json.json', 'r') as iocs_json_out_f:
iocs_json_out = json.loads(iocs_json_out_f.read())
assert iocs_json_out == json_out
@pytest.mark.create_values_out_dict
def test_create_values_for_returned_dict_6(self):
"""Test JSON_SEQ out"""
from ExportIndicators import create_values_for_returned_dict, FORMAT_JSON_SEQ, CTX_VALUES_KEY, RequestArguments
with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
request_args = RequestArguments(query='', out_format=FORMAT_JSON_SEQ)
returned_dict, _ = create_values_for_returned_dict(iocs_json, request_args)
json_seq_out = returned_dict.get(CTX_VALUES_KEY)
with open('ExportIndicators_test/TestHelperFunctions/iocs_out_json.json', 'r') as iocs_json_out_f:
iocs_json_out = json.load(iocs_json_out_f)
for seq_line in json_seq_out.split('\n'):
assert json.loads(seq_line) in iocs_json_out
@pytest.mark.create_values_out_dict
def test_create_values_for_returned_dict_7(self):
"""Test CSV out"""
from ExportIndicators import create_values_for_returned_dict, FORMAT_CSV, RequestArguments, CTX_VALUES_KEY
with open('ExportIndicators_test/TestHelperFunctions/demisto_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
request_args = RequestArguments(query='', out_format=FORMAT_CSV)
returned_dict, _ = create_values_for_returned_dict(iocs_json, request_args)
csv_out = returned_dict.get(CTX_VALUES_KEY)
# assert len(csv_out) == IOC_RES_LEN + 1
with open('ExportIndicators_test/TestHelperFunctions/iocs_out_csv_old.txt', 'r') as iocs_out_f:
expected_csv_out = iocs_out_f.read()
for csv_lint in csv_out.split('\n'):
assert csv_lint in expected_csv_out
@pytest.mark.validate_basic_authentication
def test_validate_basic_authentication(self):
"""Test Authentication"""
from ExportIndicators import validate_basic_authentication
username, password = 'user', 'pwd'
data = {
"empty_auth": {},
"basic_missing_auth": {
"Authorization": "missing basic"
},
"colon_missing_auth": {
"Authorization": "Basic bWlzc2luZ19jb2xvbg=="
},
"wrong_length_auth": {
"Authorization": "Basic YTpiOmM="
},
"wrong_credentials_auth": {
"Authorization": "Basic YTpi"
},
"right_credentials_auth": {
"Authorization": "Basic dXNlcjpwd2Q="
}
}
assert not validate_basic_authentication(data.get('empty_auth'), username, password)
assert not validate_basic_authentication(data.get('basic_missing_auth'), username, password)
assert not validate_basic_authentication(data.get('colon_missing_auth'), username, password)
assert not validate_basic_authentication(data.get('wrong_length_auth'), username, password)
assert not validate_basic_authentication(data.get('wrong_credentials_auth'), username, password)
assert validate_basic_authentication(data.get('right_credentials_auth'), username, password)
@pytest.mark.validate_basic_authentication
def test_panos_url_formatting(self):
from ExportIndicators import panos_url_formatting, CTX_VALUES_KEY
with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
# strips port numbers
returned_dict, num_of_indicators = panos_url_formatting(iocs=iocs_json, drop_invalids=True, strip_port=True)
returned_output = returned_dict.get(CTX_VALUES_KEY)
assert returned_output == "1.2.3.4/wget\nwww.demisto.com/cool"
assert num_of_indicators == 2
# should ignore indicators with port numbers
returned_dict, num_of_indicators = panos_url_formatting(iocs=iocs_json, drop_invalids=True, strip_port=False)
returned_output = returned_dict.get(CTX_VALUES_KEY)
assert returned_output == 'www.demisto.com/cool'
assert num_of_indicators == 1
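        # i.e. with strip_port=True the port is removed ("1.2.3.4:89/wget" ->
        # "1.2.3.4/wget"); with strip_port=False and drop_invalids=True the
        # ported indicator is dropped entirely, leaving only the plain URL.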
@pytest.mark.validate_basic_authentication
def test_create_proxysg_out_format(self):
from ExportIndicators import create_proxysg_out_format, CTX_VALUES_KEY
with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
# classify all categories
returned_dict, num_of_indicators = create_proxysg_out_format(iocs=iocs_json, category_default="default",
category_attribute='')
returned_output = returned_dict.get(CTX_VALUES_KEY)
assert returned_output == "define category category2\n1.2.3.4:89/wget\nend\n" \
"define category category1\nhttps://www.demisto.com/cool\nend\n"
assert num_of_indicators == 2
# listed category does not exist - all results should be in default category
returned_dict, num_of_indicators = create_proxysg_out_format(iocs=iocs_json, category_default="default",
category_attribute="category3")
returned_output = returned_dict.get(CTX_VALUES_KEY)
assert returned_output == "define category default\n1.2.3.4:89/wget\n" \
"https://www.demisto.com/cool\nend\n"
assert num_of_indicators == 2
# list category2 only, the rest go to default
returned_dict, num_of_indicators = create_proxysg_out_format(iocs=iocs_json, category_default="default",
category_attribute="category2")
returned_output = returned_dict.get(CTX_VALUES_KEY)
assert returned_output == "define category category2\n1.2.3.4:89/wget\nend\n" \
"define category default\nhttps://www.demisto.com/cool\nend\n"
assert num_of_indicators == 2
    @pytest.mark.create_mwg_out_format
def test_create_mwg_out_format(self):
from ExportIndicators import create_mwg_out_format, CTX_VALUES_KEY
with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
# listed category does not exist - all results should be in default category
returned_dict = create_mwg_out_format(iocs=iocs_json, mwg_type="ip")
returned_output = returned_dict.get(CTX_VALUES_KEY)
assert returned_output == "type=ip\n\"1.2.3.4:89/wget\" \"AutoFocus Feed\"\n\"" \
"https://www.demisto.com/cool\" \"AutoFocus V2,VirusTotal," \
"Alien Vault OTX TAXII Feed\""
    @pytest.mark.create_json_out_format
def test_create_json_out_format(self):
from ExportIndicators import create_json_out_format, CTX_VALUES_KEY
with open('ExportIndicators_test/TestHelperFunctions/demisto_url_iocs.json', 'r') as iocs_json_f:
iocs_json = json.loads(iocs_json_f.read())
# listed category does not exist - all results should be in default category
returned_dict = create_json_out_format(iocs=iocs_json)
returned_output = json.loads(returned_dict.get(CTX_VALUES_KEY))
assert returned_output[0].get('indicator') == '1.2.3.4:89/wget'
assert isinstance(returned_output[0].get('value'), dict)
assert returned_output[1].get('indicator') == 'https://www.demisto.com/cool'
assert isinstance(returned_output[1].get('value'), dict)
@pytest.mark.ips_to_ranges
def test_ips_to_ranges_range(self):
from ExportIndicators import ips_to_ranges, COLLAPSE_TO_RANGES
ip_list = [IPAddress("1.1.1.1"), IPAddress("25.24.23.22"), IPAddress("22.21.20.19"),
IPAddress("1.1.1.2"), IPAddress("1.2.3.4"), IPAddress("1.1.1.3"), IPAddress("2.2.2.2"),
IPAddress("1.2.3.5")]
ip_range_list = ips_to_ranges(ip_list, COLLAPSE_TO_RANGES)
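        # consecutive addresses collapse into "first-last" ranges; isolated
        # addresses stay as single entries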
assert "1.1.1.1-1.1.1.3" in ip_range_list
assert "1.2.3.4-1.2.3.5" in ip_range_list
assert "1.1.1.2" not in ip_range_list
assert "2.2.2.2" in ip_range_list
assert "25.24.23.22" in ip_range_list
@pytest.mark.ips_to_cidrs
def test_ips_to_ranges_cidr(self):
from ExportIndicators import ips_to_ranges, COLLAPSE_TO_CIDR
ip_list = [IPAddress("1.1.1.1"), IPAddress("25.24.23.22"), IPAddress("22.21.20.19"),
IPAddress("1.1.1.2"), IPAddress("1.2.3.4"), IPAddress("1.1.1.3"), IPAddress("2.2.2.2"),
IPAddress("1.2.3.5")]
ip_range_list = ips_to_ranges(ip_list, COLLAPSE_TO_CIDR)
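        # adjacent address pairs merge into /31 CIDR blocks, so the pair member
        # that is not the network address no longer appears individually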
assert "1.1.1.1" in ip_range_list
assert "1.1.1.2/31" in ip_range_list
assert "1.2.3.4/31" in ip_range_list
assert "1.2.3.5" not in ip_range_list
assert "1.1.1.3" not in ip_range_list
assert "2.2.2.2" in ip_range_list
assert "25.24.23.22" in ip_range_list
hexsha: 1c2e8380043de9ea543efb9b099a5d397f7be960 | size: 4,030 | lang: Python | path: onnx/backend/test/case/node/bitshift.py | repo: How-Wang/onnx | commit: c940fa3fea84948e46603cab2f86467291443beb | licenses: ["Apache-2.0"] | stars: 1
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class BitShift(Base):
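    # Each export method builds a single BitShift node and validates it against
    # NumPy's native >> / << operators on the matching unsigned integer dtype.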
@staticmethod
    def export_right_uint8() -> None:
node = onnx.helper.make_node(
'BitShift',
inputs=['x', 'y'],
outputs=['z'],
direction="RIGHT"
)
x = np.array([16, 4, 1]).astype(np.uint8)
y = np.array([1, 2, 3]).astype(np.uint8)
z = x >> y # expected output [8, 1, 0]
expect(node, inputs=[x, y], outputs=[z],
name='test_bitshift_right_uint8')
@staticmethod
    def export_right_uint16() -> None:
node = onnx.helper.make_node(
'BitShift',
inputs=['x', 'y'],
outputs=['z'],
direction="RIGHT"
)
x = np.array([16, 4, 1]).astype(np.uint16)
y = np.array([1, 2, 3]).astype(np.uint16)
z = x >> y # expected output [8, 1, 0]
expect(node, inputs=[x, y], outputs=[z],
name='test_bitshift_right_uint16')
@staticmethod
    def export_right_uint32() -> None:
node = onnx.helper.make_node(
'BitShift',
inputs=['x', 'y'],
outputs=['z'],
direction="RIGHT"
)
x = np.array([16, 4, 1]).astype(np.uint32)
y = np.array([1, 2, 3]).astype(np.uint32)
z = x >> y # expected output [8, 1, 0]
expect(node, inputs=[x, y], outputs=[z],
name='test_bitshift_right_uint32')
@staticmethod
    def export_right_uint64() -> None:
node = onnx.helper.make_node(
'BitShift',
inputs=['x', 'y'],
outputs=['z'],
direction="RIGHT"
)
x = np.array([16, 4, 1]).astype(np.uint64)
y = np.array([1, 2, 3]).astype(np.uint64)
z = x >> y # expected output [8, 1, 0]
expect(node, inputs=[x, y], outputs=[z],
name='test_bitshift_right_uint64')
@staticmethod
    def export_left_uint8() -> None:
node = onnx.helper.make_node(
'BitShift',
inputs=['x', 'y'],
outputs=['z'],
direction="LEFT"
)
x = np.array([16, 4, 1]).astype(np.uint8)
y = np.array([1, 2, 3]).astype(np.uint8)
z = x << y # expected output [32, 16, 8]
expect(node, inputs=[x, y], outputs=[z],
name='test_bitshift_left_uint8')
@staticmethod
    def export_left_uint16() -> None:
node = onnx.helper.make_node(
'BitShift',
inputs=['x', 'y'],
outputs=['z'],
direction="LEFT"
)
x = np.array([16, 4, 1]).astype(np.uint16)
y = np.array([1, 2, 3]).astype(np.uint16)
z = x << y # expected output [32, 16, 8]
expect(node, inputs=[x, y], outputs=[z],
name='test_bitshift_left_uint16')
@staticmethod
    def export_left_uint32() -> None:
node = onnx.helper.make_node(
'BitShift',
inputs=['x', 'y'],
outputs=['z'],
direction="LEFT"
)
x = np.array([16, 4, 1]).astype(np.uint32)
y = np.array([1, 2, 3]).astype(np.uint32)
z = x << y # expected output [32, 16, 8]
expect(node, inputs=[x, y], outputs=[z],
name='test_bitshift_left_uint32')
@staticmethod
    def export_left_uint64() -> None:
node = onnx.helper.make_node(
'BitShift',
inputs=['x', 'y'],
outputs=['z'],
direction="LEFT"
)
x = np.array([16, 4, 1]).astype(np.uint64)
y = np.array([1, 2, 3]).astype(np.uint64)
z = x << y # expected output [32, 16, 8]
expect(node, inputs=[x, y], outputs=[z],
name='test_bitshift_left_uint64')
hexsha: 98bb8c98c3fbeda4d10ffc9abf50e0da07fcb878 | size: 31 | lang: Python | path: testpkg/subpkg/funcs.py | repo: yasteen/ml | commit: 9ccd17d15c1ef6fe8f30ffebd3dc17a0f1a51a4d | licenses: ["MIT"]
def asdf(x: int) -> int:
return x
hexsha: 98e2a7cb2998d28957e8026f4778f2ebd3e6d427 | size: 32 | lang: Python | path: plugins/emotes/__init__.py | repo: StarryPy/StarryPy-Historic | commit: b9dbd552b8c4631a5a8e9dda98b7ba447eca59da | licenses: ["WTFPL"] | stars: 38 | issues: 68 | forks: 21
from emotes import EmotesPlugin
hexsha: c7079b6463f10f4113efccbce9b406e7895be940 | size: 199 | lang: Python | path: test_simplemathcaptcha/tests.py | repo: BirkbeckCTP/django-simple-math-captcha | commit: fcaa1c7f3d1393ce61b5e87fe46e3d5132346299 | licenses: ["Naumen", "Condor-1.1", "Apache-1.1", "MS-PL"] | stars: 26 | issues: 9 | forks: 25
# flake8: noqa
from __future__ import absolute_import
from .utils_tests import UtilsTests
from .widget_tests import WidgetTests
from .field_tests import FieldTests
from .form_tests import FormTests
hexsha: c786e45ffcf3dbb193fbeb35dffb8e2f5c77aee4 | size: 15,348 | lang: Python | path: test/test_modeling_question_answering.py | repo: askainet/haystack | commit: 00aa1f41d7c21273d8c312a3fad0b51ddd446672 | licenses: ["Apache-2.0"] | forks: 1
import logging
import pytest
from math import isclose
import numpy as np
from haystack.modeling.infer import QAInferencer
from haystack.modeling.data_handler.inputs import QAInput, Question
@pytest.fixture()
def span_inference_result(bert_base_squad2, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
obj_input = [
QAInput(
doc_text="Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
questions=Question("Who counted the game among the best ever made?", uid="best_id_ever"),
)
]
result = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0]
return result
@pytest.fixture()
def no_answer_inference_result(bert_base_squad2, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
obj_input = [
QAInput(
doc_text='The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. The Amazon represents over half of the planet\'s remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.',
questions=Question(
"The Amazon represents less than half of the planets remaining what?", uid="best_id_ever"
),
)
]
result = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0]
return result
def test_inference_different_inputs(bert_base_squad2):
qa_format_1 = [
{
"questions": ["Who counted the game among the best ever made?"],
"text": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
}
]
q = Question(text="Who counted the game among the best ever made?")
qa_format_2 = QAInput(
questions=[q],
doc_text="Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
)
result1 = bert_base_squad2.inference_from_dicts(dicts=qa_format_1)
result2 = bert_base_squad2.inference_from_objects(objects=[qa_format_2])
assert result1 == result2
def test_span_inference_result_ranking_by_confidence(bert_base_squad2, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
obj_input = [
QAInput(
doc_text="Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
questions=Question("Who counted the game among the best ever made?", uid="best_id_ever"),
)
]
# by default, result is sorted by confidence and not by score
result_ranked_by_confidence = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0]
assert all(
result_ranked_by_confidence.prediction[i].confidence >= result_ranked_by_confidence.prediction[i + 1].confidence
for i in range(len(result_ranked_by_confidence.prediction) - 1)
)
assert not all(
result_ranked_by_confidence.prediction[i].score >= result_ranked_by_confidence.prediction[i + 1].score
for i in range(len(result_ranked_by_confidence.prediction) - 1)
)
# ranking can be adjusted so that result is sorted by score
bert_base_squad2.model.prediction_heads[0].use_confidence_scores_for_ranking = False
result_ranked_by_score = bert_base_squad2.inference_from_objects(obj_input, return_json=False)[0]
assert all(
result_ranked_by_score.prediction[i].score >= result_ranked_by_score.prediction[i + 1].score
for i in range(len(result_ranked_by_score.prediction) - 1)
)
assert not all(
result_ranked_by_score.prediction[i].confidence >= result_ranked_by_score.prediction[i + 1].confidence
for i in range(len(result_ranked_by_score.prediction) - 1)
)
def test_inference_objs(span_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
assert span_inference_result
def test_span_performance(span_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
best_pred = span_inference_result.prediction[0]
assert best_pred.answer == "GameTrailers"
best_score_gold = 13.4205
best_score = best_pred.score
assert isclose(best_score, best_score_gold, rel_tol=0.001)
no_answer_gap_gold = 13.9827
no_answer_gap = span_inference_result.no_answer_gap
assert isclose(no_answer_gap, no_answer_gap_gold, rel_tol=0.001)
def test_no_answer_performance(no_answer_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
best_pred = no_answer_inference_result.prediction[0]
assert best_pred.answer == "no_answer"
best_score_gold = 12.1445
best_score = best_pred.score
assert isclose(best_score, best_score_gold, rel_tol=0.001)
no_answer_gap_gold = -14.4646
no_answer_gap = no_answer_inference_result.no_answer_gap
assert isclose(no_answer_gap, no_answer_gap_gold, rel_tol=0.001)
def test_qa_pred_attributes(span_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
qa_pred = span_inference_result
attributes_gold = [
"aggregation_level",
"answer_types",
"context",
"context_window_size",
"ground_truth_answer",
"id",
"n_passages",
"no_answer_gap",
"prediction",
"question",
"to_json",
"to_squad_eval",
"token_offsets",
]
for ag in attributes_gold:
assert ag in dir(qa_pred)
def test_qa_candidate_attributes(span_inference_result, caplog=None):
if caplog:
caplog.set_level(logging.CRITICAL)
qa_candidate = span_inference_result.prediction[0]
attributes_gold = [
"aggregation_level",
"answer",
"answer_support",
"answer_type",
"context_window",
"n_passages_in_doc",
"offset_answer_end",
"offset_answer_start",
"offset_answer_support_end",
"offset_answer_support_start",
"offset_context_window_end",
"offset_context_window_start",
"offset_unit",
"passage_id",
"probability",
"score",
"set_answer_string",
"set_context_window",
"to_doc_level",
"to_list",
]
for ag in attributes_gold:
assert ag in dir(qa_candidate)
def test_id(span_inference_result, no_answer_inference_result):
assert span_inference_result.id == "best_id_ever"
assert no_answer_inference_result.id == "best_id_ever"
def test_duplicate_answer_filtering(bert_base_squad2):
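    # with duplicate_filtering = 0, predictions sharing an exact start or end
    # offset are collapsed, so all reported offsets must be unique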
qa_input = [
{
"questions": ["“In what country lies the Normandy?”"],
"text": """The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\")
raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia.
The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries. Weird things happen in Normandy, France.""",
}
]
bert_base_squad2.model.prediction_heads[0].n_best = 5
bert_base_squad2.model.prediction_heads[0].n_best_per_sample = 5
bert_base_squad2.model.prediction_heads[0].duplicate_filtering = 0
result = bert_base_squad2.inference_from_dicts(dicts=qa_input)
offset_answer_starts = []
offset_answer_ends = []
for answer in result[0]["predictions"][0]["answers"]:
offset_answer_starts.append(answer["offset_answer_start"])
offset_answer_ends.append(answer["offset_answer_end"])
assert len(offset_answer_starts) == len(set(offset_answer_starts))
assert len(offset_answer_ends) == len(set(offset_answer_ends))
def test_no_duplicate_answer_filtering(bert_base_squad2):
qa_input = [
{
"questions": ["“In what country lies the Normandy?”"],
"text": """The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\")
raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia.
The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries. Weird things happen in Normandy, France.""",
}
]
bert_base_squad2.model.prediction_heads[0].n_best = 5
bert_base_squad2.model.prediction_heads[0].n_best_per_sample = 5
bert_base_squad2.model.prediction_heads[0].duplicate_filtering = -1
bert_base_squad2.model.prediction_heads[0].no_ans_boost = -100.0
result = bert_base_squad2.inference_from_dicts(dicts=qa_input)
offset_answer_starts = []
offset_answer_ends = []
for answer in result[0]["predictions"][0]["answers"]:
offset_answer_starts.append(answer["offset_answer_start"])
offset_answer_ends.append(answer["offset_answer_end"])
assert len(offset_answer_starts) != len(set(offset_answer_starts))
assert len(offset_answer_ends) != len(set(offset_answer_ends))
def test_range_duplicate_answer_filtering(bert_base_squad2):
qa_input = [
{
"questions": ["“In what country lies the Normandy?”"],
"text": """The Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\")
raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia.
The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries. Weird things happen in Normandy, France.""",
}
]
bert_base_squad2.model.prediction_heads[0].n_best = 5
bert_base_squad2.model.prediction_heads[0].n_best_per_sample = 5
bert_base_squad2.model.prediction_heads[0].duplicate_filtering = 5
result = bert_base_squad2.inference_from_dicts(dicts=qa_input)
offset_answer_starts = []
offset_answer_ends = []
for answer in result[0]["predictions"][0]["answers"]:
offset_answer_starts.append(answer["offset_answer_start"])
offset_answer_ends.append(answer["offset_answer_end"])
offset_answer_starts.sort()
offset_answer_starts.remove(0)
distances_answer_starts = [j - i for i, j in zip(offset_answer_starts[:-1], offset_answer_starts[1:])]
assert all(
distance > bert_base_squad2.model.prediction_heads[0].duplicate_filtering
for distance in distances_answer_starts
)
offset_answer_ends.sort()
offset_answer_ends.remove(0)
distances_answer_ends = [j - i for i, j in zip(offset_answer_ends[:-1], offset_answer_ends[1:])]
assert all(
distance > bert_base_squad2.model.prediction_heads[0].duplicate_filtering for distance in distances_answer_ends
)
def test_qa_confidence():
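    # note: this loads the real deepset/roberta-base-squad2 model (downloaded on
    # first run) and requests gpu=True, so it needs network access at minimum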
inferencer = QAInferencer.load(
"deepset/roberta-base-squad2", task_type="question_answering", batch_size=40, gpu=True
)
QA_input = [
{
"questions": ["Who counted the game among the best ever made?"],
"text": "Twilight Princess was released to universal critical acclaim and commercial success. It received perfect scores from major publications such as 1UP.com, Computer and Video Games, Electronic Gaming Monthly, Game Informer, GamesRadar, and GameSpy. On the review aggregators GameRankings and Metacritic, Twilight Princess has average scores of 95% and 95 for the Wii version and scores of 95% and 96 for the GameCube version. GameTrailers in their review called it one of the greatest games ever created.",
}
]
result = inferencer.inference_from_dicts(dicts=QA_input, return_json=False)[0]
assert np.isclose(result.prediction[0].confidence, 0.990427553653717)
assert result.prediction[0].answer == "GameTrailers"
if __name__ == "__main__":
    # the tests above take pytest fixtures, so invoke them through pytest rather
    # than calling the functions directly (which would fail without arguments)
    pytest.main([__file__])
hexsha: c797ca70cbc11a6aa5888cb54f388e8550bdac7d | size: 35 | lang: Python | path: test/test.py | repo: ka2hyeon/allweather | commit: 95a4030a804f8c50fc88770d55e88e694caeec1b | licenses: ["MIT"]
from unittest import TestCase, main
hexsha: c7b03565ff5336c46af49cf6cb7d5f196594d29c | size: 71 | lang: Python | path: __init__.py | repo: bheff88/pylation | commit: 1e3e9e0cddc09ed7bcdb12dbbb12bb3efaa9ac46 | licenses: ["MIT"]
from pylation.mlp import MLP
from pylation.relational import Relational
hexsha: 1be54b71d2db43f3cd71f16d102dc9354e29f9dd | size: 100 | lang: Python | path: make_json_fixtures.py | repo: telia-oss/birgitta-example-etl | commit: 8bb32aac94486b4edc1fee3964cf7d2dcf095020 | licenses: ["MIT"] | stars: 8 | issues: 218 | forks: 4
import newsltd_etl
from birgitta.schema.fixtures import json as fx_json
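# fx_json.make walks the given project package and writes JSON fixture files
# for its datasets (a summary of the birgitta fixtures API, not verified
# against this birgitta version)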
fx_json.make(newsltd_etl)
hexsha: 403314234487035a51df48ec42a4496b9baee036 | size: 157 | lang: Python | path: tests/tagifai/test_config.py | repo: sri-spirited/MLOps | commit: 2c5235c587870666c9f1569f401875754719d840 | licenses: ["MIT"] | stars: 7 | also indexed as: atulkr28/MLOps @ c97f18e9c08d6966e1ab4459adc0cc59ec4da243
# tests/tagifai/test_config.py
# Test tagifai/config.py components.
from tagifai import config
def test_config():
assert config.logger.name == "root"
hexsha: 4040c656cbefd59c255eae763e475762f8ba2365 | size: 47,713 | lang: Python | path: tests/oracle_test.py | repo: sh0nk/simple-db-migrate | commit: 8483a4ae11f5aea5514da55d7ff139a5a1bb2a71 | licenses: ["Apache-2.0"] | stars: 120 | issues: 19 | forks: 36
# -*- coding: utf-8 -*-
import unittest
import sys
import simple_db_migrate.core
from mock import patch, Mock, MagicMock, call, sentinel
from simple_db_migrate.oracle import Oracle
from tests import BaseTest
class OracleTest(BaseTest):
def setUp(self):
super(OracleTest, self).setUp()
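        # canned results keyed by SQL text; the cursor mock's side effects look
        # up each executed statement here to decide what to return or raise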
self.execute_returns = {}
self.fetchone_returns = {'select count(*) from db_version': [0]}
self.close_returns = {}
        self.last_execute_command = ''
        self.last_execute_commands = []
self.config_dict = {'database_script_encoding': 'utf8',
'database_encoding': 'American_America.UTF8',
'database_host': 'somehost',
'database_user': 'root',
'database_password': 'migration_test',
'database_name': 'SID',
'database_version_table': 'db_version',
'drop_db_first': False
}
self.config_mock = MagicMock(spec_set=dict, wraps=self.config_dict)
self.cursor_mock = Mock(**{"execute": Mock(side_effect=self.execute_side_effect),
"close": Mock(side_effect=self.close_side_effect),
"fetchone": Mock(side_effect=self.fetchone_side_effect),
"setinputsizes": Mock(return_value = None),
"rowcount": 0})
self.db_mock = Mock(**{"cursor.return_value": self.cursor_mock})
self.db_driver_mock = Mock(**{"connect.return_value": self.db_mock, "CLOB": "CLOB"})
self.stdin_mock = Mock(**{"readline.return_value":"dba_user"})
self.getpass_mock = Mock(return_value = "dba_password")
@patch.dict('sys.modules', cx_Oracle=MagicMock())
def test_it_should_use_cx_Oracle_as_driver(self):
sys.modules['cx_Oracle'].connect.return_value = self.db_mock
Oracle(self.config_mock)
self.assertNotEqual(0, sys.modules['cx_Oracle'].connect.call_count)
@patch.dict('sys.modules', cx_Oracle=MagicMock())
def test_it_should_use_default_port(self):
sys.modules['cx_Oracle'].connect.return_value = self.db_mock
sys.modules['cx_Oracle'].makedsn.side_effect = self.makedsn_side_effect
Oracle(self.config_mock)
self.assertEqual(call(dsn="(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=somehost)(PORT=1521)))(CONNECT_DATA=(SID=SID)))", password='migration_test', user='root'), sys.modules['cx_Oracle'].connect.call_args)
@patch.dict('sys.modules', cx_Oracle=MagicMock())
def test_it_should_use_given_configuration(self):
sys.modules['cx_Oracle'].connect.return_value = self.db_mock
sys.modules['cx_Oracle'].makedsn.side_effect = self.makedsn_side_effect
self.config_dict['database_port'] = 9876
Oracle(self.config_mock)
self.assertEqual(call(dsn="(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=somehost)(PORT=9876)))(CONNECT_DATA=(SID=SID)))", password='migration_test', user='root'), sys.modules['cx_Oracle'].connect.call_args)
@patch.dict('sys.modules', cx_Oracle=MagicMock())
def test_it_should_use_database_name_as_dsn_when_database_host_is_not_set(self):
sys.modules['cx_Oracle'].connect.return_value = self.db_mock
self.config_dict['database_host'] = None
Oracle(self.config_mock)
self.assertEqual(call(dsn='SID', password='migration_test', user='root'), sys.modules['cx_Oracle'].connect.call_args)
def test_it_should_stop_process_when_an_error_occur_during_connect_database(self):
self.db_driver_mock.connect.side_effect = Exception("error when connecting")
try:
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.fail("it should not get here")
except Exception as e:
self.assertEqual("could not connect to database: error when connecting", str(e))
self.assertEqual(0, self.db_mock.commit.call_count)
self.assertEqual(0, self.db_mock.close.call_count)
self.assertEqual(0, self.cursor_mock.execute.call_count)
self.assertEqual(0, self.cursor_mock.close.call_count)
def test_it_should_create_database_and_version_table_on_init_if_not_exists(self):
self.first_return = Exception("could not connect to database: ORA-01017 invalid user/password")
def connect_side_effect(*args, **kwargs):
ret = sentinel.DEFAULT
if (kwargs['user'] == 'root') and self.first_return:
ret = self.first_return
self.first_return = None
raise ret
return ret
self.db_driver_mock.connect.side_effect = connect_side_effect
self.execute_returns["select version from db_version"] = Exception("Table doesn't exist")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(8, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(7, self.db_mock.close.call_count)
expected_execute_calls = [
call('create user root identified by migration_test'),
call('grant connect, resource to root'),
call('grant create public synonym to root'),
call('grant drop public synonym to root'),
call('select version from db_version'),
call("create table db_version ( id number(11) not null, version varchar2(20) default '0' NOT NULL, label varchar2(255), name varchar2(255), sql_up clob, sql_down clob, CONSTRAINT db_version_pk PRIMARY KEY (id) ENABLE)"),
call('drop sequence db_version_seq'),
call('create sequence db_version_seq start with 1 increment by 1 nomaxvalue'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(7, self.cursor_mock.close.call_count)
def test_it_should_ignore_errors_while_dropping_the_sequence_duringthe_create_database_process(self):
self.first_return = Exception("could not connect to database: ORA-01017 invalid user/password")
def connect_side_effect(*args, **kwargs):
ret = sentinel.DEFAULT
if (kwargs['user'] == 'root') and self.first_return:
ret = self.first_return
self.first_return = None
raise ret
return ret
self.db_driver_mock.connect.side_effect = connect_side_effect
self.execute_returns["select version from db_version"] = Exception("Table doesn't exist")
self.execute_returns["drop sequence db_version_seq"] = Exception("Sequence doesn't exist")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(2, self.db_mock.rollback.call_count)
self.assertEqual(8, self.db_driver_mock.connect.call_count)
self.assertEqual(3, self.db_mock.commit.call_count)
self.assertEqual(7, self.db_mock.close.call_count)
expected_execute_calls = [
call('create user root identified by migration_test'),
call('grant connect, resource to root'),
call('grant create public synonym to root'),
call('grant drop public synonym to root'),
call('select version from db_version'),
call("create table db_version ( id number(11) not null, version varchar2(20) default '0' NOT NULL, label varchar2(255), name varchar2(255), sql_up clob, sql_down clob, CONSTRAINT db_version_pk PRIMARY KEY (id) ENABLE)"),
call('drop sequence db_version_seq'),
call('create sequence db_version_seq start with 1 increment by 1 nomaxvalue'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(7, self.cursor_mock.close.call_count)
def test_it_should_create_version_table_on_init_if_not_exists(self):
self.execute_returns["select version from db_version"] = Exception("Table doesn't exist")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(7, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(7, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call("create table db_version ( id number(11) not null, version varchar2(20) default '0' NOT NULL, label varchar2(255), name varchar2(255), sql_up clob, sql_down clob, CONSTRAINT db_version_pk PRIMARY KEY (id) ENABLE)"),
call('drop sequence db_version_seq'),
call('create sequence db_version_seq start with 1 increment by 1 nomaxvalue'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(6, self.cursor_mock.close.call_count)
def test_it_should_drop_database_on_init_if_its_asked(self):
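        # the query below asks the data dictionary for one "DROP ..." statement
        # per synonym/object owned by the migration user, which the driver then
        # executes to wipe the schema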
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.fetchone_returns[select_elements_to_drop_sql] = [("DELETE TABLE DB_VERSION CASCADE CONSTRAINTS;",)]
self.execute_returns["select version from db_version"] = Exception("Table doesn't exist")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(9, self.db_driver_mock.connect.call_count)
self.assertEqual(5, self.db_mock.commit.call_count)
self.assertEqual(9, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('DELETE TABLE DB_VERSION CASCADE CONSTRAINTS'),
call('select version from db_version'),
call("create table db_version ( id number(11) not null, version varchar2(20) default '0' NOT NULL, label varchar2(255), name varchar2(255), sql_up clob, sql_down clob, CONSTRAINT db_version_pk PRIMARY KEY (id) ENABLE)"),
call('drop sequence db_version_seq'),
call('create sequence db_version_seq start with 1 increment by 1 nomaxvalue'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(8, self.cursor_mock.close.call_count)
def test_it_should_create_user_when_it_does_not_exists_during_drop_database_selecting_elements_to_drop(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.execute_returns[select_elements_to_drop_sql] = Exception("could not connect to database: ORA-01017 invalid user/password")
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('create user root identified by migration_test'),
call('grant connect, resource to root'),
call('grant create public synonym to root'),
call('grant drop public synonym to root'),
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(5, self.cursor_mock.close.call_count)
def test_it_should_stop_process_when_an_error_occur_during_create_user(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.execute_returns[select_elements_to_drop_sql] = Exception("could not connect to database: ORA-01017 invalid user/password")
self.execute_returns['grant create public synonym to root'] = Exception("error when granting")
try:
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.fail("it should not get here")
except Exception as e:
self.assertEqual("check error: error when granting", str(e))
self.assertEqual(2, self.db_driver_mock.connect.call_count)
self.assertEqual(0, self.db_mock.commit.call_count)
self.assertEqual(2, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('create user root identified by migration_test'),
call('grant connect, resource to root'),
call('grant create public synonym to root')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(2, self.cursor_mock.close.call_count)
def test_it_should_stop_process_when_an_error_occur_during_drop_database_selecting_elements_to_drop(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.execute_returns[select_elements_to_drop_sql] = Exception("error when dropping")
try:
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.fail("it should not get here")
except Exception as e:
self.assertEqual("error when dropping", str(e))
self.assertEqual(0, self.db_mock.commit.call_count)
self.assertEqual(1, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql)
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(1, self.cursor_mock.close.call_count)
def test_it_should_stop_process_when_an_error_occur_during_drop_elements_from_database_and_user_asked_to_stop(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.fetchone_returns[select_elements_to_drop_sql] = [("DELETE TABLE DB_VERSION CASCADE CONSTRAINTS;",),("DELETE TABLE AUX CASCADE CONSTRAINTS;",)]
self.execute_returns["DELETE TABLE DB_VERSION CASCADE CONSTRAINTS"] = Exception("error dropping table")
self.stdin_mock.readline.return_value = "n"
try:
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.fail("it should not get here")
except Exception as e:
self.assertEqual("can't drop database objects for user 'root'", str(e))
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(1, self.db_mock.commit.call_count)
self.assertEqual(3, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('DELETE TABLE DB_VERSION CASCADE CONSTRAINTS'),
call('DELETE TABLE AUX CASCADE CONSTRAINTS')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(3, self.cursor_mock.close.call_count)
def test_it_should_not_stop_process_when_an_error_occur_during_drop_elements_from_database_and_user_asked_to_continue(self):
select_elements_to_drop_sql = """\
SELECT 'DROP PUBLIC SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = 'PUBLIC' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP SYNONYM ' || SYNONYM_NAME ||';' FROM ALL_SYNONYMS \
WHERE OWNER = '%s' AND TABLE_OWNER = '%s' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||';' FROM USER_OBJECTS \
WHERE OBJECT_TYPE <> 'TABLE' AND OBJECT_TYPE <> 'INDEX' AND \
OBJECT_TYPE<>'TRIGGER' AND OBJECT_TYPE<>'LOB' \
UNION ALL \
SELECT 'DROP ' || OBJECT_TYPE || ' ' || OBJECT_NAME ||' CASCADE CONSTRAINTS;' FROM USER_OBJECTS \
WHERE OBJECT_TYPE = 'TABLE' AND OBJECT_NAME NOT LIKE 'BIN$%%'""" % ('ROOT','ROOT','ROOT')
self.config_dict["drop_db_first"] = True
self.fetchone_returns[select_elements_to_drop_sql] = [("DELETE TABLE DB_VERSION CASCADE CONSTRAINTS;",),("DELETE TABLE AUX CASCADE CONSTRAINTS;",)]
self.execute_returns["DELETE TABLE DB_VERSION CASCADE CONSTRAINTS"] = Exception("error dropping table")
self.stdin_mock.readline.return_value = "y"
Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(3, self.db_mock.commit.call_count)
self.assertEqual(7, self.db_mock.close.call_count)
expected_execute_calls = [
call(select_elements_to_drop_sql),
call('DELETE TABLE DB_VERSION CASCADE CONSTRAINTS'),
call('DELETE TABLE AUX CASCADE CONSTRAINTS'),
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(6, self.cursor_mock.close.call_count)
def test_it_should_execute_migration_up_and_update_schema_version(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;")
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('create table spam()'),
call('insert into db_version (id, version, label, name, sql_up, sql_down) values (db_version_seq.nextval, :version, :label, :migration_file_name, :sql_up, :sql_down)', {'label': None, 'sql_up': 'create table spam();', 'version': '20090212112104', 'sql_down': 'drop table spam;', 'migration_file_name': '20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration'})
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(5, self.cursor_mock.close.call_count)
def test_it_should_execute_migration_down_and_update_schema_version(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("drop table spam;", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", False)
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('drop table spam'),
call('delete from db_version where version = :version', {'version': '20090212112104'})
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(5, self.cursor_mock.close.call_count)
def test_it_should_use_label_version_when_updating_schema_version(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(4, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('create table spam()'),
call('insert into db_version (id, version, label, name, sql_up, sql_down) values (db_version_seq.nextval, :version, :label, :migration_file_name, :sql_up, :sql_down)', {'label': "label", 'sql_up': 'create table spam();', 'version': '20090212112104', 'sql_down': 'drop table spam;', 'migration_file_name': '20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration'})
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(5, self.cursor_mock.close.call_count)
def test_it_should_enforce_sql_up_and_sql_down_type_size_when_updating_schema_version(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
self.assertEqual([call(sql_down='CLOB', sql_up='CLOB')], self.cursor_mock.setinputsizes.mock_calls)
def test_it_should_raise_whem_migration_sql_has_a_syntax_error(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertRaisesWithMessage(Exception, "error executing migration: invalid sql syntax 'create table foo(); create table spam());'", oracle.change,
"create table foo(); create table spam());", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam());", "drop table spam;", label_version="label")
def test_it_should_raise_whem_migration_sql_has_a_syntax_error_sql_with_codec_error(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
expected_raised_message = u"error executing migration: invalid sql syntax 'create table foo(); create table spam()); -- ônibus'"
if (sys.version_info < (3, 0)):
expected_raised_message = expected_raised_message.encode("utf-8")
self.assertRaisesWithMessage(Exception, expected_raised_message, oracle.change,
u"create table foo(); create table spam()); -- ônibus", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table foo(); create table spam());", "drop table spam;", label_version="label")
def test_it_should_stop_process_when_an_error_occur_during_database_change(self):
self.execute_returns["insert into spam"] = Exception("invalid sql")
try:
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam(); insert into spam", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
except Exception as e:
self.assertEqual("error executing migration: invalid sql\n\n[ERROR DETAILS] SQL command was:\ninsert into spam", str(e))
self.assertTrue(isinstance(e, simple_db_migrate.core.exceptions.MigrationException))
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('create table spam()'),
call('insert into spam')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_stop_process_when_an_error_occur_during_log_schema_version(self):
self.execute_returns['insert into db_version (id, version, label, name, sql_up, sql_down) values (db_version_seq.nextval, :version, :label, :migration_file_name, :sql_up, :sql_down)'] = Exception("invalid sql")
try:
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", label_version="label")
except Exception as e:
self.assertEqual('error logging migration: invalid sql\n\n[ERROR DETAILS] SQL command was:\n20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration', str(e))
self.assertTrue(isinstance(e, simple_db_migrate.core.exceptions.MigrationException))
self.assertEqual(6, self.db_driver_mock.connect.call_count)
self.assertEqual(1, self.db_mock.rollback.call_count)
self.assertEqual(3, self.db_mock.commit.call_count)
self.assertEqual(6, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('create table spam()'),
call('insert into db_version (id, version, label, name, sql_up, sql_down) values (db_version_seq.nextval, :version, :label, :migration_file_name, :sql_up, :sql_down)', {'label': 'label', 'sql_up': 'create table spam();', 'version': '20090212112104', 'sql_down': 'drop table spam;', 'migration_file_name': '20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration'})
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_log_execution_when_a_function_is_given_when_updating_schema_version(self):
execution_log_mock = Mock()
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
oracle.change("create table spam();", "20090212112104", "20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration", "create table spam();", "drop table spam;", execution_log=execution_log_mock)
expected_execution_log_calls = [
call('create table spam()\n-- 0 row(s) affected\n'),
call('migration 20090212112104_test_it_should_execute_migration_down_and_update_schema_version.migration registered\n')
]
self.assertEqual(expected_execution_log_calls, execution_log_mock.mock_calls)
def test_it_should_get_current_schema_version(self):
self.fetchone_returns = {'select count(*) from db_version': [0], 'select version from db_version order by id desc': ["0"]}
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
self.assertEqual("0", oracle.get_current_schema_version())
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('select version from db_version order by id desc')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_get_all_schema_versions(self):
expected_versions = []
expected_versions.append("0")
expected_versions.append("20090211120001")
expected_versions.append("20090211120002")
expected_versions.append("20090211120003")
self.fetchone_returns["select version from db_version order by id"] = list(zip(expected_versions))
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
schema_versions = oracle.get_all_schema_versions()
self.assertEqual(len(expected_versions), len(schema_versions))
for version in schema_versions:
self.assertTrue(version in expected_versions)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('select version from db_version order by id')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_get_all_schema_migrations(self):
expected_versions = []
expected_versions.append([1, "0", None, None, None, None])
expected_versions.append([2, "20090211120001", "label", "20090211120001_name", Mock(**{"read.return_value":"sql_up"}), Mock(**{"read.return_value":"sql_down"})])
self.fetchone_returns["select id, version, label, name, sql_up, sql_down from db_version order by id"] = list(expected_versions)
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
schema_migrations = oracle.get_all_schema_migrations()
self.assertEqual(len(expected_versions), len(schema_migrations))
for index, migration in enumerate(schema_migrations):
self.assertEqual(migration.id, expected_versions[index][0])
self.assertEqual(migration.version, expected_versions[index][1])
self.assertEqual(migration.label, expected_versions[index][2])
self.assertEqual(migration.file_name, expected_versions[index][3])
self.assertEqual(migration.sql_up, expected_versions[index][4] and expected_versions[index][4].read() or "")
self.assertEqual(migration.sql_down, expected_versions[index][5] and expected_versions[index][5].read() or "")
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call('select id, version, label, name, sql_up, sql_down from db_version order by id')
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_parse_sql_statements(self):
# TODO: include other types of sql
sql = "create table eggs; drop table spam; ; ;\
CREATE OR REPLACE FUNCTION simple \n\
RETURN VARCHAR2 IS \n\
BEGIN \n\
RETURN 'Simple Function'; \n\
END simple; \n\
/ \n\
drop table eggs; \n\
create or replace procedure proc_db_migrate(dias_fim_mes out number) \n\
as v number; \n\
begin \n\
SELECT LAST_DAY(SYSDATE) - SYSDATE \"Days Left\" \n\
into v \n\
FROM DUAL; \n\
dias_fim_mes := v; \n\
end; \n\
\t/ \n\
create OR RePLaCe TRIGGER \"FOLDER_TR\" \n\
BEFORE INSERT ON \"FOLDER\" \n\
FOR EACH ROW WHEN \n\
(\n\
new.\"FOLDER_ID\" IS NULL \n\
)\n\
BEGIN\n\
SELECT \"FOLDER_SQ\".nextval\n\
INTO :new.\"FOLDER_ID\"\n\
FROM dual;\n\
EnD;\n\
/\n\
CREATE OR REPLACE\t PACKAGE pkg_dbm \n\
AS \n\
FUNCTION getArea (i_rad NUMBER) \n\
RETURN NUMBER;\n\
PROCEDURE p_print (i_str1 VARCHAR2 := 'hello',\n\
i_str2 VARCHAR2 := 'world', \n\
i_end VARCHAR2 := '!');\n\
END;\n\
/ \n\
CREATE OR REPLACE\n PACKAGE BODY pkg_dbm \n\
AS \n\
FUNCTION getArea (i_rad NUMBER) \n\
RETURN NUMBER \n\
IS \n\
v_pi NUMBER := 3.14; \n\
BEGIN \n\
RETURN v_pi * (i_rad ** 2); \n\
END; \n\
PROCEDURE p_print (i_str1 VARCHAR2 := 'hello', i_str2 VARCHAR2 := 'world', i_end VARCHAR2 := '!') \n\
IS \n\
BEGIN \n\
DBMS_OUTPUT.put_line (i_str1 || ',' || i_str2 || i_end); \n\
END; \n\
END; \n\
/ \n\
DECLARE\n\
counter NUMBER(10,8) := 2; \r\n\
pi NUMBER(8,7) := 3.1415926; \n\
test NUMBER(10,8) NOT NULL := 10;\n\
BEGIN \n\
counter := pi/counter; \n\
pi := pi/3; \n\
dbms_output.put_line(counter); \n\
dbms_output.put_line(pi); \n\
END; \n\
/ \n\
BEGIN \n\
dbms_output.put_line('teste de bloco anonimo'); \n\
dbms_output.put_line(select 1 from dual); \n\
END; \n\
/ "
statements = Oracle._parse_sql_statements(sql)
self.assertEqual(10, len(statements))
self.assertEqual('create table eggs', statements[0])
self.assertEqual('drop table spam', statements[1])
self.assertEqual("CREATE OR REPLACE FUNCTION simple \n\
RETURN VARCHAR2 IS \n\
BEGIN \n\
RETURN 'Simple Function'; \n\
END simple;", statements[2])
self.assertEqual('drop table eggs', statements[3])
self.assertEqual('create or replace procedure proc_db_migrate(dias_fim_mes out number) \n\
as v number; \n\
begin \n\
SELECT LAST_DAY(SYSDATE) - SYSDATE \"Days Left\" \n\
into v \n\
FROM DUAL; \n\
dias_fim_mes := v; \n\
end;', statements[4])
self.assertEqual('create OR RePLaCe TRIGGER \"FOLDER_TR\" \n\
BEFORE INSERT ON \"FOLDER\" \n\
FOR EACH ROW WHEN \n\
(\n\
new.\"FOLDER_ID\" IS NULL \n\
)\n\
BEGIN\n\
SELECT \"FOLDER_SQ\".nextval\n\
INTO :new.\"FOLDER_ID\"\n\
FROM dual;\n\
EnD;', statements[5])
self.assertEqual("CREATE OR REPLACE\t PACKAGE pkg_dbm \n\
AS \n\
FUNCTION getArea (i_rad NUMBER) \n\
RETURN NUMBER;\n\
PROCEDURE p_print (i_str1 VARCHAR2 := 'hello',\n\
i_str2 VARCHAR2 := 'world', \n\
i_end VARCHAR2 := '!');\n\
END;", statements[6])
self.assertEqual("CREATE OR REPLACE\n PACKAGE BODY pkg_dbm \n\
AS \n\
FUNCTION getArea (i_rad NUMBER) \n\
RETURN NUMBER \n\
IS \n\
v_pi NUMBER := 3.14; \n\
BEGIN \n\
RETURN v_pi * (i_rad ** 2); \n\
END; \n\
PROCEDURE p_print (i_str1 VARCHAR2 := 'hello', i_str2 VARCHAR2 := 'world', i_end VARCHAR2 := '!') \n\
IS \n\
BEGIN \n\
DBMS_OUTPUT.put_line (i_str1 || ',' || i_str2 || i_end); \n\
END; \n\
END;", statements[7])
self.assertEqual("DECLARE\n\
counter NUMBER(10,8) := 2; \r\n\
pi NUMBER(8,7) := 3.1415926; \n\
test NUMBER(10,8) NOT NULL := 10;\n\
BEGIN \n\
counter := pi/counter; \n\
pi := pi/3; \n\
dbms_output.put_line(counter); \n\
dbms_output.put_line(pi); \n\
END;", statements[8])
self.assertEqual("BEGIN \n\
dbms_output.put_line('teste de bloco anonimo'); \n\
dbms_output.put_line(select 1 from dual); \n\
END;", statements[9])
def test_it_should_parse_sql_statements_with_html_inside(self):
sql = u"""
create table eggs;
INSERT INTO widget_parameter_domain (widget_parameter_id, label, value)
VALUES ((SELECT MAX(widget_parameter_id)
FROM widget_parameter), "Carros", '<div class="box-zap-geral">
<div class="box-zap box-zap-autos">
<a class="logo" target="_blank" title="ZAP" href="http://www.zap.com.br/Parceiros/g1/RedirG1.aspx?CodParceriaLink=42&URL=http://www.zap.com.br">');
drop table spam;
"""
statements = Oracle._parse_sql_statements(sql)
expected_sql_with_html = """INSERT INTO widget_parameter_domain (widget_parameter_id, label, value)
VALUES ((SELECT MAX(widget_parameter_id)
FROM widget_parameter), "Carros", '<div class="box-zap-geral">
<div class="box-zap box-zap-autos">
<a class="logo" target="_blank" title="ZAP" href="http://www.zap.com.br/Parceiros/g1/RedirG1.aspx?CodParceriaLink=42&URL=http://www.zap.com.br">')"""
self.assertEqual(3, len(statements))
self.assertEqual('create table eggs', statements[0])
self.assertEqual(expected_sql_with_html, statements[1])
self.assertEqual('drop table spam', statements[2])
def test_it_should_get_none_for_a_non_existent_version_in_database(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
ret = oracle.get_version_id_from_version_number('xxx')
self.assertEqual(None, ret)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call("select id from db_version where version = 'xxx' order by id desc")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_get_most_recent_version_for_a_existent_label_in_database(self):
self.fetchone_returns["select version from db_version where label = 'xxx' order by id desc"] = ["vesion", "version2", "version3"]
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
ret = oracle.get_version_number_from_label('xxx')
self.assertEqual("vesion", ret)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call("select version from db_version where label = 'xxx' order by id desc")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def test_it_should_get_none_for_a_non_existent_label_in_database(self):
oracle = Oracle(self.config_mock, self.db_driver_mock, self.getpass_mock, self.stdin_mock)
ret = oracle.get_version_number_from_label('xxx')
self.assertEqual(None, ret)
self.assertEqual(5, self.db_driver_mock.connect.call_count)
self.assertEqual(2, self.db_mock.commit.call_count)
self.assertEqual(5, self.db_mock.close.call_count)
expected_execute_calls = [
call('select version from db_version'),
call('select count(*) from db_version'),
call("insert into db_version (id, version) values (db_version_seq.nextval, '0')"),
call("select version from db_version where label = 'xxx' order by id desc")
]
self.assertEqual(expected_execute_calls, self.cursor_mock.execute.mock_calls)
self.assertEqual(4, self.cursor_mock.close.call_count)
def side_effect(self, returns, default_value):
commands = len(self.last_execute_commands)
if commands > 0:
self.last_execute_command = self.last_execute_commands[commands - 1]
value = result = returns.pop(self.last_execute_command, default_value)
if isinstance(result, Exception):
if commands > 0:
self.last_execute_commands.pop()
raise result
if isinstance(result, list) and len(result) > 0 and (isinstance(result[0], tuple) or isinstance(result[0], list)):
returns[self.last_execute_command] = result
value = result.pop(0)
elif isinstance(result, list) and len(result) == 0:
value = None
if commands > 0 and \
self.execute_returns.get(self.last_execute_command, None) is None and \
self.fetchone_returns.get(self.last_execute_command, None) is None and \
self.close_returns.get(self.last_execute_command, None) is None:
self.last_execute_commands.pop()
return value
def execute_side_effect(self, *args):
self.last_execute_commands.append(args[0])
return self.side_effect(self.execute_returns, 0)
def fetchone_side_effect(self, *args):
return self.side_effect(self.fetchone_returns, None)
def close_side_effect(self, *args):
return self.side_effect(self.close_returns, None)
def makedsn_side_effect(self, host, port, sid):
return "(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=%s)(PORT=%s)))(CONNECT_DATA=(SID=%s)))" % (host, port, sid)
if __name__ == "__main__":
unittest.main()
--- p003-2.py | jz1007/project-euler @ 4c821d3412371718ab5ecc45b5c877cc631d2ee1 | MIT | Python | 55 bytes | blob 4086a0653c503f236d614ba07fa09dcd593cae59 ---
import primefac as pf
# primefac.primefac() yields the prime factors lazily; materialize to print them
print(list(pf.primefac(600851475143)))
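# A library-free alternative (a sketch): Project Euler problem 3 asks for the
# largest prime factor, which plain trial division finds without primefac.
def largest_prime_factor(n):
    f = 2
    while f * f <= n:
        if n % f:
            f += 1
        else:
            n //= f  # divide out the smallest factor; what remains at the end is prime
    return n

print(largest_prime_factor(600851475143))  # 6857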
--- desktop/core/ext-py/nose-1.3.7/functional_tests/support/package3/src/b.py | kokosing/hue @ 2307f5379a35aae9be871e836432e6f45138b3d9 (5,079 stars) | also zks888/hue @ 93a8c370713e70b216c428caa2f75185ef809deb (1,623 issues, 2,033 forks) | Apache-2.0 | Python | 18 bytes | blob 40aaa8c9a5f8f0eb8011dec0e02088a6da0e296e ---
def b():
    pass
--- venv/lib/python3.8/site-packages/pip/_vendor/distlib/manifest.py | Retraces/UkraineBot @ 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 (2 stars) | also DesmoSearch/Desmobot @ b70b45df3485351f471080deb5c785c4bc5c4beb (19 issues) | MIT | Python | 96 bytes | blob 40c5a1c93479cfa5db5b228580015b2c773f4eea ---
/home/runner/.cache/pip/pool/9d/01/21/626828ade681673c85cf062c5f124046eddfa38124ba7535eb7535ea21
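# Note (an inference, not part of the file): the file's entire content is the
# single cache path above, i.e. the blob captured a pip cache-pool reference
# rather than the real distlib manifest.py source.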
--- server/users/tests/__init__.py | Sonatrix/reango @ 5533bb126a5972f7f4124c6e9a26c6207596ff80 (60 stars, 28 issues) | also ncrmro/ango @ 15bca070ed01ec8fa885a224305d1ac67d458b47 (14 forks) | MIT | Python | 46 bytes | blob 40edd5b3254e582e8da0eac1036d32532379b525 ---
from .browser import *
from .graphql import *
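# The star imports presumably re-export both suites so the test runner can
# discover them from the package root (an inference, not stated in the file).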
--- pyslash/__init__.py | chatorbot/pyslash @ 2e42fd4cde7b2957545c88b86231f3204f59de2c | MIT | Python | 176 bytes | blob 9051b1de8a64b358a5a18c6fcf53bda32d94464a ---
from .patcher import commands_init, slash_command_wrapper, slash_command_parent, update_commands_list
from .slash_command import CommandsContext, CommandsMessage, SlashCommand
--- tests/expectations/test_expectation_arguments.py | dz-1/great_expectations @ 8caa2e78d71a4a49dd0c4175c328419b02c06e3b | Apache-2.0 | Python | 16,173 bytes | blob 90587dc4e7db4e03097ad8ae09202343e5b3db30 ---
import logging
from typing import List
import pandas as pd
import pytest
import great_expectations.exceptions as ge_exceptions
from great_expectations.core import (
ExpectationConfiguration,
ExpectationSuite,
ExpectationSuiteValidationResult,
ExpectationValidationResult,
)
from great_expectations.core.batch import RuntimeBatchRequest
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import (
DataContextConfig,
InMemoryStoreBackendDefaults,
)
from great_expectations.validator.validator import Validator
logger = logging.getLogger(__name__)
try:
from pyspark.sql import DataFrame
except ImportError:
DataFrame = None
logger.debug(
"Unable to load pyspark; install optional spark dependency for support."
)
@pytest.fixture
def in_memory_runtime_context():
data_context_config: DataContextConfig = DataContextConfig(
datasources={
"pandas_datasource": {
"execution_engine": {
"class_name": "PandasExecutionEngine",
"module_name": "great_expectations.execution_engine",
},
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"data_connectors": {
"runtime_data_connector": {
"class_name": "RuntimeDataConnector",
"batch_identifiers": [
"id_key_0",
"id_key_1",
],
}
},
},
"spark_datasource": {
"execution_engine": {
"class_name": "SparkDFExecutionEngine",
"module_name": "great_expectations.execution_engine",
},
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"data_connectors": {
"runtime_data_connector": {
"class_name": "RuntimeDataConnector",
"batch_identifiers": [
"id_key_0",
"id_key_1",
],
}
},
},
},
expectations_store_name="expectations_store",
validations_store_name="validations_store",
evaluation_parameter_store_name="evaluation_parameter_store",
checkpoint_store_name="checkpoint_store",
store_backend_defaults=InMemoryStoreBackendDefaults(),
)
context: BaseDataContext = BaseDataContext(project_config=data_context_config)
return context
@pytest.fixture
def test_pandas_df():
df: pd.DataFrame = pd.DataFrame(
data=[["Scott"], ["Jeff"], ["Thomas"], ["Ann"]], columns=["Name"]
)
return df
@pytest.fixture
def test_spark_df(test_pandas_df, spark_session):
df: DataFrame = spark_session.createDataFrame(data=test_pandas_df)
return df
def test_catch_exceptions_no_exceptions(in_memory_runtime_context, test_spark_df):
catch_exceptions: bool = False # expect exceptions to be raised
result_format: str = "SUMMARY"
runtime_environment_arguments = {
"catch_exceptions": catch_exceptions,
"result_format": result_format,
}
expectation_arguments: dict = {
"include_config": True,
"column": "Name", # use correct column to avoid error
}
expectation_meta: dict = {"Notes": "Some notes"}
expectation_arguments_without_meta: dict = dict(
**runtime_environment_arguments, **expectation_arguments
)
expectation_configuration: ExpectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs=expectation_arguments_without_meta,
meta=expectation_meta,
)
suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(
"test_suite", overwrite_existing=True
)
suite.add_expectation(expectation_configuration=expectation_configuration)
runtime_batch_request = RuntimeBatchRequest(
datasource_name="spark_datasource",
data_connector_name="runtime_data_connector",
data_asset_name="insert_your_data_asset_name_here",
runtime_parameters={"batch_data": test_spark_df},
batch_identifiers={
"id_key_0": "id_value_0",
"id_key_1": "id_value_1",
},
)
validator: Validator = in_memory_runtime_context.get_validator(
batch_request=runtime_batch_request,
expectation_suite=suite,
)
# Test calling "validator.validate()" explicitly.
validator_validation: ExpectationSuiteValidationResult = validator.validate(
**runtime_environment_arguments
)
results: List[ExpectationValidationResult] = validator_validation.results
assert len(results) == 1
result: ExpectationValidationResult
result = results[0]
assert (
"exception_traceback" not in result.exception_info
) or not result.exception_info["exception_traceback"]
assert (
"exception_message" not in result.exception_info
) or not result.exception_info["exception_message"]
# Test calling "validator.expect_*" through "validator.validate_expectation()".
expectation_parameters: dict = dict(
**expectation_arguments_without_meta, **expectation_meta
)
result = validator.expect_column_values_to_not_be_null(**expectation_parameters)
assert (
"exception_traceback" not in result.exception_info
) or not result.exception_info["exception_traceback"]
assert (
"exception_message" not in result.exception_info
) or not result.exception_info["exception_message"]
def test_catch_exceptions_exception_occurred_catch_exceptions_false(
in_memory_runtime_context, test_spark_df
):
catch_exceptions: bool = False # expect exceptions to be raised
result_format: str = "SUMMARY"
runtime_environment_arguments = {
"catch_exceptions": catch_exceptions,
"result_format": result_format,
}
expectation_arguments: dict = {
"include_config": True,
"column": "unknown_column", # use intentionally incorrect column to force error in "MetricProvider" evaluations
}
expectation_meta: dict = {"Notes": "Some notes"}
expectation_arguments_without_meta: dict = dict(
**runtime_environment_arguments, **expectation_arguments
)
expectation_configuration: ExpectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs=expectation_arguments_without_meta,
meta=expectation_meta,
)
suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(
"test_suite", overwrite_existing=True
)
suite.add_expectation(expectation_configuration=expectation_configuration)
runtime_batch_request = RuntimeBatchRequest(
datasource_name="spark_datasource",
data_connector_name="runtime_data_connector",
data_asset_name="insert_your_data_asset_name_here",
runtime_parameters={"batch_data": test_spark_df},
batch_identifiers={
"id_key_0": "id_value_0",
"id_key_1": "id_value_1",
},
)
validator: Validator = in_memory_runtime_context.get_validator(
batch_request=runtime_batch_request,
expectation_suite=suite,
)
expected_exception_message: str = (
'Error: The column "unknown_column" in BatchData does not exist.'
)
# Test calling "validator.validate()" explicitly.
with pytest.raises(ge_exceptions.MetricResolutionError) as e:
# noinspection PyUnusedLocal
validator_validation: ExpectationSuiteValidationResult = validator.validate(
**runtime_environment_arguments
)
assert e.value.message == expected_exception_message
# Test calling "validator.expect_*" through "validator.validate_expectation()".
expectation_parameters: dict = dict(
**expectation_arguments_without_meta, **expectation_meta
)
with pytest.raises(ge_exceptions.MetricResolutionError) as e:
# noinspection PyUnusedLocal
result: ExpectationValidationResult = (
validator.expect_column_values_to_not_be_null(**expectation_parameters)
)
assert e.value.message == expected_exception_message
def test_catch_exceptions_exception_occurred_catch_exceptions_true(
in_memory_runtime_context, test_spark_df
):
catch_exceptions: bool = True # expect exceptions to be caught
result_format: str = "SUMMARY"
runtime_environment_arguments = {
"catch_exceptions": catch_exceptions,
"result_format": result_format,
}
expectation_arguments: dict = {
"include_config": True,
"column": "unknown_column", # use intentionally incorrect column to force error in "MetricProvider" evaluations
}
expectation_meta: dict = {"Notes": "Some notes"}
expectation_arguments_without_meta: dict = dict(
**runtime_environment_arguments, **expectation_arguments
)
expectation_configuration: ExpectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs=expectation_arguments_without_meta,
meta=expectation_meta,
)
suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(
"test_suite", overwrite_existing=True
)
suite.add_expectation(expectation_configuration=expectation_configuration)
runtime_batch_request = RuntimeBatchRequest(
datasource_name="spark_datasource",
data_connector_name="runtime_data_connector",
data_asset_name="insert_your_data_asset_name_here",
runtime_parameters={"batch_data": test_spark_df},
batch_identifiers={
"id_key_0": "id_value_0",
"id_key_1": "id_value_1",
},
)
validator: Validator = in_memory_runtime_context.get_validator(
batch_request=runtime_batch_request,
expectation_suite=suite,
)
expected_exception_message: str = (
'Error: The column "unknown_column" in BatchData does not exist.'
)
# Test calling "validator.validate()" explicitly.
validator_validation: ExpectationSuiteValidationResult = validator.validate(
**runtime_environment_arguments
)
results: List[ExpectationValidationResult] = validator_validation.results
assert len(results) == 1
result: ExpectationValidationResult
result = results[0]
assert "exception_traceback" in result.exception_info
assert "exception_message" in result.exception_info
assert result.exception_info["exception_message"] == expected_exception_message
# Test calling "validator.expect_*" through "validator.validate_expectation()".
expectation_parameters: dict = dict(
**expectation_arguments_without_meta, **expectation_meta
)
result = validator.expect_column_values_to_not_be_null(**expectation_parameters)
assert "exception_traceback" in result.exception_info
assert "exception_message" in result.exception_info
assert result.exception_info["exception_message"] == expected_exception_message
def test_result_format_configured_no_set_default_override(
in_memory_runtime_context, test_spark_df
):
catch_exceptions: bool = False # expect exceptions to be raised
result_format: str = "SUMMARY"
runtime_environment_arguments = {
"catch_exceptions": catch_exceptions,
"result_format": result_format,
}
expectation_arguments: dict = {
"include_config": True,
"column": "Name", # use correct column to avoid error
}
expectation_meta: dict = {"Notes": "Some notes"}
expectation_arguments_without_meta: dict = dict(
**runtime_environment_arguments, **expectation_arguments
)
expectation_configuration: ExpectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs=expectation_arguments_without_meta,
meta=expectation_meta,
)
suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(
"test_suite", overwrite_existing=True
)
suite.add_expectation(expectation_configuration=expectation_configuration)
runtime_batch_request = RuntimeBatchRequest(
datasource_name="spark_datasource",
data_connector_name="runtime_data_connector",
data_asset_name="insert_your_data_asset_name_here",
runtime_parameters={"batch_data": test_spark_df},
batch_identifiers={
"id_key_0": "id_value_0",
"id_key_1": "id_value_1",
},
)
validator: Validator = in_memory_runtime_context.get_validator(
batch_request=runtime_batch_request,
expectation_suite=suite,
)
# Test calling "validator.validate()" explicitly.
validator_validation: ExpectationSuiteValidationResult = validator.validate(
**runtime_environment_arguments
)
results: List[ExpectationValidationResult] = validator_validation.results
assert len(results) == 1
result: ExpectationValidationResult
result = results[0]
assert len(result.result.keys()) > 0
# Test calling "validator.expect_*" through "validator.validate_expectation()".
expectation_parameters: dict = dict(
**expectation_arguments_without_meta, **expectation_meta
)
result = validator.expect_column_values_to_not_be_null(**expectation_parameters)
assert len(result.result.keys()) > 0
def test_result_format_configured_with_set_default_override(
in_memory_runtime_context, test_spark_df
):
catch_exceptions: bool = False # expect exceptions to be raised
result_format: str = "SUMMARY"
runtime_environment_arguments = {
"catch_exceptions": catch_exceptions,
"result_format": result_format,
}
expectation_arguments: dict = {
"include_config": True,
"column": "Name", # use correct column to avoid error
}
expectation_meta: dict = {"Notes": "Some notes"}
expectation_arguments_without_meta: dict = dict(
**runtime_environment_arguments, **expectation_arguments
)
expectation_configuration: ExpectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs=expectation_arguments_without_meta,
meta=expectation_meta,
)
suite: ExpectationSuite = in_memory_runtime_context.create_expectation_suite(
"test_suite", overwrite_existing=True
)
suite.add_expectation(expectation_configuration=expectation_configuration)
runtime_batch_request = RuntimeBatchRequest(
datasource_name="spark_datasource",
data_connector_name="runtime_data_connector",
data_asset_name="insert_your_data_asset_name_here",
runtime_parameters={"batch_data": test_spark_df},
batch_identifiers={
"id_key_0": "id_value_0",
"id_key_1": "id_value_1",
},
)
validator: Validator = in_memory_runtime_context.get_validator(
batch_request=runtime_batch_request,
expectation_suite=suite,
)
validator.set_default_expectation_argument("result_format", "BOOLEAN_ONLY")
# Test calling "validator.validate()" explicitly.
validator_validation: ExpectationSuiteValidationResult = validator.validate()
results: List[ExpectationValidationResult] = validator_validation.results
assert len(results) == 1
result: ExpectationValidationResult
result = results[0]
assert len(result.result.keys()) == 0
# Test calling "validator.expect_*" through "validator.validate_expectation()".
expectation_parameters: dict = dict(**expectation_arguments, **expectation_meta)
result = validator.expect_column_values_to_not_be_null(**expectation_parameters)
assert len(result.result.keys()) == 0
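# The five tests above repeat the same suite/batch/validator setup verbatim.
# A fixture factory like this sketch (an addition for illustration, not part
# of the original file) would collapse that duplication using only calls the
# tests already make:
@pytest.fixture
def validator_factory(in_memory_runtime_context, test_spark_df):
    def _build(runtime_environment_arguments, expectation_arguments, expectation_meta):
        expectation_configuration = ExpectationConfiguration(
            expectation_type="expect_column_values_to_not_be_null",
            kwargs=dict(**runtime_environment_arguments, **expectation_arguments),
            meta=expectation_meta,
        )
        suite = in_memory_runtime_context.create_expectation_suite(
            "test_suite", overwrite_existing=True
        )
        suite.add_expectation(expectation_configuration=expectation_configuration)
        runtime_batch_request = RuntimeBatchRequest(
            datasource_name="spark_datasource",
            data_connector_name="runtime_data_connector",
            data_asset_name="insert_your_data_asset_name_here",
            runtime_parameters={"batch_data": test_spark_df},
            batch_identifiers={"id_key_0": "id_value_0", "id_key_1": "id_value_1"},
        )
        return in_memory_runtime_context.get_validator(
            batch_request=runtime_batch_request,
            expectation_suite=suite,
        )
    return _build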
--- gajou_api/src/gajou_api/http/__init__.py | ArtyomKomarenko/kaa @ de9c7ed9ae51378597e1ab1e3d6c285d58fb2d34 (3 stars, 2 issues) | also ArtyomKomarenko/gajou @ de9c7ed9ae51378597e1ab1e3d6c285d58fb2d34 | MIT | Python | 32 bytes | blob 9077c64fdbb426583cf23b4bfc6f418347dbd11d ---
from .base_http import BaseHTTP
--- pytsp/core/__init__.py | billsioros/pytsp @ 7f3a8172bcb3bb9bec8655dcb490099b60a4c962 (7 stars) | MIT | Python | 295 bytes | blob 907d2f14e39e05094d808fe6504737e56edfe6f7 ---
from pytsp.core.annealing import (AnnealingMixin, CompressedAnnealing,
                                  SimulatedAnnealing)
from pytsp.core.genetic import GeneticAlgorithm
from pytsp.core.util import Model, cached, jarvis
from pytsp.core.tsp import TravellingSalesman, TravellingSalesmanTimeWindows
--- tests/components/knx/test_expose.py | mikan-megane/core @ 837220cce40890e296920d33a623adbc11bd15a6 (11 stars) | also jagadeeshvenkatesh/core @ 1bd982668449815fee2105478569f8e4b5670add (79 issues, 6 forks) | Apache-2.0 | Python | 3,024 bytes | blob 908ef0a56f8ed3bc4042554f27f9748d5a962e21 ---
"""Test knx expose."""
from homeassistant.components.knx import CONF_KNX_EXPOSE, KNX_ADDRESS
from homeassistant.const import CONF_ATTRIBUTE, CONF_ENTITY_ID, CONF_TYPE
from . import setup_knx_integration
async def test_binary_expose(hass, knx_ip_interface_mock):
"""Test that a binary expose sends only telegrams on state change."""
entity_id = "fake.entity"
await setup_knx_integration(
hass,
knx_ip_interface_mock,
{
CONF_KNX_EXPOSE: {
CONF_TYPE: "binary",
KNX_ADDRESS: "1/1/8",
CONF_ENTITY_ID: entity_id,
}
},
)
assert not hass.states.async_all()
# Change state to on
knx_ip_interface_mock.reset_mock()
hass.states.async_set(entity_id, "on", {})
await hass.async_block_till_done()
assert (
knx_ip_interface_mock.send_telegram.call_count == 1
), "Expected telegram for state change"
# Change attribute; keep state
knx_ip_interface_mock.reset_mock()
hass.states.async_set(entity_id, "on", {"brightness": 180})
await hass.async_block_till_done()
assert (
knx_ip_interface_mock.send_telegram.call_count == 0
), "Expected no telegram; state not changed"
# Change attribute and state
knx_ip_interface_mock.reset_mock()
hass.states.async_set(entity_id, "off", {"brightness": 0})
await hass.async_block_till_done()
assert (
knx_ip_interface_mock.send_telegram.call_count == 1
), "Expected telegram for state change"
async def test_expose_attribute(hass, knx_ip_interface_mock):
"""Test that an expose sends only telegrams on attribute change."""
entity_id = "fake.entity"
attribute = "fake_attribute"
await setup_knx_integration(
hass,
knx_ip_interface_mock,
{
CONF_KNX_EXPOSE: {
CONF_TYPE: "percentU8",
KNX_ADDRESS: "1/1/8",
CONF_ENTITY_ID: entity_id,
CONF_ATTRIBUTE: attribute,
}
},
)
assert not hass.states.async_all()
# Change state to on; no attribute
knx_ip_interface_mock.reset_mock()
hass.states.async_set(entity_id, "on", {})
await hass.async_block_till_done()
assert knx_ip_interface_mock.send_telegram.call_count == 0
# Change attribute; keep state
knx_ip_interface_mock.reset_mock()
hass.states.async_set(entity_id, "on", {attribute: 1})
await hass.async_block_till_done()
assert knx_ip_interface_mock.send_telegram.call_count == 1
# Change state keep attribute
knx_ip_interface_mock.reset_mock()
hass.states.async_set(entity_id, "off", {attribute: 1})
await hass.async_block_till_done()
assert knx_ip_interface_mock.send_telegram.call_count == 0
# Change state and attribute
knx_ip_interface_mock.reset_mock()
hass.states.async_set(entity_id, "on", {attribute: 0})
await hass.async_block_till_done()
assert knx_ip_interface_mock.send_telegram.call_count == 1
--- contrib/tools/python/src/Lib/plat-mac/Carbon/Qdoffs.py | HeyLey/catboost @ f472aed90604ebe727537d9d4a37147985e10ec2 (6,989 stars) | also python/src/Lib/plat-mac/Carbon/Qdoffs.py in weiqiangzheng/sl4a @ d3c17dca978cbeee545e12ea240a9dbf2a6999e9 (1,978 issues, 1,228 forks) | Apache-2.0 | Python | 22 bytes | blob 90a3d2cec3576fa9a48bf38cff4b6f95da7ba0b7 ---
from _Qdoffs import *
--- src/graph_transpiler/webdnn/backend/webgpu/kernels/tan.py | steerapi/webdnn @ 1df51cc094e5a528cfd3452c264905708eadb491 (1 star) | MIT | Python | 175 bytes | blob 90d4db54417af84e30192b21cfdf4bab0437798b ---
from webdnn.backend.webgpu.kernels.elementwise import register_elementwise_kernel
from webdnn.graph.operators.tan import Tan
register_elementwise_kernel(Tan, "y = tan(x0);")
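# Any other elementwise operator would register the same way; the Cos import
# path below is an assumption by analogy, not taken from this file.
# from webdnn.graph.operators.cos import Cos
# register_elementwise_kernel(Cos, "y = cos(x0);")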
--- txhlf/constants.py | jaarce/txhlf @ dbcd301034f4055cda9e4454ebb7716830a5d92c | BSD-3-Clause | Python | 126 bytes | blob 90f5d25330153b1faf0adfa3b4e230d7f587bb5e ---
BASE_URL = 'https://67855FB478D442A3B541C51156D2DF84.blockchain.ocp.oraclecloud.com:443/restproxy1/bcsgw/rest/v1/transaction/'
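# A common hardening sketch (an addition for illustration, not in the original
# file): let an environment variable override the hard-coded endpoint.
# import os
# BASE_URL = os.environ.get('TXHLF_BASE_URL', BASE_URL)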
--- datadog_checks_base/tests/test_metadata.py | tony612/integrations-core @ eb2d97909bceea8296b931974e78d467c75c7470 | BSD-3-Clause | Python | 18,860 bytes | blob 29077d7003bc115f282a0224893f98cbcb0230b4 ---
# (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import logging
import re
from collections import OrderedDict
import mock
import pytest
from datadog_checks.base import AgentCheck
pytestmark = pytest.mark.metadata
SET_CHECK_METADATA_METHOD = 'datadog_checks.base.stubs.datadog_agent.set_check_metadata'
# The order is used to derive the display name for the regex tests
NON_STANDARD_VERSIONS = OrderedDict()
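# Populated lazily via setdefault() inside the parametrize block further down,
# so each literal serves as both the test input and, through ids=list(...),
# the test's display name.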
class TestAttribute:
def test_default(self):
check = AgentCheck('test', {}, [{}])
assert check._metadata_manager is None
def test_no_check_id_error(self):
check = AgentCheck('test', {}, [{}])
with mock.patch('datadog_checks.base.checks.base.using_stub_aggregator', False):
with pytest.raises(RuntimeError):
check.set_metadata('foo', 'bar')
class TestRaw:
def test_default(self):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('foo', 'bar')
m.assert_called_once_with('test:123', 'foo', 'bar')
def test_new_transformer(self):
class NewAgentCheck(AgentCheck):
METADATA_TRANSFORMERS = {'foo': lambda value, options: value[::-1]}
check = NewAgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('foo', 'bar')
m.assert_called_once_with('test:123', 'foo', 'rab')
class TestVersion:
def test_override_allowed(self):
class NewAgentCheck(AgentCheck):
METADATA_TRANSFORMERS = {'version': lambda value, options: value[::-1]}
check = NewAgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', 'bar')
m.assert_called_once_with('test:123', 'version', 'rab')
def test_unknown_scheme(self, caplog):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', '1.0.0', scheme='foo')
assert m.call_count == 0
expected_message = 'Unable to transform `version` metadata value `1.0.0`: Unsupported version scheme `foo`'
for _, level, message in caplog.record_tuples:
if level == logging.ERROR and message == expected_message:
break
else:
raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
def test_semver_default(self):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', '1.0.5')
m.assert_any_call('test:123', 'version.major', '1')
m.assert_any_call('test:123', 'version.minor', '0')
m.assert_any_call('test:123', 'version.patch', '5')
m.assert_any_call('test:123', 'version.raw', '1.0.5')
m.assert_any_call('test:123', 'version.scheme', 'semver')
assert m.call_count == 5
def test_semver_release(self):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', '1.0.5-gke.6', scheme='semver')
m.assert_any_call('test:123', 'version.major', '1')
m.assert_any_call('test:123', 'version.minor', '0')
m.assert_any_call('test:123', 'version.patch', '5')
m.assert_any_call('test:123', 'version.release', 'gke.6')
m.assert_any_call('test:123', 'version.raw', '1.0.5-gke.6')
m.assert_any_call('test:123', 'version.scheme', 'semver')
assert m.call_count == 6
def test_semver_release_and_build(self):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', '1.0.5-gke.6+3', scheme='semver')
m.assert_any_call('test:123', 'version.major', '1')
m.assert_any_call('test:123', 'version.minor', '0')
m.assert_any_call('test:123', 'version.patch', '5')
m.assert_any_call('test:123', 'version.release', 'gke.6')
m.assert_any_call('test:123', 'version.build', '3')
m.assert_any_call('test:123', 'version.raw', '1.0.5-gke.6+3')
m.assert_any_call('test:123', 'version.scheme', 'semver')
assert m.call_count == 7
def test_semver_invalid(self, caplog):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', '1.0', scheme='semver')
assert m.call_count == 0
expected_prefix = 'Unable to transform `version` metadata value `1.0`: '
for _, level, message in caplog.record_tuples:
if level == logging.ERROR and message.startswith(expected_prefix):
break
else:
raise AssertionError('Expected ERROR log starting with message: {}'.format(expected_prefix))
@pytest.mark.parametrize(
'version, pattern, expected_parts',
[
(
NON_STANDARD_VERSIONS.setdefault('Docker', '18.03.0-ce, build 0520e24'),
r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)-(?P<release>\w+), build (?P<build>\w+)',
{'major': '18', 'minor': '03', 'patch': '0', 'release': 'ce', 'build': '0520e24'},
),
(
NON_STANDARD_VERSIONS.setdefault('Exchange Server', '2007 SP3 8.3.83.006'),
r'(?P<major>\d+) SP(?P<minor>\d+) (?P<build>[\w.]+)',
{'major': '2007', 'minor': '3', 'build': '8.3.83.006'},
),
(NON_STANDARD_VERSIONS.setdefault('Oracle', '19c'), r'(?P<major>\d+)\w*', {'major': '19'}),
(
NON_STANDARD_VERSIONS.setdefault('Presto', '0.221'),
r'(?P<major>\d+).(?P<minor>\d+)',
{'major': '0', 'minor': '221'},
),
(
NON_STANDARD_VERSIONS.setdefault('missing subgroup', '02'),
r'(?P<major>\d+)(\.(?P<minor>\d+))?',
{'major': '02'},
),
(
NON_STANDARD_VERSIONS.setdefault('precompiled', '1.2.3'),
re.compile(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)'),
{'major': '1', 'minor': '2', 'patch': '3'},
),
],
ids=list(NON_STANDARD_VERSIONS),
)
def test_regex(self, version, pattern, expected_parts):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', version, scheme='regex', pattern=pattern)
for name, value in expected_parts.items():
m.assert_any_call('test:123', 'version.{}'.format(name), value)
m.assert_any_call('test:123', 'version.raw', version)
m.assert_any_call('test:123', 'version.scheme', 'test')
assert m.call_count == len(expected_parts) + 2
def test_regex_final_scheme(self):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata(
'version',
'1.2.3.beta',
scheme='regex',
final_scheme='semver',
pattern=r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+).(?P<release>\w+)',
)
m.assert_any_call('test:123', 'version.major', '1')
m.assert_any_call('test:123', 'version.minor', '2')
m.assert_any_call('test:123', 'version.patch', '3')
m.assert_any_call('test:123', 'version.release', 'beta')
m.assert_any_call('test:123', 'version.raw', '1.2.3.beta')
m.assert_any_call('test:123', 'version.scheme', 'semver')
assert m.call_count == 6
def test_regex_no_pattern(self, caplog):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', '1.0', scheme='regex')
assert m.call_count == 0
expected_message = (
'Unable to transform `version` metadata value `1.0`: Version scheme `regex` requires a `pattern` option'
)
for _, level, message in caplog.record_tuples:
if level == logging.ERROR and message == expected_message:
break
else:
raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
def test_regex_no_match(self, caplog):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', '1.0.0', scheme='regex', pattern='foo')
assert m.call_count == 0
expected_message = (
'Unable to transform `version` metadata value `1.0.0`: '
'Version does not match the regular expression pattern'
)
for _, level, message in caplog.record_tuples:
if level == logging.ERROR and message == expected_message:
break
else:
raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
def test_regex_no_subgroups(self, caplog):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', '1.0.0', scheme='regex', pattern=r'\d\.\d\.\d')
assert m.call_count == 0
expected_message = (
'Unable to transform `version` metadata value `1.0.0`: '
'Regular expression pattern has no named subgroups'
)
for _, level, message in caplog.record_tuples:
if level == logging.ERROR and message == expected_message:
break
else:
raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
def test_parts(self):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata(
'version',
'19.15.2.2',
scheme='parts',
part_map={'year': '19', 'major': '15', 'minor': '2', 'patch': '2', 'revision': '56789'},
)
m.assert_any_call('test:123', 'version.year', '19')
m.assert_any_call('test:123', 'version.major', '15')
m.assert_any_call('test:123', 'version.minor', '2')
m.assert_any_call('test:123', 'version.patch', '2')
m.assert_any_call('test:123', 'version.revision', '56789')
m.assert_any_call('test:123', 'version.raw', '19.15.2.2')
m.assert_any_call('test:123', 'version.scheme', 'test')
assert m.call_count == 7
def test_parts_final_scheme(self):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata(
'version',
'19.15.2.2',
scheme='parts',
final_scheme='calver',
part_map={'year': '19', 'major': '15', 'minor': '2', 'patch': '2', 'revision': '56789'},
)
m.assert_any_call('test:123', 'version.year', '19')
m.assert_any_call('test:123', 'version.major', '15')
m.assert_any_call('test:123', 'version.minor', '2')
m.assert_any_call('test:123', 'version.patch', '2')
m.assert_any_call('test:123', 'version.revision', '56789')
m.assert_any_call('test:123', 'version.raw', '19.15.2.2')
m.assert_any_call('test:123', 'version.scheme', 'calver')
assert m.call_count == 7
def test_parts_no_part_map(self, caplog):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('version', '1.0', scheme='parts')
assert m.call_count == 0
expected_message = (
'Unable to transform `version` metadata value `1.0`: '
'Version scheme `parts` requires a `part_map` option'
)
for _, level, message in caplog.record_tuples:
if level == logging.ERROR and message == expected_message:
break
else:
raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
class TestConfig:
def test_no_section(self, caplog):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', {})
assert m.call_count == 0
expected_message = 'Unable to transform `config` metadata: The `section` option is required'
for _, level, message in caplog.record_tuples:
if level == logging.ERROR and message == expected_message:
break
else:
raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
def test_non_primitive(self, caplog):
check = AgentCheck('test', {}, [{'foo': ['bar']}])
check.check_id = 'test:123'
with caplog.at_level(logging.DEBUG), mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['foo'])
assert m.call_count == 1
args, _ = m.call_args
assert args[0] == 'test:123'
assert args[1] == 'config.instance'
expected_message = (
'Skipping metadata submission of non-primitive type `list` for field `foo` in section `instance`'
)
for _, level, message in caplog.record_tuples:
if level == logging.WARNING and message == expected_message:
break
else:
raise AssertionError('Expected ERROR log with message: {}'.format(expected_message))
def test_no_whitelist(self):
check = AgentCheck('test', {}, [{'foo': 'bar'}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance')
assert m.call_count == 0
def test_whitelist(self):
check = AgentCheck('test', {}, [{'foo': 'bar'}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['foo'])
assert m.call_count == 1
args, _ = m.call_args
assert args[0] == 'test:123'
assert args[1] == 'config.instance'
data = json.loads(args[2])[0]
assert data.pop('is_set', None) is True
assert data.pop('value', None) == 'bar'
assert not data
def test_whitelist_no_field(self):
check = AgentCheck('test', {}, [{}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['foo'])
assert m.call_count == 1
args, _ = m.call_args
assert args[0] == 'test:123'
assert args[1] == 'config.instance'
data = json.loads(args[2])[0]
assert data.pop('is_set', None) is False
assert not data
def test_blacklist(self):
check = AgentCheck('test', {}, [{'product_pw': 'foo'}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['product_pw'], blacklist=['pw'])
assert m.call_count == 0
def test_blacklist_default(self):
check = AgentCheck('test', {}, [{'product_password': 'foo'}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['product_password'])
assert m.call_count == 0
def test_whitelist_user_override(self):
check = AgentCheck('test', {}, [{'foo': 'bar', 'bar': 'foo', 'metadata_whitelist': ['bar']}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata('config', check.instance, section='instance', whitelist=['foo', 'bar'])
assert m.call_count == 1
args, _ = m.call_args
assert args[0] == 'test:123'
assert args[1] == 'config.instance'
data = json.loads(args[2])
assert len(data) == 1
data = data[0]
assert data.pop('is_set', None) is True
assert data.pop('value', None) == 'foo'
assert not data
def test_blacklist_user_override(self):
check = AgentCheck('test', {}, [{'foo': 'bar', 'bar': 'foo', 'metadata_blacklist': ['bar']}])
check.check_id = 'test:123'
with mock.patch(SET_CHECK_METADATA_METHOD) as m:
check.set_metadata(
'config', check.instance, section='instance', whitelist=['foo', 'bar'], blacklist=['foo']
)
assert m.call_count == 1
args, _ = m.call_args
assert args[0] == 'test:123'
assert args[1] == 'config.instance'
data = json.loads(args[2])
assert len(data) == 1
data = data[0]
assert data.pop('is_set', None) is True
assert data.pop('value', None) == 'bar'
assert not data
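# The caplog assertions above repeat the same for/else scan; a minimal helper
# sketch (hypothetical, not part of the original file; assumes `logging` and the
# pytest `caplog` fixture as used above) could centralize the pattern:
def assert_log_record(caplog, expected_level, expected_message):
    # Scan the captured records for an exact (level, message) match.
    for _, level, message in caplog.record_tuples:
        if level == expected_level and message == expected_message:
            return
    raise AssertionError(
        'Expected {} log with message: {}'.format(
            logging.getLevelName(expected_level), expected_message
        )
    )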
2907d63c1ce52b5eabe6f375abda69f54aac7ae8 | 30 | py | Python
conekt/flask_blast/__init__.py | legumeinfo/CoNekT | 709a4980cfa255cafd456b268e274db2b4b1f5fb | ["MIT"] | 14 | 2018-08-20T03:07:21.000Z | 2021-11-04T11:15:31.000Z
conekt/flask_blast/__init__.py | mutwil/CoNekT | f4a4496a87d14b15bcf587975b31a2edc24c6bf7 | ["MIT"] | 9 | 2018-07-17T15:30:47.000Z | 2021-07-05T13:11:54.000Z
conekt/flask_blast/__init__.py | mutwil/CoNekT | f4a4496a87d14b15bcf587975b31a2edc24c6bf7 | ["MIT"] | 3 | 2019-08-05T09:16:34.000Z | 2019-12-04T23:59:28.000Z
from .blast import BlastThread
2920e659e9334d6742eaae20c4bb2cc66b910c21 | 4,073 | py | Python
test/priors/test_normal_prior.py | techshot25/gpytorch | b4aee6f81a3428172d4914e7e0fef0e71cd1f519 | ["MIT"] | 1 | 2019-11-08T11:25:56.000Z | 2019-11-08T11:25:56.000Z
test/priors/test_normal_prior.py | VonRosenchild/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 | ["MIT"] | null | null | null
test/priors/test_normal_prior.py | VonRosenchild/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 | ["MIT"] | 1 | 2021-07-02T19:40:07.000Z | 2021-07-02T19:40:07.000Z
#!/usr/bin/env python3
import unittest
import torch
from gpytorch.priors import NormalPrior
from gpytorch.test.utils import least_used_cuda_device
from torch.distributions import Normal
class TestNormalPrior(unittest.TestCase):
def test_normal_prior_to_gpu(self):
if torch.cuda.is_available():
prior = NormalPrior(0, 1).cuda()
self.assertEqual(prior.loc.device.type, "cuda")
self.assertEqual(prior.scale.device.type, "cuda")
def test_normal_prior_validate_args(self):
with self.assertRaises(ValueError):
NormalPrior(0, -1, validate_args=True)
def test_normal_prior_log_prob(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
mean = torch.tensor(0.0, device=device)
variance = torch.tensor(1.0, device=device)
prior = NormalPrior(mean, variance)
dist = Normal(mean, variance)
t = torch.tensor(0.0, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.tensor([-1, 0.5], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.tensor([[-1, 0.5], [0.1, -2.0]], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
def test_normal_prior_log_prob_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
return self.test_normal_prior_log_prob(cuda=True)
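    # A hypothetical helper sketch (not in the original file): the log_prob tests
    # above and below sweep the same tensor shapes and could share this check.
    def _check_log_prob_matches(self, prior, dist, device, transform=None):
        to_dist_arg = transform if transform is not None else (lambda x: x)
        for shape in [(), (2,), (2, 2)]:
            t = torch.rand(shape, device=device)
            self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(to_dist_arg(t))))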
def test_normal_prior_log_prob_log_transform(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
mean = torch.tensor(0.0, device=device)
variance = torch.tensor(1.0, device=device)
prior = NormalPrior(mean, variance, transform=torch.exp)
dist = Normal(mean, variance)
t = torch.tensor(0.0, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
t = torch.tensor([-1, 0.5], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
t = torch.tensor([[-1, 0.5], [0.1, -2.0]], device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t.exp())))
def test_normal_prior_log_prob_log_transform_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
return self.test_normal_prior_log_prob_log_transform(cuda=True)
def test_normal_prior_batch_log_prob(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
mean = torch.tensor([0.0, 1.0], device=device)
variance = torch.tensor([1.0, 2.0], device=device)
prior = NormalPrior(mean, variance)
dist = Normal(mean, variance)
t = torch.zeros(2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.zeros(2, 2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
with self.assertRaises(RuntimeError):
prior.log_prob(torch.zeros(3, device=device))
mean = torch.tensor([[0.0, 1.0], [-1.0, 2.0]], device=device)
variance = torch.tensor([[1.0, 2.0], [0.5, 1.0]], device=device)
prior = NormalPrior(mean, variance)
dist = Normal(mean, variance)
t = torch.zeros(2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
t = torch.zeros(2, 2, device=device)
self.assertTrue(torch.equal(prior.log_prob(t), dist.log_prob(t)))
with self.assertRaises(RuntimeError):
prior.log_prob(torch.zeros(3, device=device))
with self.assertRaises(RuntimeError):
prior.log_prob(torch.zeros(2, 3, device=device))
def test_normal_prior_batch_log_prob_cuda(self):
if torch.cuda.is_available():
with least_used_cuda_device():
return self.test_normal_prior_batch_log_prob(cuda=True)
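# A minimal usage sketch of the broadcasting rules the batch tests above exercise
# (illustrative only, not part of the original file; assumes torch and gpytorch
# are importable as above):
def _broadcasting_demo():
    prior = NormalPrior(torch.zeros(2), torch.ones(2))  # batch of two independent Normals
    print(prior.log_prob(torch.zeros(2)).shape)     # torch.Size([2]): one log-density per batch element
    print(prior.log_prob(torch.zeros(3, 2)).shape)  # torch.Size([3, 2]): leading sample dim broadcasts
    # prior.log_prob(torch.zeros(3)) raises RuntimeError: trailing dim must match the batch shape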
if __name__ == "__main__":
unittest.main()
293b2a66bea32583eb57acca865d13c2909d52fd | 28 | py | Python
nicepy/tof/__init__.py | Campbell-IonMolecule/nicepy | c1c3f00a29795f520e1d898957784a975328fca2 | ["MIT"] | null | null | null
nicepy/tof/__init__.py | Campbell-IonMolecule/nicepy | c1c3f00a29795f520e1d898957784a975328fca2 | ["MIT"] | null | null | null
nicepy/tof/__init__.py | Campbell-IonMolecule/nicepy | c1c3f00a29795f520e1d898957784a975328fca2 | ["MIT"] | null | null | null
from nicepy.tof.tof import *
2969ccb8280d4ab99c60836d2ead1b9f6af595b8 | 25,615 | py | Python
test/integration/component/test_organization_states.py | lujiefsi/cloudstack | 74a7cbf753537928265c1f36afe086d69ad44e90 | ["Apache-2.0"] | 1 | 2020-06-17T08:53:55.000Z | 2020-06-17T08:53:55.000Z
test/integration/component/test_organization_states.py | lujiefsi/cloudstack | 74a7cbf753537928265c1f36afe086d69ad44e90 | ["Apache-2.0"] | 4 | 2016-06-01T14:35:16.000Z | 2020-06-24T14:09:05.000Z
test/integration/component/test_organization_states.py | lujiefsi/cloudstack | 74a7cbf753537928265c1f36afe086d69ad44e90 | ["Apache-2.0"] | 1 | 2017-04-03T18:22:22.000Z | 2017-04-03T18:22:22.000Z
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test cases relating to enabling/disabling of zone/pod/cluster/host
"""
# Import System modules
import traceback
from marvin.cloudstackAPI import *
# Import Local Modules
from marvin.cloudstackTestCase import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.lib.utils import *
from nose.plugins.attrib import attr
_multiprocess_shared_ = True
class TestOrganizationStates(cloudstackTestCase):
@classmethod
def setUpClass(cls):
try:
cls.testclient = super(TestOrganizationStates, cls).getClsTestClient()
cls.apiclient = cls.testclient.getApiClient()
            cls.testdata = cls.testclient.getParsedTestDataConfig()
cls.cleanup = []
zone = get_zone(cls.apiclient, cls.testclient.getZoneForTests())
cls.zone = Zone(zone.__dict__)
cls.template = get_template(cls.apiclient, cls.zone.id, cls.testdata["ostype"])
hostList = Host.list(cls.apiclient, zoneid=cls.zone.id, type="routing")
cls.host = Host(hostList[0].__dict__)
clusterList = Cluster.list(cls.apiclient, id=hostList[0].clusterid)
cls.cluster = Cluster(clusterList[0].__dict__)
podList = Pod.list(cls.apiclient, id=hostList[0].podid)
cls.pod = Pod(podList[0].__dict__)
cls.serviceOffering = ServiceOffering.create(
cls.apiclient,
cls.testdata["service_offering"],
hosttags="test"
)
hostupdResp = Host.update(cls.apiclient,
id=cls.host.id,
hosttags="test")
userAccountName = "-".join(("TestOrgUser", random_gen()))
adminAccountName = "-".join(("TestOrgAdmin", random_gen()))
cls.user_apiclient = cls.testclient.getUserApiClient(
UserName=userAccountName,
DomainName="ROOT"
)
cls.admin_apiclient = cls.testclient.getUserApiClient(
UserName=adminAccountName,
DomainName="ROOT",
type=1
)
accountList = Account.list(
cls.apiclient,
name=userAccountName,
listAll="true"
)
cls.account = Account(accountList[0].__dict__)
accountList = Account.list(
cls.apiclient,
name=adminAccountName,
listAll="true"
)
cls.adminAccount = Account(accountList[0].__dict__)
cls.cleanup = [
cls.account,
cls.adminAccount,
cls.serviceOffering
]
cls.vm_admin = VirtualMachine.create(
cls.admin_apiclient,
{},
zoneid=cls.zone.id,
serviceofferingid=cls.serviceOffering.id,
templateid=cls.template.id
)
cls.vm_user = VirtualMachine.create(
cls.user_apiclient,
{},
zoneid=cls.zone.id,
serviceofferingid=cls.serviceOffering.id,
templateid=cls.template.id
)
except Exception as e:
printex = traceback.format_exc()
cls.debug("Exception Occurred : {0}".format(printex))
cleanup_resources(cls.apiclient, cls.cleanup)
raise Exception("Failed to create the setup required to execute the test cases: %s" % e)
@classmethod
def tearDownClass(cls):
cls.apiclient = super(TestOrganizationStates, cls).getClsTestClient().getApiClient()
hostupdResp = Host.update(cls.apiclient,
id=cls.host.id,
hosttags="")
cleanup_resources(cls.apiclient, cls.cleanup)
return
    def setUp(self):
        return
    def tearDown(self):
        return
## Test cases relating to disabling and enabling zone
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_11_disableZone(self):
"""
Disable Zone
Validate that listZones() returns the allocationstate as "Disabled"
"""
self.debug("Zone to be disabled: " + self.zone.id)
zoneupdResp = self.zone.update(self.apiclient, allocationstate="Disabled")
        self.assertEqual(zoneupdResp.allocationstate,
                         "Disabled",
                         "Disabling Zone did not set the allocationstate to Disabled")
        zonelistResp = Zone.list(self.apiclient, id=self.zone.id)
        self.assertEqual(zonelistResp[0].allocationstate,
                         "Disabled",
                         "Disabling Zone did not set the allocationstate to Disabled")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_12_disableZone_admin_deployVM(self):
"""
Validate that admin is allowed to deploy VM in a disabled zone
"""
vm = VirtualMachine.create(
self.admin_apiclient,
{},
zoneid=self.zone.id,
serviceofferingid=self.serviceOffering.id,
templateid=self.template.id
)
self.assertEqual(vm.state,
"Running",
"Admin is not able to deploy Vm in a disabled Zone! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_13_disableZone_admin_stop_startVM(self):
"""
Validate that admin is allowed to stop and start existing VMs that are running on a disabled zone
"""
self.vm_admin.stop(self.apiclient)
listResp = VirtualMachine.list(self.apiclient, id=self.vm_admin.id)
self.assertEqual(listResp[0].state,
VirtualMachine.STOPPED,
"Admin is not able to Stop Vm in a disabled Zone! ")
self.vm_admin.start(self.apiclient)
listResp = VirtualMachine.list(self.admin_apiclient, id=self.vm_admin.id)
        self.assertEqual(listResp[0].state,
                         VirtualMachine.RUNNING,
                         "Admin is not able to start Vm in a disabled Zone! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_14_disableZone_user_deployVM(self):
"""
Validate that regular user is not allowed to deploy VM in a disabled zone
"""
try:
vm = VirtualMachine.create(
self.user_apiclient,
{},
zoneid=self.zone.id,
serviceofferingid=self.serviceOffering.id,
templateid=self.template.id
)
self.fail("Regular user is allowed to deploy VM in a zone that is disabled")
except Exception as e:
self.debug("Exception thrown when deploying Virtual Machine on a disabled zone - %s" % e)
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_15_disableZone_user_stop_startVM(self):
"""
Validate that regular user is allowed to stop and start existing VMs in a disabled zone
"""
self.vm_user.stop(self.user_apiclient)
listResp = VirtualMachine.list(self.user_apiclient, id=self.vm_user.id)
self.assertEqual(listResp[0].state,
VirtualMachine.STOPPED,
"Regular user is not able to Stop Vm in a disabled Zone! ")
self.vm_user.start(self.user_apiclient)
listResp = VirtualMachine.list(self.user_apiclient, id=self.vm_user.id)
        self.assertEqual(listResp[0].state,
                         VirtualMachine.RUNNING,
                         "Regular user is not able to start Vm in a disabled Zone! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_16_enableZone(self):
"""
        Enable Zone that is disabled
Validate that listZones() returns the allocationstate as "Enabled"
"""
self.debug("Zone to be enabled: " + self.zone.id)
zoneupdResp = self.zone.update(self.apiclient, allocationstate="Enabled")
        self.assertEqual(zoneupdResp.allocationstate,
                         "Enabled",
                         "Enabling Zone did not set the allocationstate to Enabled")
        zonelistResp = Zone.list(self.apiclient, id=self.zone.id)
        self.assertEqual(zonelistResp[0].allocationstate,
                         "Enabled",
                         "Enabling Zone did not set the allocationstate to Enabled")
## Test cases relating to disabling and enabling pod
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_21_disablePod(self):
"""
Disable Pod
Validate that listPods() returns the allocationstate as "Disabled"
"""
self.debug("Pod to be disabled: " + self.zone.id)
podupdResp = self.pod.update(self.apiclient, allocationstate="Disabled", id=self.pod.id)
self.assertEqual(podupdResp.allocationstate,
"Disabled",
"Disabling Pod did not set the alloctionstate to Disabled")
podlistResp = Pod.list(self.apiclient, id=self.pod.id)
self.assertEqual(podlistResp[0].allocationstate,
"Disabled",
"Disabling Pod did not set the alloctionstate to Disabled")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_22_disablePod_admin_deployVM(self):
"""
Validate that admin is allowed to deploy VM in a disabled pod
"""
vm = VirtualMachine.create(
self.admin_apiclient,
{},
zoneid=self.zone.id,
serviceofferingid=self.serviceOffering.id,
templateid=self.template.id
)
self.assertEqual(vm.state,
"Running",
"Admin is not able to deploy Vm in a disabled Pod! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_23_disablePod_admin_stop_startVM(self):
"""
Validate that admin is allowed to stop and start existing VMs running in a disabled pod
"""
self.vm_admin.stop(self.admin_apiclient)
listResp = VirtualMachine.list(self.apiclient, id=self.vm_admin.id)
self.assertEqual(listResp[0].state,
VirtualMachine.STOPPED,
"Admin is not able to Stop Vm in a disabled Pod! ")
self.vm_admin.start(self.admin_apiclient)
listResp = VirtualMachine.list(self.apiclient, id=self.vm_admin.id)
        self.assertEqual(listResp[0].state,
                         VirtualMachine.RUNNING,
                         "Admin is not able to start Vm in a disabled Pod! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_24_disablePod_user_deployVM(self):
"""
Validate that regular user is not allowed to deploy VM in a disabled pod
"""
try:
vm = VirtualMachine.create(
self.user_apiclient,
{},
zoneid=self.zone.id,
serviceofferingid=self.serviceOffering.id,
templateid=self.template.id
)
self.fail("Regular user is allowed to deploy VM in a zone that is disabled")
except Exception as e:
self.debug("Exception thrown when deploying Virtual Machine on a disabled zone - %s" % e)
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_25_disablePod_user_stop_startVM(self):
"""
        Validate that regular user is allowed to stop and start existing VMs running in a disabled pod
"""
self.vm_user.stop(self.user_apiclient)
listResp = VirtualMachine.list(self.user_apiclient, id=self.vm_user.id)
self.assertEqual(listResp[0].state,
VirtualMachine.STOPPED,
"Regular user is not able to Stop Vm in a disabled Pod! ")
self.vm_user.start(self.user_apiclient)
listResp = VirtualMachine.list(self.user_apiclient, id=self.vm_user.id)
        self.assertEqual(listResp[0].state,
                         VirtualMachine.RUNNING,
                         "Regular user is not able to start Vm in a disabled Pod! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_26_enablePod(self):
"""
        Enable Pod that is disabled
Validate that listPods() returns the allocationstate as "Enabled"
"""
self.debug("Pod to be enabled: " + self.zone.id)
podupdResp = self.pod.update(self.apiclient, allocationstate="Enabled", id=self.pod.id)
self.assertEqual(podupdResp.allocationstate,
"Enabled",
"Enabling Pod did not set the alloctionstate to Enabled")
podlistResp = Pod.list(self.apiclient, id=self.pod.id)
self.assertEqual(podlistResp[0].allocationstate,
"Enabled",
"Enabling Pod did not set the alloctionstate to Enabled")
## Test cases relating to disabling and enabling cluster
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_31_disableCluster(self):
"""
Disable Cluster
Validate that listClusters() returns the allocationstate as "Disabled"
"""
self.debug("Cluster to be disabled: " + self.cluster.id)
clusterupdResp = self.cluster.update(self.apiclient, allocationstate="Disabled", id=self.cluster.id)
        self.assertEqual(clusterupdResp.allocationstate,
                         "Disabled",
                         "Disabling Cluster did not set the allocationstate to Disabled")
        clusterlistResp = Cluster.list(self.apiclient, id=self.cluster.id)
        self.assertEqual(clusterlistResp[0].allocationstate,
                         "Disabled",
                         "Disabling Cluster did not set the allocationstate to Disabled")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_32_disableCluster_admin_deployVM(self):
"""
Validate that admin is allowed to deploy VM in a disabled cluster
"""
vm = VirtualMachine.create(
self.admin_apiclient,
{},
zoneid=self.zone.id,
serviceofferingid=self.serviceOffering.id,
templateid=self.template.id
)
self.assertEqual(vm.state,
"Running",
"Admin is not able to deploy Vm in a disabled Cluster! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_33_disableCluster_admin_stop_startVM(self):
"""
Validate that admin is allowed to stop and start existing VMs that are running in a disabled cluster
"""
self.vm_admin.stop(self.admin_apiclient)
listResp = VirtualMachine.list(self.apiclient, id=self.vm_admin.id)
self.assertEqual(listResp[0].state,
VirtualMachine.STOPPED,
"Admin is not able to Stop Vm in a disabled Cluster! ")
self.vm_admin.start(self.admin_apiclient)
listResp = VirtualMachine.list(self.apiclient, id=self.vm_admin.id)
        self.assertEqual(listResp[0].state,
                         VirtualMachine.RUNNING,
                         "Admin is not able to start Vm in a disabled Cluster! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_34_disableCluster_user_deployVM(self):
"""
Validate that regular user is not allowed to deploy VM in a disabled cluster
"""
try:
vm = VirtualMachine.create(
self.user_apiclient,
{},
zoneid=self.zone.id,
serviceofferingid=self.serviceOffering.id,
templateid=self.template.id
)
self.fail("Regular user is allowed to deploy VM in a cluster that is disabled")
except Exception as e:
self.debug("Exception thrown when deploying Virtual Machine on a disabled cluster - %s" % e)
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_35_disableCluster_user_stop_startVM(self):
"""
Validate that regular user is allowed to stop and start existing VMs that are running in a disabled cluster
"""
self.vm_user.stop(self.user_apiclient)
listResp = VirtualMachine.list(self.user_apiclient, id=self.vm_user.id)
self.assertEqual(listResp[0].state,
VirtualMachine.STOPPED,
"Regular user is not able to Stop Vm in a disabled Cluster! ")
self.vm_user.start(self.user_apiclient)
listResp = VirtualMachine.list(self.user_apiclient, id=self.vm_user.id)
        self.assertEqual(listResp[0].state,
                         VirtualMachine.RUNNING,
                         "Regular user is not able to start Vm in a disabled Cluster! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_36_enableCluster(self):
"""
        Enable Cluster that is disabled
Validate that listClusters() returns the allocationstate as "Enabled"
"""
self.debug("Cluster to be enabled: " + self.cluster.id)
clusterupdResp = self.cluster.update(self.apiclient, allocationstate="Enabled", id=self.cluster.id)
        self.assertEqual(clusterupdResp.allocationstate,
                         "Enabled",
                         "Enabling Cluster did not set the allocationstate to Enabled")
        clusterlistResp = Cluster.list(self.apiclient, id=self.cluster.id)
        self.assertEqual(clusterlistResp[0].allocationstate,
                         "Enabled",
                         "Enabling Cluster did not set the allocationstate to Enabled")
## Test cases relating to disabling and enabling host
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_41_disableHost(self):
"""
Disable Host
Validate that listHosts() returns the allocationstate as "Disabled"
"""
self.debug("Host to be disabled: " + self.host.id)
hostupdResp = Host.update(self.apiclient, id=self.host.id, allocationstate="Disable")
        self.assertEqual(hostupdResp.resourcestate,
                         "Disabled",
                         "Disabling Host did not set the resourcestate to Disabled")
        hostlistResp = Host.list(self.apiclient, id=self.host.id)
        self.assertEqual(hostlistResp[0].resourcestate,
                         "Disabled",
                         "Disabling Host did not set the resourcestate to Disabled")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_42_disableHost_admin_deployVM(self):
"""
Validate that admin is allowed to deploy VM in a disabled host by passing hostId parameter
"""
vm = VirtualMachine.create(
self.admin_apiclient,
{},
zoneid=self.zone.id,
serviceofferingid=self.serviceOffering.id,
templateid=self.template.id,
hostid=self.host.id
)
self.assertEqual(vm.state,
"Running",
"Admin is not able to deploy Vm in a disabled Host! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_43_disableHost_admin_deployVM(self):
"""
Validate that admin is allowed to deploy VM in a disabled host without passing hostId parameter
"""
try:
vm = VirtualMachine.create(
self.admin_apiclient,
{},
zoneid=self.zone.id,
serviceofferingid=self.serviceOffering.id,
templateid=self.template.id
)
        except Exception:
            self.fail("Failed to deploy VM, this issue was hit: https://issues.apache.org/jira/browse/CLOUDSTACK-7735")
self.assertEqual(vm.state,
"Running",
"Admin is not able to deploy Vm in a disabled Host! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_44_disableHost_admin_stop_startVM(self):
"""
Validate that admin is allowed to stop and start existing VMs running in a disabled host
"""
self.vm_admin.stop(self.admin_apiclient)
listResp = VirtualMachine.list(self.apiclient, id=self.vm_admin.id)
self.assertEqual(listResp[0].state,
VirtualMachine.STOPPED,
"Admin is not able to Stop Vm in a disabled Host! ")
try:
self.vm_admin.start(self.admin_apiclient)
        except Exception:
            self.fail("Failed to start VM, this issue was hit: https://issues.apache.org/jira/browse/CLOUDSTACK-7735")
listResp = VirtualMachine.list(self.apiclient, id=self.vm_admin.id)
        self.assertEqual(listResp[0].state,
                         VirtualMachine.RUNNING,
                         "Admin is not able to start Vm in a disabled Host! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_45_disableHost_user_deployVM(self):
"""
Validate that regular user is not allowed to deploy VM in a disabled host
"""
try:
vm = VirtualMachine.create(
self.user_apiclient,
{},
zoneid=self.zone.id,
serviceofferingid=self.serviceOffering.id,
templateid=self.template.id
)
self.fail("Regular user is allowed to deploy VM in a host that is disabled")
except Exception as e:
self.debug("Exception thrown when deploying Virtual Machine on a disabled host - %s" % e)
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_46_disableHost_user_stop_startVM(self):
"""
Validate that regular user is allowed to stop and start existing VMs running in a disabled host
"""
self.vm_user.stop(self.user_apiclient)
listResp = VirtualMachine.list(self.user_apiclient, id=self.vm_user.id)
self.assertEqual(listResp[0].state,
VirtualMachine.STOPPED,
"Regular user is not able to Stop Vm in a disabled Host! ")
try:
self.vm_user.start(self.user_apiclient)
        except Exception:
            self.fail("Failed to start VM, this issue was hit: https://issues.apache.org/jira/browse/CLOUDSTACK-7735")
listResp = VirtualMachine.list(self.user_apiclient, id=self.vm_user.id)
        self.assertEqual(listResp[0].state,
                         VirtualMachine.RUNNING,
                         "Regular user is not able to start Vm in a disabled Host! ")
@attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
def test_47_enableHost(self):
"""
        Enable Host that is disabled
Validate that listHosts() returns the allocationstate as "Enabled"
"""
self.debug("Host to be enabled: " + self.host.id)
hostupdResp = Host.update(self.apiclient, id=self.host.id, allocationstate="Enable")
        self.assertEqual(hostupdResp.resourcestate,
                         "Enabled",
                         "Enabling Host did not set the resourcestate to Enabled")
        hostlistResp = Host.list(self.apiclient, id=self.host.id)
        self.assertEqual(hostlistResp[0].resourcestate,
                         "Enabled",
                         "Enabling Host did not set the resourcestate to Enabled")
29843cde40a686912d9c5b78a12cf2d9d6355a9a | 6,133 | py | Python
tests/app/api/services/test_abr.py | AusDTO/dto-digitalmarketplace-api | 937843c9c01a71518cf4688b4daa55bbe7df1965 | ["MIT"] | 6 | 2017-06-09T03:38:53.000Z | 2021-12-22T02:42:15.000Z
tests/app/api/services/test_abr.py | AusDTO/dto-digitalmarketplace-api | 937843c9c01a71518cf4688b4daa55bbe7df1965 | ["MIT"] | 47 | 2016-08-02T05:21:31.000Z | 2022-03-28T01:14:17.000Z
tests/app/api/services/test_abr.py | AusDTO/dto-digitalmarketplace-api | 937843c9c01a71518cf4688b4daa55bbe7df1965 | ["MIT"] | 7 | 2016-09-13T13:07:18.000Z | 2021-02-17T10:16:21.000Z
import pytest
from app.api.services import abr_service
from app.api.business.errors import AbrError
import requests
import mock
from mock import patch
class TestAbrService:
def mocked_find_business_by_abn(self):
data = '<ABR><response><stateCode>NSW</stateCode><postcode>2750</postcode>'\
'<organisationName>yay</organisationName></response></ABR>'
return data
def mocked_payload_exception(self):
data = '<ABR><response><exception><exceptionDescription>Search text is not a '\
'valid ABN or ACN</exceptionDescription><exceptionCode>WEBSERVICES</exceptionCode>'\
'</exception></response></ABR>'
return data
def mocked_payload_exception_with_no_description(self):
data = '<ABR><response><exception><exceptionCode>WEBSERVICES</exceptionCode>'\
'</exception></response></ABR>'
return data
def mocked_payload_exception_with_no_code(self):
data = '<ABR><response><exception><exceptionDescription>Search text is not a '\
'valid ABN or ACN</exceptionDescription>'\
'</exception></response></ABR>'
return data
def mocked_payload_exception_with_no_code_and_no_description(self):
data = '<ABR><response></response></ABR>'
return data
@mock.patch("app.api.services.abr_service.call_abr_api")
def test_abr_response_can_be_parsed(self, mocked_find_business_by_abn):
expected_parsed_data = {'state': 'NSW', 'organisation_name': 'yay', 'postcode': '2750'}
data = abr_service.get_data(self.mocked_find_business_by_abn())
assert data == expected_parsed_data
@mock.patch("app.api.services.abr_service.call_abr_api")
def test_abr_exception_can_be_parsed(self, mocked_payload_exception):
expected_msg = 'WEBSERVICES: Search text is not a valid ABN or ACN'
result = abr_service.get_abr_exception(self.mocked_payload_exception())
assert result == expected_msg
@mock.patch("app.api.services.abr_service.call_abr_api")
def test_abr_exception_can_be_parsed_with_no_exception_desc(self, mocked_payload_exception_with_no_description):
expected_msg = 'WEBSERVICES: No exception description found'
result = abr_service.get_abr_exception(self.mocked_payload_exception_with_no_description())
assert result == expected_msg
@mock.patch("app.api.services.abr_service.call_abr_api")
def test_abr_exception_can_be_parsed_with_no_exception_code(self, mocked_payload_exception_with_no_code):
expected_msg = 'No exception code found: Search text is not a valid ABN or ACN'
result = abr_service.get_abr_exception(self.mocked_payload_exception_with_no_code())
assert result == expected_msg
@mock.patch("app.api.services.abr_service.call_abr_api")
def test_abr_exception_parsed_with_no_ex_code_desc(self, mocked_payload_exception_with_no_code_and_no_description):
expected_msg = None
result = abr_service.get_abr_exception(self.mocked_payload_exception_with_no_code_and_no_description())
assert result == expected_msg
@mock.patch('app.api.services.abr_service.call_abr_api')
    def test_connection_error_exception_raised(self, mock_requests_get):
mock_requests_get.side_effect = requests.exceptions.ConnectionError()
url = 'http://google.com'
with pytest.raises(requests.exceptions.ConnectionError):
abr_service.call_abr_api(url)
@mock.patch('app.api.services.abr_service.call_abr_api')
def test_ssl_error_exception_raised(self, mock_requests_get):
mock_requests_get.side_effect = requests.exceptions.SSLError()
url = 'http://google.com'
with pytest.raises(requests.exceptions.SSLError):
abr_service.call_abr_api(url)
@mock.patch('app.api.services.abr_service.call_abr_api')
def test_http_error_exception_raised(self, mock_requests_get):
mock_requests_get.side_effect = requests.exceptions.HTTPError()
url = 'http://google.com'
with pytest.raises(requests.exceptions.HTTPError):
abr_service.call_abr_api(url)
@mock.patch('app.api.services.abr_service.call_abr_api')
def test_proxy_error_exception_raised(self, mock_requests_get):
mock_requests_get.side_effect = requests.exceptions.ProxyError()
url = 'http://google.com'
with pytest.raises(requests.exceptions.ProxyError):
abr_service.call_abr_api(url)
@mock.patch('app.api.services.abr_service.call_abr_api')
def test_http_exception_message(self, mock_requests_get):
mock_requests_get.side_effect = requests.exceptions.HTTPError('HTTP Error')
url = 'http://google.com'
with pytest.raises(requests.exceptions.HTTPError) as ex_info:
abr_service.call_abr_api(url)
assert str(ex_info.value) == 'HTTP Error'
@mock.patch('app.api.services.abr_service.call_abr_api')
def test_proxy_exception_message(self, mock_requests_get):
mock_requests_get.side_effect = requests.exceptions.ProxyError('Proxy Error')
url = 'http://google.com'
with pytest.raises(requests.exceptions.ProxyError) as ex_msg:
abr_service.call_abr_api(url)
assert str(ex_msg.value) == 'Proxy Error'
@mock.patch('app.api.services.abr_service.call_abr_api')
def test_ssl_exception_message(self, mock_requests_get):
mock_requests_get.side_effect = requests.exceptions.SSLError('SSL Error')
url = 'http://google.com'
with pytest.raises(requests.exceptions.SSLError) as ex_msg:
abr_service.call_abr_api(url)
assert str(ex_msg.value) == 'SSL Error'
@mock.patch('app.api.services.abr_service.call_abr_api')
def test_exception_message(self, mock_requests_get):
mock_requests_get.side_effect = requests.exceptions.RequestException('Unexpected request error')
url = 'http://google.com'
with pytest.raises(requests.exceptions.RequestException) as ex_msg:
abr_service.call_abr_api(url)
assert str(ex_msg.value) == 'Unexpected request error'
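# A hypothetical consolidation sketch (not in the original file): the four
# exception-propagation tests above differ only in the exception class and
# message, which pytest.mark.parametrize can express once.
@pytest.mark.parametrize('exc_cls, msg', [
    (requests.exceptions.ConnectionError, 'Connection Error'),
    (requests.exceptions.SSLError, 'SSL Error'),
    (requests.exceptions.HTTPError, 'HTTP Error'),
    (requests.exceptions.ProxyError, 'Proxy Error'),
])
@mock.patch('app.api.services.abr_service.call_abr_api')
def test_abr_api_error_propagation(mock_requests_get, exc_cls, msg):
    # The patched call_abr_api raises; the exception should reach the caller intact.
    mock_requests_get.side_effect = exc_cls(msg)
    with pytest.raises(exc_cls) as ex_info:
        abr_service.call_abr_api('http://google.com')
    assert str(ex_info.value) == msg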
469675e829c35b4f742d00ddf8703e46eb2ca6ae | 96 | py | Python
venv/lib/python3.8/site-packages/virtualenv/seed/wheels/acquire.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | ["MIT"] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/virtualenv/seed/wheels/acquire.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/virtualenv/seed/wheels/acquire.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | ["MIT"] | null | null | null
/home/runner/.cache/pip/pool/48/9b/8a/86a24f142fd29f8950b6d23fa4019951798edcb1a055a433bc6cba586a
46a5e14c78ed76c9faadade20e3030f9fd288872 | 34 | py | Python
basics/hello.py | ashwinkonireddy/pythonAutomation | d551a51744a4f8ba3d6f8def3c070f6565ac233f | ["MIT"] | null | null | null
basics/hello.py | ashwinkonireddy/pythonAutomation | d551a51744a4f8ba3d6f8def3c070f6565ac233f | ["MIT"] | null | null | null
basics/hello.py | ashwinkonireddy/pythonAutomation | d551a51744a4f8ba3d6f8def3c070f6565ac233f | ["MIT"] | null | null | null
print("Welcome To python class 1")
313254db8a5cd4c9ebec4f3e3c31ea4d8f0f811e | 30 | py | Python
app/rooms/examples/eg001_create_room_with_data/__init__.py | olegliubimov/code-examples-python | 7af8c58138a9dd0f3b0be12eff1768ae23e449d3 | ["MIT"] | 21 | 2020-05-13T21:08:44.000Z | 2022-02-18T01:32:16.000Z
app/rooms/examples/eg001_create_room_with_data/__init__.py | olegliubimov/code-examples-python | 7af8c58138a9dd0f3b0be12eff1768ae23e449d3 | ["MIT"] | 8 | 2020-11-23T09:28:04.000Z | 2022-02-02T12:04:08.000Z
app/rooms/examples/eg001_create_room_with_data/__init__.py | olegliubimov/code-examples-python | 7af8c58138a9dd0f3b0be12eff1768ae23e449d3 | ["MIT"] | 26 | 2020-05-12T22:20:01.000Z | 2022-03-09T10:57:27.000Z
from .views import eg001Rooms
314350f200594460a6c5fdfcfae4a63419885883 | 34,125 | py | Python
pirates/leveleditor/worldData/interior_spanish_store_voodoo.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | ["BSD-3-Clause"] | 81 | 2018-04-08T18:14:24.000Z | 2022-01-11T07:22:15.000Z
pirates/leveleditor/worldData/interior_spanish_store_voodoo.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | ["BSD-3-Clause"] | 4 | 2018-09-13T20:41:22.000Z | 2022-01-08T06:57:00.000Z
pirates/leveleditor/worldData/interior_spanish_store_voodoo.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | ["BSD-3-Clause"] | 26 | 2018-05-26T12:49:27.000Z | 2021-09-11T09:11:59.000Z
from pandac.PandaModules import Point3, VBase3, Vec4, Vec3
objectStruct = {'Objects': {'1155774520.99fxlara0': {'Type': 'Building Interior','Name': '','Instanced': True,'Objects': {'1167169513.29kmuller': {'Type': 'Furniture','DisableCollision': False,'Holiday': '','Hpr': VBase3(89.625, 0.0, 0.0),'Pos': Point3(-14.077, -6.928, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/counter_spanish'}},'1167169555.04kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(-174.358, 0.0, 0.0),'Pos': Point3(2.514, 21.672, 0.0),'Scale': VBase3(1.0, 0.976, 1.0),'Visual': {'Color': (0.699999988079071, 0.699999988079071, 0.699999988079071, 1.0),'Model': 'models/props/prop_group_G'}},'1167169630.62kmuller': {'Type': 'Barrel','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-7.037, 21.686, 0.0),'Scale': VBase3(0.754, 0.754, 0.754),'Visual': {'Color': (0.7900000214576721, 0.7799999713897705, 0.699999988079071, 1.0),'Model': 'models/props/barrel'}},'1167169672.53kmuller': {'Type': 'Crate','DisableCollision': True,'Hpr': VBase3(52.479, 0.0, 0.0),'Pos': Point3(-7.743, 17.253, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.54, 0.46, 0.45, 1.0),'Model': 'models/props/crate_04'}},'1167169797.65kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(179.951, 0.0, 0.0),'Pos': Point3(-1.753, -22.59, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/cabinet_spanish'}},'1167169830.51kmuller': {'Type': 'Furniture','DisableCollision': True,'Holiday': '','Hpr': VBase3(179.951, 0.0, 0.0),'Pos': Point3(-6.733, -22.561, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Color': (0.7900000214576721, 0.7799999713897705, 0.699999988079071, 1.0),'Model': 'models/props/cabinet_spanish'}},'1167169871.42kmuller': {'Type': 'Furniture','DisableCollision': True,'Hpr': VBase3(179.951, 0.0, 0.0),'Pos': Point3(-11.677, -22.569, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cabinet_spanish'}},'1167169962.32kmuller': {'Type': 'Furniture','DisableCollision': True,'Hpr': VBase3(179.951, 0.0, 0.0),'Pos': Point3(3.235, -22.57, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cabinet_spanish'}},'1167170197.59kmuller': {'Type': 'Furniture','DisableCollision': True,'Hpr': VBase3(179.035, 0.0, 0.0),'Pos': Point3(-1.853, 26.838, 5.265),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cabinet_spanish_low'}},'1167170325.24kmuller': {'Type': 'Furniture','DisableCollision': True,'Hpr': VBase3(178.919, 0.0, 0.0),'Pos': Point3(4.209, 26.822, 5.265),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/cabinet_spanish_low'}},'1167170420.31kmuller': {'Type': 'Barrel','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(8.693, 27.554, 5.265),'Scale': VBase3(0.625, 0.625, 0.625),'Visual': {'Color': (0.44999998807907104, 0.3799999952316284, 0.25, 1.0),'Model': 'models/props/barrel'}},'1167170455.98kmuller': {'Type': 'Crate','DisableCollision': True,'Hpr': VBase3(-82.457, 0.0, 0.0),'Pos': Point3(-6.952, 28.315, 5.265),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.7900000214576721, 0.7799999713897705, 0.699999988079071, 1.0),'Model': 'models/props/crate_04'}},'1167170519.04kmuller': {'Type': 'Crate','DisableCollision': True,'Hpr': VBase3(-23.486, 0.0, 0.0),'Pos': Point3(-6.284, 28.094, 8.019),'Scale': VBase3(0.71, 0.71, 0.71),'Visual': {'Color': (0.7900000214576721, 0.7799999713897705, 0.699999988079071, 1.0),'Model': 'models/props/crate'}},'1167170604.54kmuller': {'Type': 
'Furniture','DisableCollision': True,'Hpr': VBase3(0.986, 0.0, 0.0),'Pos': Point3(-1.887, 46.262, 5.181),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/bookshelf_spanish'}},'1167170618.14kmuller': {'Type': 'Furniture','DisableCollision': True,'Hpr': VBase3(0.986, 0.0, 0.0),'Pos': Point3(7.126, 46.343, 5.181),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/bookshelf_spanish'}},'1167170657.99kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(155.826, 0.0, 0.0),'Pos': Point3(16.68, 49.543, 5.265),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/prop_group_A'}},'1167174651.37kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Holiday': '','Hpr': VBase3(-89.936, 0.0, 0.0),'Pos': Point3(17.241, -7.728, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'VisSize': '','Visual': {'Model': 'models/props/fireplace_stucco'}},'1167174743.4kmuller': {'Type': 'Prop_Groups','DisableCollision': True,'Hpr': VBase3(71.999, 0.0, 0.0),'Pos': Point3(22.327, -19.899, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.7900000214576721, 0.7799999713897705, 0.699999988079071, 1.0),'Model': 'models/props/prop_group_A'}},'1167174807.87kmuller': {'Type': 'Tools','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(18.558, 0.776, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/butter_churn'}},'1167867948.46kmuller': {'Type': 'Mortar_Pestle','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-14.133, -11.946, 3.371),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/mortar_pestle_stone'}},'1167867959.76kmuller': {'Type': 'Mortar_Pestle','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-13.256, -22.848, 5.057),'Scale': VBase3(1.615, 1.615, 1.615),'Visual': {'Model': 'models/props/mortar_pestle_wood'}},'1167868005.46kmuller': {'Type': 'ChickenCage','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-18.438, 3.594, -0.02),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/ChickenCage'}},'1167868021.48kmuller': {'Type': 'ChickenCage','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-18.469, 6.503, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/ChickenCage'}},'1181176768.6kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.0, 0.546, 0.0),'Pos': Point3(-10.179, -22.415, 8.921),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_doctor_bottles'}},'1181176924.96kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-176.822, 0.0, 0.0),'Pos': Point3(-2.101, -22.857, 3.231),'Scale': VBase3(0.681, 0.681, 0.681),'Visual': {'Model': 'models/props/shop_doctor_bottles'}},'1181177042.99kmuller': {'Type': 'Jugs_and_Jars','DisableCollision': False,'Hpr': VBase3(-116.65, 0.0, 0.0),'Pos': Point3(-5.088, -22.397, 6.978),'Scale': VBase3(0.714, 0.714, 0.714),'Visual': {'Model': 'models/props/bottle_brown'}},'1181177061.14kmuller': {'Type': 'Jugs_and_Jars','DisableCollision': False,'Hpr': VBase3(57.846, 0.0, 0.0),'Pos': Point3(-6.01, -22.266, 6.95),'Scale': VBase3(0.792, 0.792, 0.792),'Visual': {'Model': 'models/props/bottle_green'}},'1181177147.42kmuller': {'Type': 'Interior_furnishings','DisableCollision': True,'Hpr': VBase3(90.286, 0.0, 0.0),'Pos': Point3(-17.133, -22.111, 0.0),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_weapons_rack_floor'}},'1181177165.24kmuller': {'Type': 
'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(156.308, -17.861, 172.335),'Pos': Point3(-17.41, -21.727, 0.221),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_voodoo_staff'}},'1181177192.32kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-178.173, -16.759, 0.0),'Pos': Point3(-18.367, -21.804, 0.202),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_voodoo_staff_skull'}},'1181177252.45kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(46.523, 18.747, 161.276),'Pos': Point3(-16.252, -21.441, 0.426),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.64, 0.59, 0.97, 1.0),'Model': 'models/props/shop_voodoo_staff'}},'1181177360.49kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-13.705, -6.61, 3.6),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_weapons_rack_table'}},'1181177379.57kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(0.001, -89.487, -179.399),'Pos': Point3(-13.684, -4.731, 4.203),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (1.0, 0.9599999785423279, 0.75, 1.0),'Model': 'models/props/shop_voodoo_staff_skull'}},'1181177488.71kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-180.0, 84.575, 180.0),'Pos': Point3(-9.947, -23.049, 3.577),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_voodoo_doll'}},'1181177536.15kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-177.405, 73.63, 177.511),'Pos': Point3(-10.475, -23.03, 3.6),'Scale': VBase3(1.163, 1.163, 1.163),'Visual': {'Color': (0.6200000047683716, 0.6600000262260437, 0.6200000047683716, 1.0),'Model': 'models/props/shop_voodoo_doll'}},'1181177605.85kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-168.383, 75.201, 168.758),'Pos': Point3(-10.958, -23.023, 3.553),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Color': (0.7900000214576721, 0.6499999761581421, 0.5299999713897705, 1.0),'Model': 'models/props/shop_voodoo_doll'}},'1181177638.01kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(171.18, 74.835, -171.483),'Pos': Point3(-11.515, -23.115, 3.555),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_voodoo_doll'}},'1181177689.26kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(180.0, 76.487, 180.0),'Pos': Point3(-12.621, -23.027, 3.444),'Scale': VBase3(0.654, 0.654, 0.654),'Visual': {'Model': 'models/props/shop_voodoo_doll'}},'1181177714.14kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(164.026, 80.383, -164.239),'Pos': Point3(-12.935, -22.992, 3.45),'Scale': VBase3(0.646, 0.646, 0.646),'Visual': {'Color': (0.5799999833106995, 0.5699999928474426, 0.47999998927116394, 1.0),'Model': 'models/props/shop_voodoo_doll'}},'1181177748.01kmuller': {'Type': 'Interior_furnishings','DisableCollision': False,'Hpr': VBase3(-180.0, 78.192, 180.0),'Pos': Point3(-13.334, -23.09, 3.551),'Scale': VBase3(1.0, 1.0, 1.0),'Visual': {'Model': 'models/props/shop_voodoo_doll'}},'1181177792.98kmuller': {'Type': 'Rock','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(3.366, -22.084, 5.042),'Scale': VBase3(0.142, 0.142, 0.142),'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0),'Model': 'models/props/rock_1_sphere'}},'1181240276.04kmuller': 
{'Type': 'Rock','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(4.936, -22.371, 5.044),'Scale': VBase3(0.276, 0.276, 0.276),'Visual': {'Color': (0.30000001192092896, 0.30000001192092896, 0.30000001192092896, 1.0),'Model': 'models/props/rock_2_sphere'}},'1181240305.12kmuller': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(101.969, 0.0, 0.664),'Pos': Point3(1.631, -22.08, 5.071),'Scale': VBase3(0.627, 0.627, 0.627),'Visual': {'Color': (0.30000001192092896, 0.30000001192092896, 0.30000001192092896, 1.0),'Model': 'models/props/rock_4_sphere'}},'1181240338.17kmuller': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(33.309, -18.017, -99.629),'Pos': Point3(2.656, -21.917, 5.214),'Scale': VBase3(0.14, 0.14, 0.14),'Visual': {'Model': 'models/props/rock_3_sphere'}},'1181240378.42kmuller': {'Type': 'Rock','DisableCollision': True,'Hpr': VBase3(-104.71, 0.0, 0.0),'Pos': Point3(2.9, -21.847, 5.044),'Scale': VBase3(0.046, 0.046, 0.046),'Visual': {'Color': (0.5, 0.5, 0.5, 1.0),'Model': 'models/props/rock_1_sphere'}},'1181240452.62kmuller': {'Type': 'Pots','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-10.502, -22.334, 5.044),'Scale': VBase3(1.378, 1.378, 1.378),'Visual': {'Model': 'models/props/pot_A'}},'1181240524.84kmuller': {'Type': 'Pots','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-13.054, -22.281, 6.955),'Scale': VBase3(1.439, 1.439, 1.439),'Visual': {'Model': 'models/props/pot_B'}},'1181240558.07kmuller': {'Type': 'Trunks','DisableCollision': True,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-7.849, -22.3, 5.044),'Scale': VBase3(0.601, 0.601, 0.601),'Visual': {'Model': 'models/props/Trunk_rounded_2'}},'1185497945.93kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-97.821, 0.0, 0.0),'Pos': Point3(11.241, -19.792, -0.433),'Scale': VBase3(0.775, 1.0, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185497963.34kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-150.936, 0.0, 0.0),'Pos': Point3(13.43, -15.195, -0.461),'Scale': VBase3(0.432, 1.0, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185498025.14kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(0.329, 14.911, -0.493),'Scale': VBase3(2.099, 1.841, 1.841),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185498100.98kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': VBase3(-140.319, 0.0, 0.0),'Pos': Point3(18.052, 1.279, -0.31),'Scale': VBase3(0.572, 1.0, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_plane'}},'1185498134.45kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(0.531, 27.369, 5.288),'Scale': VBase3(3.912, 0.989, 1.0),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1185498167.43kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(3.326, 46.02, 4.158),'Scale': VBase3(3.806, 0.485, 1.914),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1185498195.21kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(15.787, 42.121, 4.797),'Scale': VBase3(1.769, 1.461, 1.212),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1185498225.17kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 
0.0),'Pos': Point3(-18.887, 4.927, -0.392),'Scale': VBase3(0.699, 1.161, 0.715),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1185498302.78kmuller': {'Type': 'Collision Barrier','DisableCollision': False,'Hpr': Point3(0.0, 0.0, 0.0),'Pos': Point3(-7.4, -22.485, -0.37),'Scale': VBase3(5.387, 0.527, 2.013),'Visual': {'Model': 'models/misc/pir_m_prp_lev_cambarrier_cube'}},'1257965445.33caoconno': {'Type': 'Door Locator Node','Name': 'door_locator','Hpr': VBase3(-180.0, 0.0, 0.0),'Pos': Point3(-13.419, 47.56, 5.309),'Scale': VBase3(1.0, 1.0, 1.0)},'1257965499.45caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(124.406, 5.371, -0.0),'Pos': Point3(14.632, -9.396, 5.316),'Scale': VBase3(1.379, 1.379, 1.379),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoStocking03_winter09'}},'1257965525.12caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(124.406, 0.0, 0.0),'Pos': Point3(14.617, -5.764, 5.316),'Scale': VBase3(1.379, 1.379, 1.379),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoStocking03_winter09'}},'1257965528.76caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(124.406, 0.0, 0.0),'Pos': Point3(14.632, -7.573, 5.519),'Scale': VBase3(1.379, 1.379, 1.379),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoStocking03_winter09'}},'1257965602.03caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(159.06, -74.912, -22.153),'Pos': Point3(-13.692, -7.024, 4.39),'Scale': VBase3(0.439, 0.439, 0.439),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}},'1257965681.46caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(124.855, 0.0, 0.0),'Pos': Point3(-8.141, -22.033, 3.3),'Scale': VBase3(0.502, 0.502, 0.502),'VisSize': '','Visual': {'Color': (1.0, 0.9599999785423279, 0.75, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift04_winter08'}},'1257965691.43caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(42.298, 0.0, 0.0),'Pos': Point3(-0.419, -22.255, 5.044),'Scale': VBase3(0.55, 0.55, 0.55),'VisSize': '','Visual': {'Color': (1.0, 0.6000000238418579, 0.800000011920929, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}},'1257965702.46caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(70.536, 0.0, 0.0),'Pos': Point3(-1.601, -22.038, 5.044),'Scale': VBase3(0.587, 0.587, 0.587),'VisSize': '','Visual': {'Color': (0.800000011920929, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift04_winter08'}},'1257965738.45caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(70.536, 0.0, 0.0),'Pos': Point3(-3.028, -22.313, 5.044),'Scale': VBase3(0.699, 0.699, 0.699),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}},'1257965753.14caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(111.184, 0.0, 0.0),'Pos': Point3(4.832, -21.946, 3.3),'Scale': VBase3(0.537, 0.537, 0.537),'VisSize': '','Visual': {'Color': (1.0, 1.0, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift04_winter08'}},'1257965770.29caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': 
VBase3(89.576, 0.0, 0.0),'Pos': Point3(-6.917, -22.075, 3.3),'Scale': VBase3(0.699, 0.699, 0.699),'VisSize': '','Visual': {'Color': (1.0, 0.800000011920929, 0.800000011920929, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift03_winter08'}},'1257965795.67caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(35.8, 0.0, 0.0),'Pos': Point3(-5.449, -22.032, 3.3),'Scale': VBase3(0.699, 0.699, 0.699),'VisSize': '','Visual': {'Color': (0.6000000238418579, 1.0, 0.800000011920929, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}},'1257965806.04caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(35.8, 0.0, 0.0),'Pos': Point3(2.026, -22.019, 3.3),'Scale': VBase3(0.599, 0.599, 0.599),'VisSize': '','Visual': {'Color': (1.0, 0.9599999785423279, 0.75, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift01_winter08'}},'1257965818.86caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-178.679, 2.362, -0.0),'Pos': Point3(-11.642, -21.156, 2.029),'Scale': VBase3(0.402, 0.402, 0.402),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257965862.56caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(75.806, 0.0, 0.0),'Pos': Point3(3.622, -22.15, 3.3),'Scale': VBase3(0.699, 0.699, 0.699),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoGift03_winter08'}},'1257965884.76caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(90.795, 0.0, 0.0),'Pos': Point3(-19.514, 9.722, 14.721),'Scale': VBase3(0.926, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257965915.03caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-178.679, 0.085, 0.0),'Pos': Point3(-1.712, -21.179, 2.032),'Scale': VBase3(0.402, 0.402, 0.402),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257965940.54caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-178.679, 0.103, 0.0),'Pos': Point3(-6.72, -21.17, 2.033),'Scale': VBase3(0.402, 0.402, 0.402),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257966011.85caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(179.705, -9.602, -0.0),'Pos': Point3(-17.202, -23.143, 4.42),'Scale': VBase3(0.769, 0.769, 0.769),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}},'1257966041.18caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(94.099, 0.0, 0.718),'Pos': Point3(-12.829, -1.345, 3.293),'Scale': VBase3(0.761, 0.761, 0.761),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}},'1257966066.34caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(89.586, 0.088, -1.116),'Pos': Point3(-12.816, -12.347, 3.243),'Scale': VBase3(1.359, 0.904, 0.572),'VisSize': '','Visual': {'Model': 
'models/props/pir_m_prp_hol_decoRibbon_winter08'}},'1257966095.53caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(94.099, -0.0, -1.119),'Pos': Point3(-12.816, -12.347, 3.243),'Scale': VBase3(0.761, 0.761, 0.761),'VisSize': '','Visual': {'Model': 'models/props/pir_m_prp_hol_decoBow_winter08'}},'1257966165.28caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-178.679, 0.0, 0.0),'Pos': Point3(3.231, -21.11, 2.033),'Scale': VBase3(0.402, 0.402, 0.402),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257966183.09caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(89.102, 0.0, 0.0),'Pos': Point3(-19.837, -4.713, 14.721),'Scale': VBase3(1.78, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoRibbon_winter08'}},'1257966197.23caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(90.795, 0.0, 0.0),'Pos': Point3(-19.536, -4.645, 14.721),'Scale': VBase3(0.926, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257966236.85caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(90.795, 0.0, 0.0),'Pos': Point3(-19.637, -19.16, 14.721),'Scale': VBase3(0.926, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257966258.46caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(90.795, 0.0, 0.0),'Pos': Point3(-19.638, -19.078, 14.721),'Scale': VBase3(1.78, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoRibbon_winter08'}},'1257966326.74caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-178.679, 0.0, 0.0),'Pos': Point3(14.278, -23.172, 14.195),'Scale': VBase3(0.926, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257966326.77caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-178.679, 0.0, 0.0),'Pos': Point3(-0.238, -23.204, 14.195),'Scale': VBase3(0.926, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257966326.79caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-178.679, 0.0, 0.0),'Pos': Point3(-14.606, -23.232, 14.195),'Scale': VBase3(0.926, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoSwag_winter08'}},'1257966326.81caoconno': {'Type': 'Holiday','DisableCollision': False,'Holiday': 'WinterFestival','Hpr': VBase3(-179.634, 0.0, 0.0),'Pos': Point3(-0.167, -23.505, 14.195),'Scale': VBase3(1.78, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoRibbon_winter08'}},'1257966326.82caoconno': {'Type': 'Holiday','DisableCollision': 
False,'Holiday': 'WinterFestival','Hpr': VBase3(-178.679, 0.0, 0.0),'Pos': Point3(14.196, -23.174, 14.195),'Scale': VBase3(1.78, 0.926, 0.926),'VisSize': '','Visual': {'Color': (0.6000000238418579, 0.800000011920929, 1.0, 1.0),'Model': 'models/props/pir_m_prp_hol_decoRibbon_winter08'}}},'Visual': {'Model': 'models/buildings/interior_spanish_store'}}},'Node Links': [],'Layers': {},'ObjectIds': {'1155774520.99fxlara0': '["Objects"]["1155774520.99fxlara0"]','1167169513.29kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167169513.29kmuller"]','1167169555.04kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167169555.04kmuller"]','1167169630.62kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167169630.62kmuller"]','1167169672.53kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167169672.53kmuller"]','1167169797.65kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167169797.65kmuller"]','1167169830.51kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167169830.51kmuller"]','1167169871.42kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167169871.42kmuller"]','1167169962.32kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167169962.32kmuller"]','1167170197.59kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167170197.59kmuller"]','1167170325.24kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167170325.24kmuller"]','1167170420.31kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167170420.31kmuller"]','1167170455.98kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167170455.98kmuller"]','1167170519.04kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167170519.04kmuller"]','1167170604.54kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167170604.54kmuller"]','1167170618.14kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167170618.14kmuller"]','1167170657.99kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167170657.99kmuller"]','1167174651.37kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167174651.37kmuller"]','1167174743.4kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167174743.4kmuller"]','1167174807.87kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167174807.87kmuller"]','1167867948.46kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167867948.46kmuller"]','1167867959.76kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167867959.76kmuller"]','1167868005.46kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167868005.46kmuller"]','1167868021.48kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1167868021.48kmuller"]','1181176768.6kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181176768.6kmuller"]','1181176924.96kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181176924.96kmuller"]','1181177042.99kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177042.99kmuller"]','1181177061.14kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177061.14kmuller"]','1181177147.42kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177147.42kmuller"]','1181177165.24kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177165.24kmuller"]','1181177192.32kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177192.32kmuller"]','1181177252.45kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177252.45kmuller"]','1181177360.49kmuller': 
'["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177360.49kmuller"]','1181177379.57kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177379.57kmuller"]','1181177488.71kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177488.71kmuller"]','1181177536.15kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177536.15kmuller"]','1181177605.85kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177605.85kmuller"]','1181177638.01kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177638.01kmuller"]','1181177689.26kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177689.26kmuller"]','1181177714.14kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177714.14kmuller"]','1181177748.01kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177748.01kmuller"]','1181177792.98kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181177792.98kmuller"]','1181240276.04kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181240276.04kmuller"]','1181240305.12kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181240305.12kmuller"]','1181240338.17kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181240338.17kmuller"]','1181240378.42kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181240378.42kmuller"]','1181240452.62kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181240452.62kmuller"]','1181240524.84kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181240524.84kmuller"]','1181240558.07kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1181240558.07kmuller"]','1185497945.93kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1185497945.93kmuller"]','1185497963.34kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1185497963.34kmuller"]','1185498025.14kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1185498025.14kmuller"]','1185498100.98kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1185498100.98kmuller"]','1185498134.45kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1185498134.45kmuller"]','1185498167.43kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1185498167.43kmuller"]','1185498195.21kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1185498195.21kmuller"]','1185498225.17kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1185498225.17kmuller"]','1185498302.78kmuller': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1185498302.78kmuller"]','1257965445.33caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965445.33caoconno"]','1257965499.45caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965499.45caoconno"]','1257965525.12caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965525.12caoconno"]','1257965528.76caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965528.76caoconno"]','1257965602.03caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965602.03caoconno"]','1257965681.46caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965681.46caoconno"]','1257965691.43caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965691.43caoconno"]','1257965702.46caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965702.46caoconno"]','1257965738.45caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965738.45caoconno"]','1257965753.14caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965753.14caoconno"]','1257965770.29caoconno': 
'["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965770.29caoconno"]','1257965795.67caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965795.67caoconno"]','1257965806.04caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965806.04caoconno"]','1257965818.86caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965818.86caoconno"]','1257965862.56caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965862.56caoconno"]','1257965884.76caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965884.76caoconno"]','1257965915.03caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965915.03caoconno"]','1257965940.54caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257965940.54caoconno"]','1257966011.85caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966011.85caoconno"]','1257966041.18caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966041.18caoconno"]','1257966066.34caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966066.34caoconno"]','1257966095.53caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966095.53caoconno"]','1257966165.28caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966165.28caoconno"]','1257966183.09caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966183.09caoconno"]','1257966197.23caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966197.23caoconno"]','1257966236.85caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966236.85caoconno"]','1257966258.46caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966258.46caoconno"]','1257966326.74caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966326.74caoconno"]','1257966326.77caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966326.77caoconno"]','1257966326.79caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966326.79caoconno"]','1257966326.81caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966326.81caoconno"]','1257966326.82caoconno': '["Objects"]["1155774520.99fxlara0"]["Objects"]["1257966326.82caoconno"]'}}
extraInfo = {'camPos': Point3(-0.0909081, -10.1466, 0.0782145),'camHpr': VBase3(0, 0, 0),'focalLength': 0.852765381336,'skyState': -2,'fog': 0}
| 11,375
| 33,922
| 0.690051
| 4,651
| 34,125
| 4.99785
| 0.13954
| 0.023489
| 0.022586
| 0.018068
| 0.614627
| 0.515638
| 0.489654
| 0.439234
| 0.40955
| 0.373457
| 0
| 0.282046
| 0.04715
| 34,125
| 3
| 33,923
| 11,375
| 0.432833
| 0
| 0
| 0
| 0
| 0
| 0.557346
| 0.313016
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| 31462927a47b411286fcf71684d60aeaa3144fc2
| 10,318
| py
| Python
| tests/sampling/sampler_tests.py
| skad00sh/gsudmlab-mvtsdata_toolkit
| 2c5495deb6d31eef556e7f410ac1c1632bffa961
| ["MIT"] | 7
| 2020-07-07T10:27:02.000Z
| 2021-04-02T13:20:24.000Z
| tests/sampling/sampler_tests.py
| skad00sh/gsudmlab-mvtsdata_toolkit
| 2c5495deb6d31eef556e7f410ac1c1632bffa961
| ["MIT"] | 3
| 2020-03-31T09:35:53.000Z
| 2021-08-23T20:46:33.000Z
| tests/sampling/sampler_tests.py
| skad00sh/gsudmlab-mvtsdata_toolkit
| 2c5495deb6d31eef556e7f410ac1c1632bffa961
| ["MIT"] | 3
| 2020-08-31T10:21:21.000Z
| 2021-12-01T11:38:17.000Z
|
import unittest
import os

import pandas as pd
import numpy as np

from mvtsdatatoolkit.sampling.sampler import Sampler
import CONSTANTS as CONST


class TestSampler(unittest.TestCase):

    @classmethod
    def setUpClass(cls) -> None:
        path_to_mvts = os.path.join(CONST.ROOT,
                                    'tests/test_dataset/extracted_features'
                                    '/extracted_features_TESTS_SAMPLER.csv')
        cls.mvts_df = pd.read_csv(path_to_mvts, sep='\t')

    @classmethod
    def tearDownClass(cls) -> None:
        pass

    def test_class_labels(self):
        """ Tests if sampler returns the class labels correctly."""
        sampler = Sampler(self.mvts_df, 'lab')
        expected_labels = {'X', 'M', 'C', 'NF'}
        actual_labels = set(sampler.class_labels)
        self.assertSetEqual(actual_labels, expected_labels)

    def test_original_class_populations(self):
        """ Tests if sampler returns class populations correctly."""
        sampler = Sampler(self.mvts_df, 'lab')
        expected_dict = {'NF': 36, 'M': 4, 'X': 2, 'C': 8}
        actual_dict = sampler.original_class_populations
        self.assertDictEqual(actual_dict, expected_dict)

    def test_original_class_ratios(self):
        """ Tests if sampler returns class ratios correctly."""
        sampler = Sampler(self.mvts_df, 'lab')
        expected_dict = {'NF': 0.72, 'M': 0.08, 'X': 0.04, 'C': 0.16}
        actual_dict = sampler.original_class_ratios
        self.assertDictEqual(actual_dict, expected_dict)

    def test_sampled_class_populations_x1(self):
        """ Tests if sampler samples the right populations when desired populations are given."""
        sampler = Sampler(self.mvts_df, 'lab')
        desired_populations = {'NF': 10, 'M': 10, 'X': 10, 'C': 10}  # ---> given are populations
        desired_ratios = None
        sampler.sample(desired_populations=desired_populations,
                       desired_ratios=desired_ratios)
        expected_populations = desired_populations
        self.assertDictEqual(sampler.sampled_class_populations, expected_populations)

    def test_sampled_class_populations_x2(self):
        """ Tests if sampler can handle 0 as desired populations."""
        sampler = Sampler(self.mvts_df, 'lab')
        desired_populations = {'NF': 36, 'M': 0, 'X': 0, 'C': 8}  # ---> 0 populations
        desired_ratios = None
        sampler.sample(desired_populations=desired_populations,
                       desired_ratios=desired_ratios)
        expected_populations = desired_populations
        self.assertDictEqual(sampler.sampled_class_populations, expected_populations)

    def test_sampled_class_populations_x3(self):
        """ Tests if sampler can handle -1 as desired populations."""
        sampler = Sampler(self.mvts_df, 'lab')
        desired_populations = {'NF': -1, 'M': 0, 'X': 0, 'C': -1}  # ---> -1 populations
        desired_ratios = None
        sampler.sample(desired_populations=desired_populations,
                       desired_ratios=desired_ratios)
        expected_populations = {'NF': 36, 'M': 0, 'X': 0, 'C': 8}
        self.assertDictEqual(sampler.sampled_class_populations, expected_populations)

    def test_sampled_class_populations_x4(self):
        """ Tests if sampler samples the right populations when desired ratios are given."""
        sampler = Sampler(self.mvts_df, 'lab')
        desired_populations = None
        desired_ratios = {'NF': 0.10, 'M': 0.10, 'X': 0.10, 'C': 0.10}  # ---> given are ratios
        sampler.sample(desired_populations=desired_populations,
                       desired_ratios=desired_ratios)
        expected_populations = {'NF': 5, 'M': 5, 'X': 5, 'C': 5}  # 5 = 0.10 X total population
        self.assertDictEqual(sampler.sampled_class_populations, expected_populations)

    def test_sampled_class_populations_x5(self):
        """ Tests if sampler can handle 0 as desired ratios, in terms of sampled populations."""
        sampler = Sampler(self.mvts_df, 'lab')
        desired_populations = None
        desired_ratios = {'NF': 0.50, 'M': 0.0, 'X': 0.0, 'C': 0.50}  # ---> given are ratios
        sampler.sample(desired_populations=desired_populations,
                       desired_ratios=desired_ratios)
        expected_populations = {'NF': 25, 'M': 0.0, 'X': 0.0, 'C': 25}  # 25 = 0.5 X total population
        self.assertDictEqual(sampler.sampled_class_populations, expected_populations)

    def test_sampled_class_populations_x6(self):
        """ Tests if sampler can handle -1 as desired ratios, in terms of sampled populations."""
        sampler = Sampler(self.mvts_df, 'lab')
        desired_populations = None
        desired_ratios = {'NF': -1, 'M': 0.0, 'X': 0.0, 'C': -1}  # ---> given are ratios
        sampler.sample(desired_populations=desired_populations,
                       desired_ratios=desired_ratios)
        expected_populations = {'NF': 36, 'M': 0, 'X': 0, 'C': 8}  # [36, 0, 0, 8]
        np.testing.assert_array_almost_equal(list(sampler.sampled_class_populations.values()),
                                             list(expected_populations.values()),
                                             decimal=2)

    def test_sampled_class_ratios_x1(self):
        """ Tests if sampler samples the right ratios when desired ratios are given."""
        sampler = Sampler(self.mvts_df, 'lab')
        desired_populations = None
        desired_ratios = {'NF': 0.10, 'M': 0.10, 'X': 0.10, 'C': 0.10}  # ---> given are ratios
        sampler.sample(desired_populations=desired_populations,
                       desired_ratios=desired_ratios)
        expected_ratios = {'NF': 0.25, 'M': 0.25, 'X': 0.25, 'C': 0.25}  # 25% of new population
        self.assertDictEqual(sampler.sampled_class_ratios, expected_ratios)

    def test_sampled_class_ratios_x2(self):
        """ Tests if sampler can handle 0 as desired ratios, in terms of sampled ratios."""
        sampler = Sampler(self.mvts_df, 'lab')
        desired_populations = None
        desired_ratios = {'NF': 0.5, 'M': 0.0, 'X': 0.0, 'C': 0.50}  # ---> given are ratios
        sampler.sample(desired_populations=desired_populations,
                       desired_ratios=desired_ratios)
        expected_ratios = {'NF': 0.5, 'M': 0.0, 'X': 0.0, 'C': 0.50}  # 50% of new population
        self.assertDictEqual(sampler.sampled_class_ratios, expected_ratios)

    def test_sampled_class_ratios_x3(self):
        """ Tests if sampler can handle -1 as desired ratios, in terms of sampled ratios."""
        sampler = Sampler(self.mvts_df, 'lab')
        desired_populations = None
        desired_ratios = {'NF': -1, 'M': 0.0, 'X': 0.0, 'C': -1}  # ---> given are ratios
        sampler.sample(desired_populations=desired_populations,
                       desired_ratios=desired_ratios)
        expected_ratios = {'NF': 0.82, 'M': 0.0, 'X': 0.0, 'C': 0.18}  # [36/44, 0, 0, 8/44]
        np.testing.assert_array_almost_equal(list(sampler.sampled_class_ratios.values()),
                                             list(expected_ratios.values()),
                                             decimal=2)

    def test_undersample_x1(self):
        """ Tests if undersampler samples correctly with base_minority set to 'NF'."""
        sampler = Sampler(self.mvts_df, 'lab')
        minority_labels = ['NF', 'C']
        majority_labels = ['X', 'M']
        base_minority = 'NF'  # ---> base class is set to 'NF'
        sampler.undersample(minority_labels=minority_labels,
                            majority_labels=majority_labels,
                            base_minority=base_minority)
        expected_populations = {'NF': 36, 'M': 36, 'X': 36, 'C': 36}  # |NF|=36 in original mvts
        expected_ratios = {'NF': 0.25, 'M': 0.25, 'X': 0.25, 'C': 0.25}  # 0.25 = 36 / (4 X 36)
        self.assertDictEqual(expected_populations, sampler.sampled_class_populations)
        self.assertDictEqual(expected_ratios, sampler.sampled_class_ratios)

    def test_undersample_x2(self):
        """ Tests if undersampler samples correctly with base_minority set to 'C'."""
        sampler = Sampler(self.mvts_df, 'lab')
        minority_labels = ['NF', 'C']
        majority_labels = ['X', 'M']
        base_minority = 'C'  # ---> base class is set to 'C'
        sampler.undersample(minority_labels=minority_labels,
                            majority_labels=majority_labels,
                            base_minority=base_minority)
        expected_populations = {'NF': 8, 'M': 8, 'X': 8, 'C': 8}  # |C|=8 in original mvts
        self.assertDictEqual(expected_populations, sampler.sampled_class_populations)

    def test_undersample_x3(self):
        """ Tests if undersampler samples correctly with base_minority set to 'C'."""
        sampler = Sampler(self.mvts_df, 'lab')
        minority_labels = ['NF', 'C']
        majority_labels = ['X', 'M']
        base_minority = 'C'  # ---> base class is set to 'C'
        sampler.undersample(minority_labels=minority_labels,
                            majority_labels=majority_labels,
                            base_minority=base_minority)
        expected_ratios = {'NF': 0.25, 'M': 0.25, 'X': 0.25, 'C': 0.25}  # 0.25 = 8 / (4 X 8)
        self.assertDictEqual(expected_ratios, sampler.sampled_class_ratios)

    def test_oversample_x1(self):
        """ Tests if oversampler samples correctly with base_majority set to 'M'."""
        sampler = Sampler(self.mvts_df, 'lab')
        minority_labels = ['NF', 'C']
        majority_labels = ['X', 'M']
        base_majority = 'M'  # ---> base class is set to 'M'
        sampler.oversample(minority_labels=minority_labels,
                           majority_labels=majority_labels,
                           base_majority=base_majority)
        expected_populations = {'NF': 4, 'M': 4, 'X': 4, 'C': 4}  # |M|=4 in original mvts
        self.assertDictEqual(expected_populations, sampler.sampled_class_populations)
        expected_ratios = {'NF': 0.25, 'M': 0.25, 'X': 0.25, 'C': 0.25}  # 0.25 = 4 / (4 X 4)
        self.assertDictEqual(expected_ratios, sampler.sampled_class_ratios)


if __name__ == '__main__':
    unittest.main()
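A condensed usage sketch of the Sampler API that the tests above exercise; the toy DataFrame is hypothetical and stands in for the extracted-features CSV fixture, but the method and property names come straight from the tests:

import pandas as pd
from mvtsdatatoolkit.sampling.sampler import Sampler

# Hypothetical toy frame with the same class balance as the test fixture.
df = pd.DataFrame({'lab': ['NF'] * 36 + ['C'] * 8 + ['M'] * 4 + ['X'] * 2,
                   'feature': range(50)})

sampler = Sampler(df, 'lab')
print(sampler.original_class_populations)  # e.g. {'NF': 36, 'C': 8, 'M': 4, 'X': 2}

# Exact per-class counts; per the tests above, -1 means "keep the original population".
sampler.sample(desired_populations={'NF': 10, 'M': 4, 'X': 2, 'C': 8},
               desired_ratios=None)
print(sampler.sampled_class_populations)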
| 51.849246
| 100
| 0.613006
| 1,252
| 10,318
| 4.839457
| 0.095048
| 0.095065
| 0.07427
| 0.058095
| 0.833636
| 0.799307
| 0.789404
| 0.788249
| 0.73692
| 0.725367
| 0
| 0.035187
| 0.264586
| 10,318
| 198
| 101
| 52.111111
| 0.76331
| 0.163791
| 0
| 0.614379
| 0
| 0
| 0.033153
| 0.0087
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.117647
| false
| 0.006536
| 0.039216
| 0
| 0.163399
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 31804f98b0eea1113a0ccf3cc0b29ce1ff677c26
| 122
| py
| Python
| cogs/dashboard/__init__.py
| tuna2134/hortbot
| 43176217a59af9b3ed16b2aa911b3a267569009e
| ["BSD-3-Clause"] | 1
| 2021-11-17T15:08:07.000Z
| 2021-11-17T15:08:07.000Z
| cogs/dashboard/__init__.py
| tuna2134/hortbot
| 43176217a59af9b3ed16b2aa911b3a267569009e
| ["BSD-3-Clause"] | null | null | null
| cogs/dashboard/__init__.py
| tuna2134/hortbot
| 43176217a59af9b3ed16b2aa911b3a267569009e
| ["BSD-3-Clause"] | null | null | null
|
from .discord import Discord
from .web import web


def setup(bot):
    bot.add_cog(Discord(bot))
    bot.add_cog(web(bot))
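This setup function is the standard discord.py extension entry point: bot.load_extension imports the package and calls setup(bot), which registers both cogs. A minimal loading sketch, assuming discord.py 1.x conventions and a placeholder token:

from discord.ext import commands

bot = commands.Bot(command_prefix="!")
bot.load_extension("cogs.dashboard")  # triggers the setup(bot) shown above
bot.run("YOUR_BOT_TOKEN")  # placeholder token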
| 20.333333
| 29
| 0.713115
| 21
| 122
| 4.047619
| 0.428571
| 0.141176
| 0.211765
| 0.282353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163934
| 122
| 6
| 30
| 20.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 6
| 31a42aea0c2015392e9bdfd5566d048cba44ed1e
| 36
| py
| Python
| t_1000/__init__.py
| chao5645/T-1000
| 99751bcfd79bd94df3667e7311e3b3af2b912505
| ["MIT"] | 111
| 2019-10-30T01:12:49.000Z
| 2022-03-10T04:54:43.000Z
| t_1000/__init__.py
| charlesedwards/T-1000
| 5d88f74ddb2a0d47c3101072d6b9f6971fb2ba26
| ["MIT"] | 16
| 2019-10-24T15:52:05.000Z
| 2022-02-05T17:55:02.000Z
| t_1000/__init__.py
| charlesedwards/T-1000
| 5d88f74ddb2a0d47c3101072d6b9f6971fb2ba26
| ["MIT"] | 33
| 2019-11-03T14:51:23.000Z
| 2021-12-02T07:40:25.000Z
|
from t_1000.application import T1000
| 36
| 36
| 0.888889
| 6
| 36
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.242424
| 0.083333
| 36
| 1
| 36
| 36
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 6
| 31bef2b3e4f27f6e4970d82923121f0d4bc59be7
| 8,942
| py
| Python
| tests/unit_test_benchmarking.py
| JamesPHoughton/pysd
| 5885d622144dd81af96e3c875bac74c51ddba62f
| ["MIT"] | 240
| 2015-01-10T21:32:27.000Z
| 2022-03-18T07:55:55.000Z
| tests/unit_test_benchmarking.py
| JamesPHoughton/pysd
| 5885d622144dd81af96e3c875bac74c51ddba62f
| ["MIT"] | 304
| 2015-01-20T18:51:06.000Z
| 2022-03-25T10:54:45.000Z
| tests/unit_test_benchmarking.py
| JamesPHoughton/pysd
| 5885d622144dd81af96e3c875bac74c51ddba62f
| ["MIT"] | 72
| 2015-05-14T21:15:58.000Z
| 2022-02-04T16:33:31.000Z
|
import os
from unittest import TestCase

# most of the features of this script are already tested indirectly when
# running vensim and xmile integration tests

_root = os.path.dirname(__file__)


class TestErrors(TestCase):

    def test_canonical_file_not_found(self):
        from pysd.tools.benchmarking import runner

        with self.assertRaises(FileNotFoundError) as err:
            runner(os.path.join(_root, "more-tests/not_existent.mdl"))

        self.assertIn(
            'Canonical output file not found.',
            str(err.exception))

    def test_non_valid_model(self):
        from pysd.tools.benchmarking import runner

        with self.assertRaises(ValueError) as err:
            runner(os.path.join(
                _root,
                "more-tests/not_vensim/test_not_vensim.txt"))

        self.assertIn(
            'Modelfile should be *.mdl or *.xmile',
            str(err.exception))

    def test_non_valid_outputs(self):
        from pysd.tools.benchmarking import load_outputs

        with self.assertRaises(ValueError) as err:
            load_outputs(
                os.path.join(
                    _root,
                    "more-tests/not_vensim/test_not_vensim.txt"))

        self.assertIn(
            "Not able to read '",
            str(err.exception))
        self.assertIn(
            "more-tests/not_vensim/test_not_vensim.txt'.",
            str(err.exception))

    def test_different_frames_error(self):
        from pysd.tools.benchmarking import load_outputs, assert_frames_close

        with self.assertRaises(AssertionError) as err:
            assert_frames_close(
                load_outputs(os.path.join(_root, "data/out_teacup.csv")),
                load_outputs(
                    os.path.join(_root, "data/out_teacup_modified.csv")))

        self.assertIn(
            "Following columns are not close:\n\tTeacup Temperature",
            str(err.exception))
        self.assertNotIn(
            "Column 'Teacup Temperature' is not close.",
            str(err.exception))
        self.assertNotIn(
            "Actual values:\n\t",
            str(err.exception))
        self.assertNotIn(
            "Expected values:\n\t",
            str(err.exception))

        with self.assertRaises(AssertionError) as err:
            assert_frames_close(
                load_outputs(os.path.join(_root, "data/out_teacup.csv")),
                load_outputs(
                    os.path.join(_root, "data/out_teacup_modified.csv")),
                verbose=True)

        self.assertIn(
            "Following columns are not close:\n\tTeacup Temperature",
            str(err.exception))
        self.assertIn(
            "Column 'Teacup Temperature' is not close.",
            str(err.exception))
        self.assertIn(
            "Actual values:\n\t",
            str(err.exception))
        self.assertIn(
            "Expected values:\n\t",
            str(err.exception))

    def test_different_frames_warning(self):
        from warnings import catch_warnings
        from pysd.tools.benchmarking import load_outputs, assert_frames_close

        with catch_warnings(record=True) as ws:
            assert_frames_close(
                load_outputs(os.path.join(_root, "data/out_teacup.csv")),
                load_outputs(
                    os.path.join(_root, "data/out_teacup_modified.csv")),
                assertion="warn")

        # use only user warnings
        wu = [w for w in ws if issubclass(w.category, UserWarning)]
        self.assertEqual(len(wu), 1)

        self.assertIn(
            "Following columns are not close:\n\tTeacup Temperature",
            str(wu[0].message))
        self.assertNotIn(
            "Column 'Teacup Temperature' is not close.",
            str(wu[0].message))
        self.assertNotIn(
            "Actual values:\n\t",
            str(wu[0].message))
        self.assertNotIn(
            "Expected values:\n\t",
            str(wu[0].message))

        with catch_warnings(record=True) as ws:
            assert_frames_close(
                load_outputs(os.path.join(_root, "data/out_teacup.csv")),
                load_outputs(
                    os.path.join(_root, "data/out_teacup_modified.csv")),
                assertion="warn", verbose=True)

        # use only user warnings
        wu = [w for w in ws if issubclass(w.category, UserWarning)]
        self.assertEqual(len(wu), 1)

        self.assertIn(
            "Following columns are not close:\n\tTeacup Temperature",
            str(wu[0].message))
        self.assertIn(
            "Column 'Teacup Temperature' is not close.",
            str(wu[0].message))
        self.assertIn(
            "Actual values:\n\t",
            str(wu[0].message))
        self.assertIn(
            "Expected values:\n\t",
            str(wu[0].message))

    def test_transposed_frame(self):
        from pysd.tools.benchmarking import load_outputs, assert_frames_close

        assert_frames_close(
            load_outputs(os.path.join(_root, "data/out_teacup.csv")),
            load_outputs(
                os.path.join(_root, "data/out_teacup_transposed.csv"),
                transpose=True))

    def test_load_columns(self):
        from pysd.tools.benchmarking import load_outputs

        out0 = load_outputs(
            os.path.join(_root, "data/out_teacup.csv"))
        out1 = load_outputs(
            os.path.join(_root, "data/out_teacup.csv"),
            columns=["Room Temperature", "Teacup Temperature"])
        out2 = load_outputs(
            os.path.join(_root, "data/out_teacup_transposed.csv"),
            transpose=True,
            columns=["Heat Loss to Room"])

        self.assertEqual(
            set(out1.columns),
            set(["Room Temperature", "Teacup Temperature"]))
        self.assertEqual(
            set(out2.columns),
            set(["Heat Loss to Room"]))
        self.assertTrue((out0.index == out1.index).all())
        self.assertTrue((out0.index == out2.index).all())

    def test_different_cols(self):
        from warnings import catch_warnings
        from pysd.tools.benchmarking import assert_frames_close
        import pandas as pd

        d1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'd': [6, 7]})
        d2 = pd.DataFrame({'a': [1, 2]})
        d3 = pd.DataFrame({'a': [1, 2], 'c': [3, 4]})

        with self.assertRaises(ValueError) as err:
            assert_frames_close(
                actual=d1,
                expected=d2)

        self.assertIn(
            "Columns from actual and expected values must be equal.",
            str(err.exception))

        with catch_warnings(record=True) as ws:
            assert_frames_close(
                actual=d1,
                expected=d2,
                assertion="warn")

        # use only user warnings
        wu = [w for w in ws if issubclass(w.category, UserWarning)]
        self.assertEqual(len(wu), 1)

        self.assertIn("'b'", str(wu[0].message))
        self.assertIn("'d'", str(wu[0].message))
        self.assertIn(
            "from actual values not found in expected values.",
            str(wu[0].message))

        with catch_warnings(record=True) as ws:
            assert_frames_close(
                expected=d1,
                actual=d2,
                assertion="warn")

        # use only user warnings
        wu = [w for w in ws if issubclass(w.category, UserWarning)]
        self.assertEqual(len(wu), 1)

        self.assertIn("'b'", str(wu[0].message))
        self.assertIn("'d'", str(wu[0].message))
        self.assertIn(
            "from expected values not found in actual values.",
            str(wu[0].message))

        with catch_warnings(record=True) as ws:
            assert_frames_close(
                actual=d1,
                expected=d3,
                assertion="warn")

        # use only user warnings
        wu = [w for w in ws if issubclass(w.category, UserWarning)]
        self.assertEqual(len(wu), 1)

        self.assertIn("'b'", str(wu[0].message))
        self.assertIn("'d'", str(wu[0].message))
        self.assertIn(
            "from actual values not found in expected values.",
            str(wu[0].message))
        self.assertIn(
            "Columns 'c' from expected values not found in actual "
            "values.", str(wu[0].message))

    def test_invalid_input(self):
        from pysd.tools.benchmarking import assert_frames_close

        with self.assertRaises(TypeError) as err:
            assert_frames_close(
                actual=[1, 2],
                expected=[1, 2])

        self.assertIn(
            "Inputs must both be pandas DataFrames.",
            str(err.exception))
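The tests above pin down the benchmarking helpers' signatures: runner(model_file), load_outputs(path, transpose=..., columns=...), and assert_frames_close(actual, expected, assertion=..., verbose=...). A standalone sketch under those assumptions (the frames are illustrative, not the teacup fixtures):

import pandas as pd
from pysd.tools.benchmarking import assert_frames_close

expected = pd.DataFrame({'Teacup Temperature': [180.0, 160.0, 145.0]})
actual = pd.DataFrame({'Teacup Temperature': [180.0, 160.0, 145.0]})

# Identical frames pass silently; assertion="warn" would downgrade a mismatch
# to a UserWarning, as exercised in test_different_frames_warning above.
assert_frames_close(actual=actual, expected=expected)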
| 32.875
| 77
| 0.552449
| 1,007
| 8,942
| 4.776564
| 0.148957
| 0.064865
| 0.022453
| 0.048649
| 0.824324
| 0.802495
| 0.76736
| 0.706653
| 0.643867
| 0.623077
| 0
| 0.009992
| 0.339633
| 8,942
| 271
| 78
| 32.99631
| 0.804572
| 0.025498
| 0
| 0.721393
| 0
| 0
| 0.174248
| 0.037216
| 0
| 0
| 0
| 0
| 0.338308
| 1
| 0.044776
| false
| 0
| 0.069652
| 0
| 0.119403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 7301facf76e5da26ad9ab128a5cb08be40a62481
| 5,965
| py
| Python
| setup.py
| molML/MoleculeACE
| e831d2371a9b89f4853a03d5c04cc4bf59f64ee0
| ["MIT"] | 9
| 2022-03-26T17:36:03.000Z
| 2022-03-29T19:50:26.000Z
| setup.py
| molML/MoleculeACE
| e831d2371a9b89f4853a03d5c04cc4bf59f64ee0
| ["MIT"] | null | null | null
| setup.py
| molML/MoleculeACE
| e831d2371a9b89f4853a03d5c04cc4bf59f64ee0
| ["MIT"] | null | null | null
|
from setuptools import setup

setup(
    name='MoleculeACE',
    version='1.0.9',
    packages=['MoleculeACE', 'MoleculeACE.ML', 'MoleculeACE.CNN', 'MoleculeACE.MLP', 'MoleculeACE.GNN',
              'MoleculeACE.GNN.data', 'MoleculeACE.GNN.models', 'MoleculeACE.GNN.models.optimization',
              'MoleculeACE.LSTM', 'MoleculeACE.benchmark', 'MoleculeACE.benchmark.utils',
              'MoleculeACE.benchmark.models', 'MoleculeACE.benchmark.evaluation',
              'MoleculeACE.benchmark.data_processing', 'MoleculeACE.benchmark.data_processing.preprocessing',
              'MoleculeACE.Data',
              'MoleculeACE.Data.benchmark_data',
              'MoleculeACE.Data.benchmark_data.train',
              'MoleculeACE.Data.benchmark_data.test',
              'MoleculeACE.Data.configures.benchmark',
              'MoleculeACE.Data.configures.default',
              'MoleculeACE.Data.configures',
              'MoleculeACE.Data.configures.benchmark.CHEMBL4203_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL2034_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL233_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL4616_EC50',
              'MoleculeACE.Data.configures.benchmark.CHEMBL287_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL218_EC50',
              'MoleculeACE.Data.configures.benchmark.CHEMBL264_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL219_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL2835_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL2147_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL231_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL3979_EC50',
              'MoleculeACE.Data.configures.benchmark.CHEMBL237_EC50',
              'MoleculeACE.Data.configures.benchmark.CHEMBL244_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL4792_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL1871_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL237_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL262_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL2047_EC50',
              'MoleculeACE.Data.configures.benchmark.CHEMBL239_EC50',
              'MoleculeACE.Data.configures.benchmark.CHEMBL2971_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL204_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL214_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL1862_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL234_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL238_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL235_EC50',
              'MoleculeACE.Data.configures.benchmark.CHEMBL4005_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL236_Ki',
              'MoleculeACE.Data.configures.benchmark.CHEMBL228_Ki'
              ],
    url='https://github.com/derekvantilborg/MoleculeACE',
    license='MIT',
    author='Derek van Tilborg',
    author_email='d.w.v.tilborg@tue.nl',
    description='MoleculeACE',
    install_requires=[
        'tqdm',
        'requests',
        'twine',
        'importlib-metadata',
        'pandas',
        'numpy',
        'chembl_webresource_client',
        'scikit-learn',
        'matplotlib',
        'python-Levenshtein',
        'progress',
        'rdkit-pypi'
    ],
    include_package_data=True,
    package_data={'': ['Data/*',
                       'Data/benchmark_data/*',
                       'Data/benchmark_data/test/*',
                       'Data/benchmark_data/train/*',
                       'Data/configures/*',
                       'Data/configures/default/*',
                       'Data/configures/benchmark/*',
                       'Data/configures/benchmark/CHEMBL4203_Ki/*',
                       'Data/configures/benchmark/CHEMBL2034_Ki/*',
                       'Data/configures/benchmark/CHEMBL233_Ki/*',
                       'Data/configures/benchmark/CHEMBL4616_EC50/*',
                       'Data/configures/benchmark/CHEMBL287_Ki/*',
                       'Data/configures/benchmark/CHEMBL218_EC50/*',
                       'Data/configures/benchmark/CHEMBL264_Ki/*',
                       'Data/configures/benchmark/CHEMBL219_Ki/*',
                       'Data/configures/benchmark/CHEMBL2835_Ki/*',
                       'Data/configures/benchmark/CHEMBL2147_Ki/*',
                       'Data/configures/benchmark/CHEMBL231_Ki/*',
                       'Data/configures/benchmark/CHEMBL3979_EC50/*',
                       'Data/configures/benchmark/CHEMBL237_EC50/*',
                       'Data/configures/benchmark/CHEMBL244_Ki/*',
                       'Data/configures/benchmark/CHEMBL4792_Ki/*',
                       'Data/configures/benchmark/CHEMBL1871_Ki/*',
                       'Data/configures/benchmark/CHEMBL237_Ki/*',
                       'Data/configures/benchmark/CHEMBL262_Ki/*',
                       'Data/configures/benchmark/CHEMBL2047_EC50/*',
                       'Data/configures/benchmark/CHEMBL239_EC50/*',
                       'Data/configures/benchmark/CHEMBL2971_Ki/*',
                       'Data/configures/benchmark/CHEMBL204_Ki/*',
                       'Data/configures/benchmark/CHEMBL214_Ki/*',
                       'Data/configures/benchmark/CHEMBL1862_Ki/*',
                       'Data/configures/benchmark/CHEMBL234_Ki/*',
                       'Data/configures/benchmark/CHEMBL238_Ki/*',
                       'Data/configures/benchmark/CHEMBL235_EC50/*',
                       'Data/configures/benchmark/CHEMBL4005_Ki/*',
                       'Data/configures/benchmark/CHEMBL236_Ki/*',
                       'Data/configures/benchmark/CHEMBL228_Ki/*'
                       ]}
)
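Because include_package_data plus the package_data globs above bundle the Data tree inside the installed package, it can be located at runtime without hard-coded paths. A sketch assuming Python 3.9+ and a normal pip install of MoleculeACE:

from importlib import resources

# Traversable handle into the installed package's bundled Data directory.
data_root = resources.files("MoleculeACE") / "Data"
for entry in data_root.iterdir():
    print(entry.name)  # lists the packaged Data/ contents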
| 55.231481
| 109
| 0.58793
| 478
| 5,965
| 7.179916
| 0.209205
| 0.269231
| 0.415501
| 0.30711
| 0.719988
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055859
| 0.294719
| 5,965
| 107
| 110
| 55.747664
| 0.759924
| 0
| 0
| 0.018868
| 0
| 0
| 0.623806
| 0.563118
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.018868
| 0
| 0.018868
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 7310011ae7340cef3ca71cf061f21ce54bb9ce36
| 8,678
| py
| Python
| matcher-server/background/libs/postprocessing.py
| mauriciabad/3d-print-matcher
| 93a9edf350df4ac03ee02b53d22051396ba9792c
| ["MIT"] | null | null | null
| matcher-server/background/libs/postprocessing.py
| mauriciabad/3d-print-matcher
| 93a9edf350df4ac03ee02b53d22051396ba9792c
| ["MIT"] | null | null | null
| matcher-server/background/libs/postprocessing.py
| mauriciabad/3d-print-matcher
| 93a9edf350df4ac03ee02b53d22051396ba9792c
| ["MIT"] | null | null | null
|
"""
Name: Post-processing class file
Description: This file contains post-processing classes.
Version: [release][3.2]
Source url: https://github.com/OPHoperHPO/image-background-remove-tool
Author: Anodev (OPHoperHPO)[https://github.com/OPHoperHPO] .
License: Apache License 2.0
License:
Copyright 2020 OPHoperHPO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from PIL import Image
from background.libs.strings import POSTPROCESS_METHODS
logger = logging.getLogger(__name__)
def method_detect(method: str):
"""Detects which method to use and returns its object"""
if method in POSTPROCESS_METHODS:
if method == "rtb-bnb":
return RemovingTooTransparentBordersHardAndBlurringHardBorders()
elif method == "rtb-bnb2":
return RemovingTooTransparentBordersHardAndBlurringHardBordersTwo()
else:
return None
else:
return False
class RemovingTooTransparentBordersHardAndBlurringHardBordersTwo:
"""
This is the class for the image post-processing algorithm.
This algorithm improves the boundaries of the image obtained from the neural network.
It is based on the principle of removing too transparent pixels
and smoothing the borders after removing too transparent pixels.
"""
def __init__(self):
import cv2
import skimage
import numpy as np
self.cv2 = cv2
self.skimage = skimage
self.np = np
self.model = None
self.prep_image = None
self.orig_image = None
@staticmethod
def __extact_alpha_channel__(image):
"""
Extracts alpha channel from RGBA image
:param image: RGBA pil image
:return: RGB Pil image
"""
# Extract just the alpha channel
alpha = image.split()[-1]
# Create a new image with an opaque black background
bg = Image.new("RGBA", image.size, (0, 0, 0, 255))
# Copy the alpha channel to the new image using itself as the mask
bg.paste(alpha, mask=alpha)
return bg.convert("RGB")
def __blur_edges__(self, imaged):
"""
Blurs the edges of the image
:param imaged: RGBA Pil image
:return: RGBA PIL image
"""
image = self.np.array(imaged)
image = self.cv2.cvtColor(image, self.cv2.COLOR_RGBA2BGRA)
# extract alpha channel
a = image[:, :, 3]
# blur alpha channel
ab = self.cv2.GaussianBlur(a, (0, 0), sigmaX=2, sigmaY=2, borderType=self.cv2.BORDER_DEFAULT)
# stretch so that 255 -> 255 and 127.5 -> 0
aa = self.skimage.exposure.rescale_intensity(ab, in_range=(140, 255), out_range=(0, 255))
# replace alpha channel in input with new alpha channel
out = image.copy()
out[:, :, 3] = aa
image = self.cv2.cvtColor(out, self.cv2.COLOR_BGRA2RGBA)
return Image.fromarray(image)
def __remove_too_transparent_borders__(self, mask, tranp_val=31):
"""
Marks all pixels in the mask with a transparency greater than $tranp_val as opaque.
Pixels with transparency less than $tranp_val, as fully transparent
:param tranp_val: Integer value.
:return: Processed mask
"""
mask = self.np.array(mask.convert("L"))
height, weight = mask.shape
for h in range(height):
for w in range(weight):
val = mask[h, w]
if val > tranp_val:
mask[h, w] = 255
else:
mask[h, w] = 0
return Image.fromarray(mask)
def run(self, model, image, orig_image):
"""
Runs an image post-processing algorithm to improve background removal quality.
:param model: The class of the neural network used to remove the background.
:param image: Image without background
:param orig_image: Source image
"""
mask = self.__remove_too_transparent_borders__(self.__extact_alpha_channel__(image))
empty = Image.new("RGBA", orig_image.size)
image = Image.composite(orig_image, empty, mask)
image = self.__blur_edges__(image)
image = model.process_image(image)
mask = self.__remove_too_transparent_borders__(self.__extact_alpha_channel__(image))
empty = Image.new("RGBA", orig_image.size)
image = Image.composite(orig_image, empty, mask)
image = self.__blur_edges__(image)
return image
class RemovingTooTransparentBordersHardAndBlurringHardBorders:
"""
This is the class for the image post-processing algorithm.
This algorithm improves the boundaries of the image obtained from the neural network.
It is based on the principle of removing too transparent pixels
and smoothing the borders after removing too transparent pixels.
The algorithm performs this procedure twice.
For the first time, the algorithm processes the image from the neural network,
then sends the processed image back to the neural network, and then processes it again and returns it to the user.
This method gives the best result in combination with u2net without any preprocessing methods.
"""
def __init__(self):
import cv2
import skimage
import numpy as np
self.cv2 = cv2
self.skimage = skimage
self.np = np
self.model = None
self.prep_image = None
self.orig_image = None
@staticmethod
def __extact_alpha_channel__(image):
"""
Extracts alpha channel from RGBA image
:param image: RGBA pil image
:return: RGB Pil image
"""
# Extract just the alpha channel
alpha = image.split()[-1]
# Create a new image with an opaque black background
bg = Image.new("RGBA", image.size, (0, 0, 0, 255))
# Copy the alpha channel to the new image using itself as the mask
bg.paste(alpha, mask=alpha)
return bg.convert("RGB")
def __blur_edges__(self, imaged):
"""
Blurs the edges of the image
:param imaged: RGBA Pil image
:return: RGBA PIL image
"""
image = self.np.array(imaged)
image = self.cv2.cvtColor(image, self.cv2.COLOR_RGBA2BGRA)
# extract alpha channel
a = image[:, :, 3]
# blur alpha channel
ab = self.cv2.GaussianBlur(a, (0, 0), sigmaX=2, sigmaY=2, borderType=self.cv2.BORDER_DEFAULT)
# stretch so that 255 -> 255 and 127.5 -> 0
# noinspection PyUnresolvedReferences
aa = self.skimage.exposure.rescale_intensity(ab, in_range=(140, 255), out_range=(0, 255))
# replace alpha channel in input with new alpha channel
out = image.copy()
out[:, :, 3] = aa
image = self.cv2.cvtColor(out, self.cv2.COLOR_BGRA2RGBA)
return Image.fromarray(image)
def __remove_too_transparent_borders__(self, mask, tranp_val=31):
"""
Marks all pixels in the mask with a transparency greater than tranp_val as opaque.
Pixels with transparency less than tranp_val, as fully transparent
:param tranp_val: Integer value.
:return: Processed mask
"""
mask = self.np.array(mask.convert("L"))
height, weight = mask.shape
for h in range(height):
for w in range(weight):
val = mask[h, w]
if val > tranp_val:
mask[h, w] = 255
else:
mask[h, w] = 0
return Image.fromarray(mask)
def run(self, _, image, orig_image):
"""
Runs an image post-processing algorithm to improve background removal quality.
:param _: The class of the neural network used to remove the background.
:param image: Image without background
:param orig_image: Source image
"""
mask = self.__remove_too_transparent_borders__(self.__extact_alpha_channel__(image))
empty = Image.new("RGBA", orig_image.size)
image = Image.composite(orig_image, empty, mask)
image = self.__blur_edges__(image)
return image
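A minimal driver sketch for the post-processing classes above; the image paths are placeholders, and the "rtb-bnb" variant is chosen because its run() ignores the model argument, so no network object is needed:

from PIL import Image

method = method_detect("rtb-bnb")
orig_image = Image.open("input.png").convert("RGBA")  # source image
cutout = Image.open("cutout.png").convert("RGBA")     # network output with alpha

result = method.run(None, cutout, orig_image)  # first argument is unused by this variant
result.save("cutout_smoothed.png")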
| 38.229075
| 118
| 0.644388
| 1,103
| 8,678
| 4.939257
| 0.216682
| 0.04185
| 0.017621
| 0.021109
| 0.722651
| 0.722651
| 0.722651
| 0.722651
| 0.722651
| 0.722651
| 0
| 0.018477
| 0.276561
| 8,678
| 226
| 119
| 38.39823
| 0.849315
| 0.416916
| 0
| 0.841122
| 0
| 0
| 0.009309
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102804
| false
| 0
| 0.084112
| 0
| 0.317757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 7312a3521acdd2c49e033253eb5751521f15b47d
| 9,720
| py
| Python
| courses/models.py
| sisekelohub/sisekelo
| 7e1b0de6abf07e65ed746d0d929c3de37fb421c3
| ["MIT"] | 1
| 2022-02-20T16:03:04.000Z
| 2022-02-20T16:03:04.000Z
| courses/models.py
| sisekelohub/sisekelo
| 7e1b0de6abf07e65ed746d0d929c3de37fb421c3
| ["MIT"] | null | null | null
| courses/models.py
| sisekelohub/sisekelo
| 7e1b0de6abf07e65ed746d0d929c3de37fb421c3
| ["MIT"] | null | null | null
|
from django.db import models
from django.utils.text import slugify
from django.utils.timezone import now
from django.urls import reverse
class Nfq(models.Model):
name = models.CharField(max_length=100)
level = models.IntegerField(null=True)
def __str__(self):
return self.name
class Program_Catalogue(models.Model):
name = models.CharField(max_length=100)
# number = models.IntegerField(null=True)
def __str__(self):
return self.name
class Accredited_Program(models.Model):
ACCREDITED_TYPE = (
('Learnership', 'Learnership'),
('Short Courses', 'Short Courses'),
('Skills Program', 'Skills Program')
)
CERTIFICATE_TYPE = (
('National Certificate', 'National Certificate'),
('Further Education & Training', 'Further Education & Training')
)
MODE_OF_DELIVERY = (
('Online', 'Online'),
('Physical', 'Physical'),
('Hybrid', 'Hybrid'),
)
TARGET_AUDIENCE = (
('Beginner', 'Beginner'),
('Upskilling', 'Upskilling'),
('Expert', 'Expert'),
)
PAYMENT_OPTION = (
('EFT', 'EFT'),
('CARD', 'CARD'),
('BANK TRANSFER', 'BANK TRANSFER'),
)
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
program_type = models.CharField(max_length=50, choices=ACCREDITED_TYPE, default='Learnership')
title = models.CharField(max_length=200)
certificate_type = models.CharField(max_length=50, choices=CERTIFICATE_TYPE, default='National Certificate')
description = models.TextField(blank=True, null=True )
image = models.ImageField(upload_to='uploads/learnership_images/', null=True)
mode_of_delivery = models.CharField(max_length=10, choices=MODE_OF_DELIVERY)
target_audiences = models.CharField(max_length=50, choices=TARGET_AUDIENCE, default="Beginner")
career_prospects = models.TextField(null=False, default="")
nfq_level = models.ForeignKey(Nfq, on_delete=models.CASCADE, related_name="nfqlevel1", default="1")
credits = models.IntegerField(null=False, default="130")
# outcomes = models.TextField(null=False, default="")
expectations = models.TextField(blank=True)
modules = models.TextField(null=True, default="")
price = models.DecimalField(max_digits=10, decimal_places=4, null=True)
payment_options = models.CharField(max_length=50, choices=PAYMENT_OPTION, default="Beginner")
# duration = models.DurationField(null=False)
duration = models.CharField(max_length=50, null=True)
start_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
end_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
brochure = models.FileField(upload_to='uploads/learnership_brochures', null=False, default="")
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='draft')
slug = models.SlugField(max_length=200, primary_key=True, auto_created=False, default = "")
is_published = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now=True)
updated_at = models.DateTimeField (auto_now=True)
def __str__(self):
return self.title
# def get_absolute_url(self):
# return reverse("courses:course_detail", kwargs={"slug": self.slug})
class Specialized_Course(models.Model):
ACCREDITED_TYPE = (
('Work Readiness Program', 'Work Readiness Program'),
('Financial Literacy', 'Financial Literacy'),
('Data Science & Python', 'Data Science & Python'),
('Digital Immersion Program', 'Digital Immersion Program'),
('Specialized Development Program', 'Specialized Development Program'),
('Animation Program', 'Animation Program'),
)
MODE_OF_DELIVERY = (
('O', 'Online'),
('P', 'Physical'),
('H', 'Hybrid'),
)
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
program_type = models.CharField(max_length=50, choices=ACCREDITED_TYPE, default='Work Readiness Program')
title = models.CharField(max_length=200)
overview = models.TextField(null=False, default="")
description = models.TextField(blank=False, null=True)
image = models.ImageField(upload_to='uploads/learnership_images/', null=True)
mode_of_delivery = models.CharField(max_length=1, choices=MODE_OF_DELIVERY, default='O')
expectations = models.TextField(blank=True)
price = models.DecimalField(max_digits=6, decimal_places=2, null=True)
duration = models.CharField(max_length=50, null=True)
start_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
end_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
specialized_brochure = models.FileField(upload_to='uploads/learnership_brochures', null=True)
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='draft')
def __str__(self):
return self.title
class Learnership(models.Model):
CERTIFICATE_TYPE = (
('National Certificate', 'National Certificate'),
('Further Education & Training', 'Further Education & Training')
)
MODE_OF_DELIVERY = (
('Online', 'Online'),
('Physical', 'Physical'),
('Hybrid', 'Hybrid'),
)
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
title = models.CharField(max_length=200)
certificate_type = models.CharField(max_length=50, choices=CERTIFICATE_TYPE, default='National Certificate')
description = models.TextField(blank=False)
image = models.ImageField(upload_to='uploads/learnership_images/', null=True)
mode_of_delivery = models.CharField(max_length=10, choices=MODE_OF_DELIVERY)
nfq_level = models.ForeignKey(Nfq, on_delete=models.CASCADE, related_name="nfqlevel")
credits = models.IntegerField(null=False)
# outcomes = models.TextField(null=False, default="")
expectations = models.TextField(blank=True)
price = models.DecimalField(max_digits=6, decimal_places=2, null=True)
# duration = models.DurationField(null=False)
duration = models.CharField(max_length=50, null=True)
start_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
end_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
modules = models.TextField(null=False, default="")
brochure = models.FileField(upload_to='uploads/learnership_brochures')
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='draft')
slug = models.SlugField(max_length=200, primary_key=True, auto_created=False, default = "")
is_published = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now=True)
updated_at = models.DateTimeField (auto_now=True)
def __str__(self):
return self.title
# def save(self, *args, **kwargs):
# self.slug = slugify(self.title)
# super(Course, self).save(*args, **kwargs)
class Short_Course(models.Model):
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
MODE_OF_DELIVERY = (
('Online', 'Online'),
('Physical', 'Physical'),
('Hybrid', 'Hybrid'),
)
title = models.CharField(max_length=200)
description = models.TextField(blank=False, null=True)
image = models.ImageField(upload_to='uploads/learnership_images/', null=True)
mode_of_delivery = models.CharField(max_length=30, choices=MODE_OF_DELIVERY, default='O')
expectations = models.TextField(blank=True)
price = models.DecimalField(max_digits=6, decimal_places=2, null=True)
duration = models.CharField(max_length=50, null=True)
start_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
end_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
overview = models.TextField(null=False, default="")
accredited_brochure = models.FileField(upload_to='uploads/learnership_brochures', null=True)
    slug = models.SlugField(max_length=200, primary_key=True, auto_created=False, default="")
is_published = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
# class Skills_Program(models.Model):
# STATUS_CHOICES = (
# ('draft', 'Draft'),
# ('published', 'Published'),
# )
# MODE_OF_DELIVERY = (
# ('O', 'Online'),
# ('P', 'Physical'),
# ('H', 'Hybrid'),
# )
# title = models.CharField(max_length=200)
# description = models.TextField(blank=False, null=True)
# image = models.ImageField(upload_to='uploads/learnership_images/', null=True)
# mode_of_delivery = models.CharField(max_length=1, choices=MODE_OF_DELIVERY, default='O')
# expectations = models.TextField(blank=True)
# price = models.DecimalField(max_digits=6, decimal_places=2, null=True)
# duration = models.CharField(max_length=50, null=True)
# start_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
# end_date = models.DateTimeField(auto_now=False, auto_now_add=False, default=None)
# overview = models.TextField(null=False, default="")
# skills_brochure = models.FileField(upload_to='uploads/learnership_brochures', null=True)
# status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='draft')
# def __str__(self):
# return self.title
# class Online(models.Model):
# title = models.CharField(max_length=200)
| [record metrics for the file above — avg_line_length: 42.82 | max_line_length: 112 | alphanum_fraction: 0.69; remaining per-file quality-signal columns omitted]
hexsha: 733ae627325cd73e8e340f347fb348563eaa6c4c | size: 141 | ext: py | lang: Python
repo_path: app/tokengen.py | repo_name: thenigan/online-ratings | repo_head_hexsha: bf0caf087bce80560aa9fb3e44ec620f652eb96a | licenses: ["MIT"] | stars/issues/forks counts: null
from uuid import uuid4
class UUIDTokenGenerator():
def __init__(self):
pass
def create(self):
return str(uuid4())
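# Minimal usage sketch (hypothetical, not part of the original file):
#
#   gen = UUIDTokenGenerator()
#   token = gen.create()  # e.g. '550e8400-e29b-41d4-a716-446655440000'
#
# create() returns a fresh random UUID4 string on each call; the class itself
# does not store or validate tokens.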
| [record metrics for the file above — avg_line_length: 14.1 | max_line_length: 27 | alphanum_fraction: 0.62; remaining per-file quality-signal columns omitted]
hexsha: 7355fa42ca301ff2485c928b1e2b7dfa6d1938d4 | size: 101,729 | ext: py | lang: Python
repo_path: status_handler.py | repo_name: manhon95/QMG_bot | repo_head_hexsha: 3409bb3d3031aa22e3dd27cbbc37c6b0bf20fc42 | licenses: ["BSD-3-Clause"]
max_issues_count: 4 | issues_event: 2021-12-15T05:06:13.000Z to 2021-12-15T05:07:32.000Z | stars/forks counts: null
import telegram
import sqlite3
import function
import cardfunction
import thread_lock
import ast
import air
import drawmap
import os
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, MessageHandler, Filters
org_dir = os.getcwd()
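# callback_data payloads throughout this module are Python list literals such
# as "['status_battle', 'ge', 3, 'pass']". Given the ast import above, the
# callback dispatcher (outside this file) presumably decodes them along the
# lines of this hypothetical sketch:
#
#   query_list = ast.literal_eval(query.data)
#   # -> ['status_battle', 'ge', 3, 'pass']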
class handler():
def __init__(self, type_, active_country_id, lock_id, passive_country_id = None, card_id = None, space_id = None, piece_id = None):
self.type_ = type_
self.active_country_id = active_country_id
self.passive_country_id = passive_country_id
self.card_id = card_id
self.space_id = space_id
self.piece_id = piece_id
self.lock_id = lock_id
self.message_id = {'ge':None, 'jp':None, 'it':None, 'uk':None, 'su':None, 'us':None, 'fr':None, 'ch':None}
self.no_respone = {'ge':True, 'jp':True, 'it':True, 'uk':True, 'su':True, 'us':True, 'fr':True, 'ch':True}
self.one_side_pass = False
self.air_defense = False
self.air_attack = False
self.first = True
text = "status_handler add: "
info_list = {"type_":type_, "active_country_id":active_country_id, "passive_country_id":passive_country_id, "card_id":card_id, "space_id":space_id, "piece_id":piece_id, "lock_id":lock_id}
for info in info_list:
if info_list[info] != None:
text += " [" + info + ": " + str(info_list[info]) + "]"
print(text)
enemy_country_list = {'ge':['uk', 'su', 'us', 'fr', 'ch'],
'jp':['uk', 'su', 'us', 'fr', 'ch'],
'it':['uk', 'su', 'us', 'fr', 'ch'],
'uk':['ge', 'jp', 'it'],
'su':['ge', 'jp', 'it'],
'us':['ge', 'jp', 'it'],
'fr':['ge', 'jp', 'it'],
'ch':['ge', 'jp', 'it'] }
friendly_country_list = {'ge':['ge', 'jp', 'it'],
'jp':['ge', 'jp', 'it'],
'it':['ge', 'jp', 'it'],
'uk':['uk', 'su', 'us', 'fr', 'ch'],
'su':['uk', 'su', 'us', 'fr', 'ch'],
'us':['uk', 'su', 'us', 'fr', 'ch'],
'fr':['uk', 'su', 'us', 'fr', 'ch'],
'ch':['uk', 'su', 'us', 'fr', 'ch']}
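# The two-letter ids encode the sides: 'ge'/'jp'/'it' form the Axis and
# 'uk'/'su'/'us'/'fr'/'ch' the Allies, so enemy_country_list and
# friendly_country_list are simply precomputed opposite-side and same-side
# lookups per country. The info_list mapping used below via info_list[type_]
# is assumed to be defined elsewhere in this module, presumably mapping each
# handler type to its *_handler_info function, e.g. (hypothetical sketch):
#
#   info_list = {'status_battle': status_battle_handler_info,
#                'status_build': status_build_handler_info,
#                'status_remove': status_remove_handler_info}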
def send_status_card(bot, active_country_id, type_, lock_id, session, passive_country_id = None, card_id = None, space_id = None, piece_id = None):
db = sqlite3.connect(session.get_db_dir())
session.draw_map()
session.handler_list.append(handler(type_, active_country_id, lock_id, passive_country_id, card_id, space_id, piece_id))
print("status_handler_id: " + str(len(session.handler_list)-1))
handler_id = len(session.handler_list)-1
#enemy_country_list = db.execute("select id from country where side = (select enemy from country where id = :country);", {'country':active_country_id}).fetchall()
pass_ = True
for country in enemy_country_list[active_country_id]:
info = info_list[type_](country, handler_id, session)
if info[2] == None:
session.handler_list[handler_id].no_respone[country] = True
else:
print('have - response ' + country)
session.handler_list[handler_id].no_respone[country] = False
pass_ = False
status_message_id = bot.send_photo(chat_id = info[0], caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML, photo=open(session.get_dir() + '/tmp.jpg', 'rb'))
session.handler_list[handler_id].message_id[country] = status_message_id.message_id
if pass_:
air.check_reposition(bot, session)
session.handler_list[handler_id].first = False
session.handler_list[handler_id].one_side_pass = True
#friendly_country_list = db.execute("select id from country where side = (select side from country where id = :country);", {'country':active_country_id}).fetchall()
pass_ = True
for country in friendly_country_list[active_country_id]:
info = info_list[type_](country, handler_id, session)
if info[2] == None:
session.handler_list[handler_id].no_respone[country] = True
else:
print('have - response ' + country)
session.handler_list[handler_id].no_respone[country] = False
pass_ = False
status_message_id = bot.send_photo(chat_id = info[0], caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML, photo=open(session.get_dir() + '/tmp.jpg', 'rb'))
print(country + ' add status_message_id:' + str(status_message_id.message_id))
session.handler_list[handler_id].message_id[country] = status_message_id.message_id
if pass_:
air.check_reposition(bot, session)
session.handler_list.pop(handler_id)
session.release_lock(lock_id)
else:
session.thread_lock(lock_id)
else:
session.thread_lock(lock_id)
def send_status_card_cb(bot, query, query_list, session):
db = sqlite3.connect(session.get_db_dir())
handler_id = query_list[2]
info_type = session.handler_list[handler_id].type_
lock_id = session.handler_list[handler_id].lock_id
if session.handler_list[handler_id].card_id != None:
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':session.handler_list[handler_id].card_id}).fetchall()
if query_list[3] == 'pass':
session.handler_list[handler_id].first = False
session.handler_list[handler_id].no_respone[query_list[1]] = True
#friendly_country_list = db.execute("select id, playerid from country where side = (select side from country where id = :country);", {'country':query_list[1]}).fetchall()
if not all([session.handler_list[handler_id].no_respone[country] for country in friendly_country_list[query_list[1]]]):
if session.handler_list[handler_id].card_id != None:
text = "<b>[" + card_name[0][0] + " - " + info_type + "]</b> - You pass, waiting other players..."
else:
text = "<b>[" + info_type + "]</b> - You pass, waiting other players..."
bot.edit_message_caption(chat_id=query.message.chat_id, message_id=query.message.message_id, caption=text , parse_mode=telegram.ParseMode.HTML)
return
for country in friendly_country_list[query_list[1]]:
message_id = session.handler_list[handler_id].message_id[country]
print(country + ' status_message_id: ' + str(message_id))
if message_id != None:
chat_id = db.execute("select playerid from country where id =:country;", {'country':country}).fetchall()
bot.delete_message(chat_id=chat_id[0][0], message_id = message_id)
session.handler_list[handler_id].message_id[country] = None
if session.handler_list[handler_id].one_side_pass:
session.handler_list.pop(handler_id)
session.release_lock(lock_id)
return
session.handler_list[handler_id].one_side_pass = True
session.handler_list[handler_id].first = False
elif query_list[3] == 'confirm':
if query_list[-1] == 'air_a':
text = "<b>[" + info_type + "]</b> - You used Air Attack, processsing..."
elif query_list[-1] == 'air_d':
text = "<b>[" + info_type + "]</b> - You used Air Defense, processsing..."
elif session.handler_list[handler_id].card_id != None:
used_card_name = db.execute("select name from card where cardid = :card;",{'card':query_list[-1]}).fetchall()
text = "<b>[" + card_name[0][0] + " - " + info_type + "]</b> - You used " + used_card_name[0][0] + ", processsing..."
else:
used_card_name = db.execute("select name from card where cardid = :card;",{'card':query_list[-1]}).fetchall()
text = "<b>[" + info_type + "]</b> - You used " + used_card_name[0][0] + ", processsing..."
bot.edit_message_caption(chat_id=query.message.chat_id, message_id=query.message.message_id, caption=text, parse_mode=telegram.ParseMode.HTML)
for country in friendly_country_list[query_list[1]]:
if country != query_list[1]:
message_id = session.handler_list[handler_id].message_id[country]
if message_id != None:
chat_id = db.execute("select playerid from country where id =:country;", {'country':country}).fetchall()
bot.delete_message(chat_id=chat_id[0][0], message_id = message_id)
session.handler_list[handler_id].message_id[country] = None
session.handler_list[handler_id].one_side_pass = False
#card execute
if query_list[-1] == 'air_a':
air_a_lock_id = session.add_lock()
air.air_attack_list.append(air.air_attack(query_list[2], air_a_lock_id, session))
print("air_attack_id: " + str(len(air.air_attack_list)-1))
air_attack_id = len(air.air_attack_list)-1
info = air.air_attack_list[air_attack_id].air_attack_info(session)
bot.send_message(chat_id = info[0], text = info[1], reply_markup = info[2])
session.thread_lock(air_a_lock_id)
elif query_list[-1] == 'air_d':
air.air_defense(bot, query_list[2], session)
else:
cardfunction.play_status(bot, query_list[-1], query_list[1], query_list[2], session)
#card execute
bot.delete_message(chat_id=query.message.chat_id, message_id=query.message.message_id)
session.handler_list[handler_id].message_id[query_list[1]] = None
session.handler_list[handler_id].first = False
else:
if query_list[3] == 'back':
info = info_list[info_type](query_list[1], handler_id, session)
chat_id = info[0]
text = info[1]
reply_markup = info[2]
else:
selected = db.execute("select name, type, text from card where cardid = :cardid;", {'cardid':query_list[-1]}).fetchall()
text = "<b>" + selected[0][0] + "</b> - " + selected[0][1] + " - " + selected[0][2]
keyboard = []
if query_list[3] != 'no_play':
keyboard += [[InlineKeyboardButton('Confirm', callback_data="['{}', '{}', {}, 'confirm', {}]".format(query_list[0], query_list[1], query_list[2], query_list[-1]))]]
keyboard += [[InlineKeyboardButton('Back', callback_data="['{}', '{}', {}, 'back']".format(query_list[0], query_list[1], query_list[2]))]]
reply_markup = InlineKeyboardMarkup(keyboard)
bot.edit_message_caption(chat_id=query.message.chat_id, message_id=query.message.message_id, caption=text, reply_markup=reply_markup, parse_mode=telegram.ParseMode.HTML)
if query_list[3] in ['pass', 'confirm']:
#enemy_country_list = db.execute("select id, playerid from country where side = (select enemy from country where id = :country);", {'country':query_list[1]}).fetchall()
pass_ = True
for country in enemy_country_list[query_list[1]]:
info = info_list[info_type](country, handler_id, session)
message_id = session.handler_list[handler_id].message_id[country]
if info[2] == None: #No response
if message_id != None:
bot.delete_message(chat_id= info[0], message_id = message_id)
session.handler_list[handler_id].message_id[country] = None
session.handler_list[handler_id].no_respone[country] = True
else: #Have response
print('have - response ' + country)
session.handler_list[handler_id].no_respone[country] = False
pass_ = False
if message_id == None:
status_message_id = bot.send_photo(chat_id = info[0], caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML, photo=open(session.get_dir() + '/tmp.jpg', 'rb'))
session.handler_list[handler_id].message_id[country] = status_message_id.message_id
else:
bot.edit_message_caption(chat_id = info[0], message_id = message_id, caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML)
if pass_:
air.check_reposition(bot, session)
if session.handler_list[handler_id].one_side_pass:
session.handler_list.pop(handler_id)
session.release_lock(lock_id)
return
session.handler_list[handler_id].one_side_pass = True
pass_ = True
#friendly_country_list = db.execute("select id, playerid from country where side = (select side from country where id = :country);", {'country':query_list[1]}).fetchall()
for country in friendly_country_list[query_list[1]]:
info = info_list[info_type](country, handler_id, session)
message_id = session.handler_list[handler_id].message_id[country]
            if info[2] == None: #No response
if message_id != None:
bot.delete_message(chat_id= info[0], message_id = message_id)
session.handler_list[handler_id].message_id[country] = None
session.handler_list[handler_id].no_respone[country] = True
            else: #Have response
print('have - response ' + country)
session.handler_list[handler_id].no_respone[country] = False
pass_ = False
if message_id == None:
status_message_id = bot.send_photo(chat_id = info[0], caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML, photo=open(session.get_dir() + '/tmp.jpg', 'rb'))
session.handler_list[handler_id].message_id[country] = status_message_id.message_id
else:
bot.edit_message_caption(chat_id = info[0], message_id = message_id, caption = info[1], reply_markup = info[2], parse_mode=telegram.ParseMode.HTML)
if pass_:
air.check_reposition(bot, session)
session.handler_list.pop(handler_id)
session.release_lock(lock_id)
#------------------------------------------Status Handler Info------------------------------------------
#--------------------------------------------Battle---------------------------------------------
def status_battle_handler(bot, active_country, passive_country, space, session):
print('in status_battle_handler - ' + active_country)
db = sqlite3.connect(session.get_db_dir())
s = [41, 47, 52, 347]
space_info = db.execute("select distinct spaceid, type, name from space where spaceid = :space", {'space':space}).fetchall()
questionmarks = '?' * len(s)
avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s)).fetchall()
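    # Placeholder construction above: '?' * len(s) repeats the character, so
    # for len(s) == 4, '?' * 4 == '????' and ','.join('????') == '?,?,?,?',
    # which is interpolated into the IN (...) clause and bound to the values
    # in s by the DB-API driver.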
if len(avaliable_card) > 0:
for card in avaliable_card:
if card[0] == 41 and passive_country in ('ge','jp','it') and space == 12:
cardfunction.c41(bot, active_country, session)
if card[0] == 47 and passive_country == 'ge' and space_info[0][1] == 'land':
cardfunction.c47(bot, active_country, session)
db.execute("update card set location = 'turn' where cardid = 47")
if card[0] == 52 and passive_country in ('ge','jp','it') and space == 16:
cardfunction.c52(bot, active_country, session)
db.execute("update card set location = 'turn' where cardid = 52")
if card[0] == 347 and passive_country =='ch':
cardfunction.c347(bot, session)
db.commit()
def status_battle_handler_info(country, handler_id, session):
print('in status_battle_handler_info - ' + country)
db = sqlite3.connect(session.get_db_dir())
s = {'ge':[43, 45], 'jp':[97, 98, 99, 101, 102, 104, 107, 109, 112, 119, 120], 'it':[167, 168, 170], 'uk':[229, 230, 231, 232, 234, 242], 'su':[276, 284, 286, 287, 288, 289, 291, 292, 296, 303], 'us':[344, 346, 350, 363], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
space_info = db.execute("select distinct spaceid, type, name from space where spaceid = :space", {'space':session.handler_list[handler_id].space_id}).fetchall()
piece_info = db.execute("select control, location, supply, type from piece where pieceid = :piece;", {'piece':session.handler_list[handler_id].piece_id}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
piece_count = db.execute("select count(*) from piece where control = 'su' and type = 'army' and location != 'none';").fetchall()[0][0]
if country == 'us':
ew_count = db.execute("select count(*) from card where location = 'hand' and control ='us' and type = 'Economic Warfare';").fetchall()[0][0]
if not cardfunction.c59_used:
if country == session.handler_list[handler_id].active_country_id:
list1 = function.within(function.getside[session.handler_list[handler_id].active_country_id], [session.handler_list[handler_id].space_id], 1, db)
list2 = function.control_air_space_list(session.handler_list[handler_id].active_country_id, db)
            if len(set(list1) & set(list2)) > 0 and session.handler_list[handler_id].air_defense and not session.handler_list[handler_id].air_attack:  # non-empty intersection, not [list1 and list2]
keyboard.append([InlineKeyboardButton('Air Attack', callback_data="['status_battle', '{}', {}, 'confirm', 'air_a']".format(country, handler_id))])
if country == session.handler_list[handler_id].passive_country_id and session.handler_list[handler_id].first:
if session.handler_list[handler_id].space_id in function.control_air_space_list(session.handler_list[handler_id].passive_country_id, db):
keyboard.append([InlineKeyboardButton('Air Defense', callback_data="['status_battle', '{}', {}, 'confirm', 'air_d']".format(country, handler_id))])
if len(avaliable_card) > 0:
for card in avaliable_card:
if country == 'ge':
if card[0] == 43 and active_country == 'ge' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 45 and active_country == 'ge' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'jp':
if card[0] == 97 and active_country == 'jp' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data = "['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 98 and piece_info[0][0] == 'jp' and space_info[0][0] in function.supplied_space_list('jp', db, space_type = 'sea'):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 99 and active_country == 'jp' and space_info[0][0] in [35,36,37]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 101 and active_country == 'jp' and space_info[0][1] == 'sea':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 102 and active_country == 'jp' and space_info[0][0] == 36:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 104 and active_country == 'jp' and space_info[0][0] == 32:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 107 and piece_info[0][0] == 'jp' and space_info[0][0] in list(set([35,37,42]) & set(function.supplied_space_list('jp', db, space_type = 'land'))):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 109 and piece_info[0][0] == 'jp' and space_info[0][0] in [38,43]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 112 and active_country == 'jp' and space_info[0][1] == 'sea':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 119 and active_country == 'jp' and space_info[0][1] == 'sea':
if response_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Response card in hand', callback_data="['status_battle', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if card[0] == 120 and piece_info[0][0] == 'jp' and space_info[0][1] == 'land':
if response_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Response card in hand', callback_data="['status_battle', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'it':
if card[0] == 167 and piece_info[0][0] == 'it' and space_info[0][0] in function.within('Axis', [17], 1, db):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 168 and piece_info[0][0] == 'ge' and space_info[0][0] == 17:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 170 and piece_info[0][0] == 'ge' and piece_info[0][3] == 'army' and space_info[0][0] in function.supplied_space_list('ge', db, space_type = 'land'):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'uk':
if card[0] == 229 and active_country == 'uk' and space_info[0][1] == 'sea':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 230 and piece_info[0][0] == 'uk' and space_info[0][0] in function.supplied_space_list('uk', db, space_type = 'land'):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 231 and piece_info[0][0] in ['uk','us'] and space_info[0][0] in list(set(function.within('Allies', function.control_supplied_space_list('uk', db, space_type = 'land'), 1, db)) & set(function.supplied_space_list(piece_info[0][0], db, space_type = 'sea'))):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 232 and active_country in ['uk','us','fr'] and space_info[0][0] in [12,13,19,25]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 234 and piece_info[0][0] in ['uk','su','us','fr','ch'] and space_info[0][0] in list(set([8,9]) & set(function.supplied_space_list(piece_info[0][0], db, space_type = 'sea'))):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 242 and piece_info[0][0] == 'fr' and space_info[0][0] == 12:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 276 and active_country == 'su' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 284 and active_country == 'su' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 286 and piece_info[0][0] == 'su' and space_info[0][0] in [30,31]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 287 and piece_info[0][0] == 'su':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 288 and piece_info[0][0] == 'su' and space_info[0][0] == 20:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 289 and piece_info[0][0] == 'su' and space_info[0][0] == 28:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
                if card[0] == 291 and piece_info[0][0] == 'su' and space_info[0][0] in [24, 28]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 292 and piece_info[0][0] == 'su' and space_info[0][0] == 24:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 296 and piece_info[0][0] == 'su' and piece_count == 0:
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_battle', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
                if card[0] == 303 and active_country == 'su' and space_info[0][0] in [20, 21, 22, 24]:
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_battle', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 344 and active_country == 'us' and space_info[0][1] == 'sea':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 346 and active_country == 'us' and space_info[0][1] == 'land' and space_info[0][0] in function.within('Allies', function.control_supplied_space_list('us', db, space_type = 'sea'), 1, db):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 350 and piece_info[0][0] == 'us' and piece_info[0][3] == 'navy' and space_info[0][0] in function.supplied_space_list('us', db, space_type = 'sea'):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 363 and active_country == 'us':
if ew_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_battle', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Economic Warfare in hand', callback_data="['status_battle', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':session.handler_list[handler_id].card_id}).fetchall()
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_battle', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = "<b>[" + card_name[0][0] + "]</b> - " + function.countryid2name[country] + " - " + space_info[0][2] + " is battled by " + function.countryid2name[active_country]
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
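# Each *_handler_info function in this section follows the same contract as
# status_battle_handler_info above: it returns (chat_id, text, reply_markup),
# with text and reply_markup both None when the country has nothing playable.
# That None is exactly what send_status_card / send_status_card_cb test via
# `info[2] == None` to decide whether a status card is sent.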
#--------------------------------------------Build---------------------------------------------
def status_build_handler(bot, active_country, session):
print('in status_build_handler - ' + active_country)
db = sqlite3.connect(session.get_db_dir())
s = [347]
questionmarks = '?' * len(s)
avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s)).fetchall()
if len(avaliable_card) > 0:
for card in avaliable_card:
if card[0] == 347 and active_country =='ch':
cardfunction.c347(bot, session)
db.commit()
def status_build_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_build_handler_info - ' + country)
s = {'ge': [42,50,58], 'jp':[106, 110, 111, 121], 'it':[169], 'uk':[233], 'su':[275, 280, 282, 290], 'us':[348, 353, 354, 357, 362], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
space_info = db.execute("select distinct spaceid, type, name from space where spaceid = :space", {'space':session.handler_list[handler_id].space_id}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'ge':
if card[0] == 42 and active_country == 'ge' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 50 and active_country == 'ge' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 58 and active_country == 'ge' and space_info[0][1] == 'sea':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'jp':
if card[0] == 106 and active_country in ['uk', 'su', 'us', 'fr', 'ch'] and space_info[0][0] in function.filter_space_list(function.within('Axis', function.control_space_list('jp', db), 1, db), db, control = 'all', space_type = 'sea'):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 110 and active_country == 'jp' and space_info[0][1] == 'sea':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 111 and active_country == 'jp' and space_info[0][1] == 'sea':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 121 and active_country == 'jp' and space_info[0][1] == 'sea':
if response_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Response card in hand', callback_data="['status_build', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'it':
if card[0] == 169 and active_country == 'su' and (space_info[0][0] in function.filter_space_list(function.within('Allies', function.control_space_list('uk', db), 1, db), db, control = 'all', space_type = 'land') or space_info[0][0] in function.filter_space_list(function.within('Allies', function.control_space_list('us', db), 1, db), db, control = 'all', space_type = 'land')):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'uk':
if card[0] == 233 and active_country in ['ge', 'jp', 'it'] and space_info[0][0] in [1,32,41]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 275 and active_country == 'su' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 280 and active_country == 'su' and space_info[0][1] == 'land':
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_build', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if card[0] == 282 and active_country == 'su' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 290 and active_country in ['ge', 'jp', 'it'] and space_info[0][0] in [20,24,28,30,31]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 348 and active_country == 'us' and space_info[0][1] == 'sea':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 353 and active_country == 'us' and space_info[0][1] == 'sea':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 354 and active_country == 'us' and space_info[0][0] in [3,5,7,27,40,43,44,47,50,53]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 357 and active_country == 'us' and space_info[0][1] == 'land':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 362 and active_country == 'us' and space_info[0][0] in [3,44,47,50,53]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_build', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':session.handler_list[handler_id].card_id}).fetchall()
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_build', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = "<b>[" + card_name[0][0] + "]</b> - " + function.countryid2name[country] + " - " + function.countryid2name[active_country] + " built in " + space_info[0][2]
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#--------------------------------------------Remove---------------------------------------------
def status_remove_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_remove_handler_info - ' + country)
s = {'ge': [], 'jp':[98, 107, 109], 'it':[168, 170], 'uk':[230, 231, 234, 242], 'su':[286, 288, 289, 291, 292, 296], 'us':[350], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
piece_info = db.execute("select control, location, supply, type from piece where pieceid = :piece", {'piece':session.handler_list[handler_id].piece_id}).fetchall()
space_info = db.execute("select distinct spaceid, type, name from space where spaceid = :space", {'space':session.handler_list[handler_id].space_id}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
piece_count = db.execute("select count(*) from piece where control = 'su' and type = 'army' and location != 'none';").fetchall()[0][0]
for card in avaliable_card:
if country == 'jp':
if card[0] == 98 and piece_info[0][0] == 'jp' and space_info[0][0] in function.supplied_space_list('jp', db, space_type = 'sea'):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 107 and piece_info[0][0] == 'jp' and space_info[0][0] in list(set([35,37,42]) & set(function.supplied_space_list('jp', db, space_type = 'land'))):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 109 and piece_info[0][0] == 'jp' and space_info[0][0] in [38,43]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'it':
if card[0] == 168 and piece_info[0][0] == 'ge' and space_info[0][0] == 17:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 170 and piece_info[0][0] == 'ge' and piece_info[0][3] == 'army' and space_info[0][0] in function.supplied_space_list('ge', db, space_type = 'land'):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'uk':
if card[0] == 230 and piece_info[0][0] == 'uk' and space_info[0][0] in function.supplied_space_list('uk', db, space_type = 'land') and piece_info[0][3] == 'army':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 231 and piece_info[0][0] in ['uk','us'] and space_info[0][0] in list(set(function.within('Allies', function.control_supplied_space_list('uk', db, space_type = 'land'), 1, db)) & set(function.supplied_space_list(piece_info[0][0], db, space_type = 'sea'))) and piece_info[0][3] == 'navy':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 234 and piece_info[0][0] in ['uk','su','us','fr','ch'] and space_info[0][0] in list(set([8,9]) & set(function.supplied_space_list(piece_info[0][0], db, space_type = 'sea'))) and piece_info[0][3] == 'navy':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 242 and piece_info[0][0] == 'fr' and space_info[0][0] == 12 and piece_info[0][3] == 'army':
keyboard.append([InlineKeyboardButton(card[1], callback_data="[status_remove'', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 286 and piece_info[0][0] == 'su' and space_info[0][0] in [30,31]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 288 and piece_info[0][0] == 'su' and space_info[0][0] == 20:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 289 and piece_info[0][0] == 'su' and space_info[0][0] == 28:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
                if card[0] == 291 and piece_info[0][0] == 'su' and space_info[0][0] in [24, 28]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 292 and piece_info[0][0] == 'su' and space_info[0][0] == 24:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 296 and piece_info[0][0] == 'su' and piece_count == 0:
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_remove', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 350 and piece_info[0][0] == 'us' and piece_info[0][3] == 'navy' and space_info[0][0] in function.supplied_space_list('us', db, space_type = 'sea'):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_remove', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':session.handler_list[handler_id].card_id}).fetchall()
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_remove', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
if len(card_name) > 0:
text = "<b>[" + card_name[0][0] + "]</b> - " + function.countryid2name[country] + " - " + function.countryid2name[piece_info[0][0]] + " piece in " + space_info[0][2] + " removed"
else:
text = function.countryid2name[country] + " - " + function.countryid2name[piece_info[0][0]] + " piece in " + space_info[0][2] + " removed"
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#----------------------Recruit-----------------------
def status_recuit_handler(bot, active_country, session):
print('in status_recuit_handler - ' + active_country)
db = sqlite3.connect(session.get_db_dir())
s = [347]
questionmarks = '?' * len(s)
avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s)).fetchall()
if len(avaliable_card) > 0:
for card in avaliable_card:
if card[0] == 347 and active_country =='ch':
cardfunction.c347(bot, session)
db.commit()
def status_recuit_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_recuit_handler_info - ' + country)
s = {'ge': [], 'jp':[], 'it':[], 'uk':[233], 'su':[290], 'us':[], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
space_info = db.execute("select distinct spaceid, type, name from space where spaceid = :space", {'space':session.handler_list[handler_id].space_id}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
            ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'uk':
if card[0] == 233 and active_country in ['ge', 'jp', 'it'] and space_info[0][0] in [1,32,41]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_recuit', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 290 and active_country in ['ge', 'jp', 'it'] and space_info[0][0] in [20,24,28,30,31]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_recuit', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':session.handler_list[handler_id].card_id}).fetchall()
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_recuit', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = "<b>[" + card_name[0][0] + "]</b> - " + function.countryid2name[country] + " - " + function.countryid2name[active_country] + " recuit in " + space_info[0][2]
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#--------------------------------------------Deploy/Marshal---------------------------------------------
def status_deploy_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_deploy_handler_info - ' + country)
s = {'ge': [63,64], 'jp':[124, 126], 'it':[177], 'uk':[], 'su':[300], 'us':[370], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
space_info = db.execute("select distinct spaceid, type, name from space where spaceid = :space", {'space':session.handler_list[handler_id].space_id}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'ge':
if card[0] == 63 and active_country == 'ge':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_deploy', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 64 and active_country == 'ge':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_deploy', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'jp':
if card[0] == 124 and active_country == 'jp' and space_info[0][1] == 'sea':
if response_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_deploy', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Response card in hand', callback_data="['status_deploy', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if card[0] == 126 and active_country == 'jp':
if response_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_deploy', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Response card in hand', callback_data="['status_deploy', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'it':
if card[0] == 177 and active_country == 'it':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_deploy', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 300 and active_country == 'su':
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_deploy', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_deploy', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 370 and active_country == 'us':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_deploy', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':session.handler_list[handler_id].card_id}).fetchall()
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_deploy', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
if len(card_name) > 0:
text = "<b>[" + card_name[0][0] + "]</b> - " + function.countryid2name[country] + " - " + function.countryid2name[active_country] + " deploy/marshal in " + space_info[0][2]
else:
text = function.countryid2name[country] + " - " + function.countryid2name[active_country] + " deploy/marshal in " + space_info[0][2]
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#----------------------Play Step-----------------------
def status_before_play_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_before_play_handler_info - ' + country)
s = {'ge': [59, 60, 62, 66], 'jp':[100, 103, 105, 108, 113, 127], 'it':[175], 'uk':[243, 244], 'su':[297], 'us':[365], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'ge':
air_count = db.execute("select count(*) from piece where control ='ge' and type = 'air' and location != 'none';").fetchall()[0][0]
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'it':
navy_count = db.execute("select count(*) from piece where control ='it' and type = 'navy' and location != 'none';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
if country == 'us':
bs_count = db.execute("select count(*) from card where location = 'hand' and control ='us' and type = 'Bolster';").fetchall()[0][0]
for card in avaliable_card:
if country == 'ge':
if card[0] == 59 and active_country == 'ge':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 60 and active_country == 'ge':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 62 and active_country == 'ge':
if air_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Air Force on the board', callback_data="['status_before_play', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if card[0] == 66 and active_country == 'ge':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'jp':
if card[0] == 100 and active_country == 'jp':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 103 and active_country == 'jp':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 105 and active_country == 'jp':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 108 and active_country == 'jp':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 113 and active_country == 'jp':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 127 and active_country == 'jp':
if response_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Response card in hand', callback_data="['status_before_play', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'it':
if card[0] == 175 and active_country == 'it' and navy_count != 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'uk':
if card[0] == 243:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 244 and active_country == 'uk':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 297 and active_country == 'su':
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_before_play', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 365 and active_country == 'us':
if bs_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_before_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Bolster in hand', callback_data="['status_before_play', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_before_play', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - Beginning of " + function.countryid2name[active_country] + " Play step"
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
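# --- Illustrative sketch (not part of the original module) ---
# Every handler above builds its parameterized IN clause the same way:
# '?' * len(ids) yields one placeholder per card id, and ','.join(...)
# interleaves the commas, so the ids are bound safely instead of being
# interpolated into the SQL. A self-contained demonstration with an
# in-memory database; the table contents here are hypothetical.
import sqlite3

def _demo_in_clause(ids=(44, 65, 66)):
    db = sqlite3.connect(':memory:')
    db.execute("create table card (cardid integer, name text);")
    db.executemany("insert into card values (?, ?);",
                   [(44, 'A'), (65, 'B'), (99, 'C')])
    placeholders = ','.join('?' * len(ids))  # '???' -> '?,?,?'
    return db.execute(
        "select cardid, name from card where cardid in ({});".format(placeholders),
        list(ids)).fetchall()  # -> [(44, 'A'), (65, 'B')]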
def status_play_keyboard(country, db):
print('in status_play_keyboard - ' + country)
s = {'ge': [44], 'jp':[], 'it':[161], 'uk':[], 'su':[278, 279], 'us':[345], 'fr':[], 'ch':[]}
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
extra_keyboard = []
if country == 'it':
dis_land_battle_count = db.execute("select count(*) from card where name = 'Land Battle' and location in ('played','discard') and control = 'it';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
if country == 'us':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='us' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'ge':
if card[0] == 44:
extra_keyboard.append([InlineKeyboardButton("Status - " + card[1], callback_data="['status_play', '{}', {}]".format(country, card[0]))])
if country == 'it':
if card[0] == 161:
if dis_land_battle_count > 0:
extra_keyboard.append([InlineKeyboardButton("Status - " + card[1], callback_data="['status_play', '{}', {}]".format(country, card[0]))])
else:
extra_keyboard.append([InlineKeyboardButton("Status - " + card[1] + ' - No Land Battle in discard', callback_data="['status_play', '{}', 'no_play', {}]".format(country, card[0]))])
if country == 'su':
if card[0] == 278:
extra_keyboard.append([InlineKeyboardButton("Status - " + card[1], callback_data="['status_play', '{}', {}]".format(country, card[0]))])
if card[0] == 279:
if ba_count > 0:
extra_keyboard.append([InlineKeyboardButton("Status - " + card[1], callback_data="['status_play', '{}', {}]".format(country, card[0]))])
else:
extra_keyboard.append([InlineKeyboardButton("Status - " + card[1] + ' - No Build Army in hand', callback_data="['status_play', '{}', 'no_play', {}]".format(country, card[0]))])
if country == 'us':
if card[0] == 345:
if ba_count > 0:
extra_keyboard.append([InlineKeyboardButton("Status - " + card[1], callback_data="['status_play', '{}', {}]".format(country, card[0]))])
else:
extra_keyboard.append([InlineKeyboardButton("Status - " + card[1] + ' - No Build Army in hand', callback_data="['status_play', '{}', 'no_play', {}]".format(country, card[0]))])
return extra_keyboard
else:
return None
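# --- Illustrative sketch (not part of the original module) ---
# callback_data above is the repr of a Python list, e.g.
# "['status_play', 'ge', 44]". How this codebase decodes it is not shown
# in this file, but ast.literal_eval is a safe way to parse such strings;
# note the Telegram Bot API caps callback_data at 64 bytes, which these
# short lists respect.
import ast

def _demo_parse_callback(data="['status_play', 'ge', 44]"):
    action, country, *rest = ast.literal_eval(data)
    return action, country, rest  # -> ('status_play', 'ge', [44])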
def status_after_play_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_after_play_handler_info - ' + country)
s = {'ge': [], 'jp':[], 'it':[], 'uk':[227], 'su':[285], 'us':[], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
played_card = session.handler_list[handler_id].card_id
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':played_card}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
for card in avaliable_card:
if country == 'uk':
if card[0] == 227 and active_country == 'uk':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_after_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 285 and active_country == 'su' and played_card in [246,247,248,249,250,251,252,253,254]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_after_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_after_play', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - " + function.countryid2name[active_country] + " played " + card_name[0][0]
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
def status_play_status_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_play_status_handler_info - ' + country)
s = {'ge': [], 'jp':[], 'it':[], 'uk':[241], 'su':[], 'us':[], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
played_card = session.handler_list[handler_id].card_id
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':played_card}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
for card in avaliable_card:
if country == 'uk':
if card[0] == 241 and active_country in ['ge', 'jp', 'it']:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_after_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_after_play', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - " + function.countryid2name[active_country] + " uses " + card_name[0][0]
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
def status_play_bolster_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_play_bolster_handler_info - ' + country)
s = {'ge': [], 'jp':[], 'it':[], 'uk':[239], 'su':[], 'us':[], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
played_card = session.handler_list[handler_id].card_id
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':played_card}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
for card in avaliable_card:
if country == 'uk':
if card[0] == 239 and active_country in ['ge', 'it']:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_after_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_after_play', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - " + function.countryid2name[active_country] + " uses " + card_name[0][0]
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#----------------------Air Step-----------------------
def status_air_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_air_handler_info - ' + country)
s = {'ge': [], 'jp':[], 'it':[], 'uk':[238], 'su':[299], 'us':[364], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
played_card = session.handler_list[handler_id].card_id
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':played_card}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'uk':
if card[0] == 238 and active_country == 'uk':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_after_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 299 and active_country == 'su':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_after_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 364 and active_country == 'us':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_after_play', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_after_play', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - Beginning of " + function.countryid2name[active_country] + " Air step"
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#----------------------Victory Step-----------------------
def status_victory_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_victory_handler_info - ' + country)
s = {'ge': [65], 'jp':[122, 125], 'it':[176, 180, 181], 'uk':[240, 245], 'su':[302], 'us':[368, 369], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'ge':
if card[0] == 65 and active_country == 'ge':
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'jp':
if card[0] == 122 and active_country == 'jp':
if response_count > 0:
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton("Bolster - " + card[1] + ' - No Response card in hand', callback_data="['status_victory', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if card[0] == 125 and active_country == 'jp':
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'it':
# Cards 176, 180 and 181 all offer the same unconditional prompt.
if card[0] in (176, 180, 181) and active_country == 'it':
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'uk':
if card[0] in (240, 245) and active_country == 'uk':
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 302 and active_country == 'su':
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_victory', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] in (368, 369) and active_country == 'us':
keyboard.append([InlineKeyboardButton("Bolster - " + card[1], callback_data="['status_victory', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_victory', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - Beginning of " + function.countryid2name[active_country] + " Victory step"
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
def status_extra_victory_point(country, db):
print('in status_extra_victory_point - ' + country)
s = {'ge': [40, 49], 'jp':[91, 92, 93, 94, 95, 96], 'it':[159, 160, 162, 163, 164], 'uk':[], 'su':[], 'us':[], 'fr':[], 'ch':[]}
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
text = ""
extra_point = 0
for card in avaliable_card:
if country == 'ge':
ge_spaces = function.control_space_list('ge', db)
if card[0] == 40 and 24 in ge_spaces:
text += function.countryid2name[country] + " gains 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 49 and 15 in ge_spaces:
if 11 in ge_spaces:
text += function.countryid2name[country] + " gains 2 points from <b>" + card[1] + "</b>\n"
extra_point += 2
else:
text += function.countryid2name[country] + " gains 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if country == 'jp':
jp_spaces = function.control_space_list('jp', db)
if card[0] == 91:
navy_count = db.execute("select count(*) from piece where control = 'jp' and type = 'navy' and location != 'none';").fetchall()
if navy_count[0][0] >= 3:
text += function.countryid2name[country] + " gains 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 92:
c92_point = sum(1 for space in (48, 49, 51) if space in jp_spaces)
if c92_point > 0:
text += function.countryid2name[country] + " gains " + str(c92_point) + (" points" if c92_point > 1 else " point") + " from <b>" + card[1] + "</b>\n"
extra_point += c92_point
if card[0] == 93:
c93_point = sum(1 for space in (33, 36, 45) if space in jp_spaces)
if c93_point > 0:
text += function.countryid2name[country] + " gains " + str(c93_point) + (" points" if c93_point > 1 else " point") + " from <b>" + card[1] + "</b>\n"
extra_point += c93_point
if card[0] == 94:
c94_point = sum(1 for space in (30, 42) if space in jp_spaces)
if c94_point > 0:
text += function.countryid2name[country] + " gains " + str(c94_point) + (" points" if c94_point > 1 else " point") + " from <b>" + card[1] + "</b>\n"
extra_point += c94_point
if card[0] == 95 and not {39, 46}.isdisjoint(set(jp_spaces)):
text += function.countryid2name[country] + " gains 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 96 and 44 in jp_spaces:
text += function.countryid2name[country] + " gains 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if country == 'it':
it_spaces = function.control_space_list('it', db)
if card[0] == 159 and not {20, 24}.isdisjoint(set(it_spaces)):
text += function.countryid2name[country] + " gains 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 160 and 22 in it_spaces:
text += function.countryid2name[country] + " gains 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 162:
ge_spaces = function.control_space_list('ge', db)
c162_point = sum(1 for space in (13, 19, 25) if space in it_spaces or space in ge_spaces)
if c162_point > 0:
text += function.countryid2name[country] + " gains " + str(c162_point) + (" points" if c162_point > 1 else " point") + " from <b>" + card[1] + "</b>\n"
extra_point += c162_point
if card[0] == 163 and 12 in it_spaces:
text += function.countryid2name[country] + " gains 1 point from <b>" + card[1] + "</b>\n"
extra_point += 1
if card[0] == 164:
navy_count = db.execute("select count(*) from piece where control = 'it' and type = 'navy' and location != 'none';").fetchall()
if navy_count[0][0] > 0:
text += function.countryid2name[country] + " gains " + str(navy_count[0][0]) + (" points" if navy_count[0][0] > 1 else " point") + " from <b>" + card[1] + "</b>\n"
extra_point += navy_count[0][0]
if extra_point > 0:
return extra_point, text
else:
return None
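# --- Illustrative sketch (not part of the original module) ---
# Two scoring patterns recur above: "not {a, b}.isdisjoint(spaces)"
# awards a point when at least one listed space is controlled, while the
# per-card counters award one point per controlled space. Both in
# isolation (space ids are hypothetical):
def _demo_card_points(controlled_spaces):
    controlled = set(controlled_spaces)
    any_of = 1 if not {39, 46}.isdisjoint(controlled) else 0      # at least one of 39/46
    per_space = sum(1 for s in (48, 49, 51) if s in controlled)   # one point each
    return any_of + per_space

# _demo_card_points([39, 48, 51]) -> 3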
#----------------------Draw Step-----------------------
def status_draw_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_draw_handler_info - ' + country)
s = {'ge': [48], 'jp':[], 'it':[], 'uk':[], 'su':[295, 298], 'us':[366], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'ge':
if card[0] == 48 and active_country == 'ge':
card_count = db.execute("select count(*) from card where location = 'deck' and control = 'ge';").fetchall()
if card_count[0][0] != 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_draw', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'su':
if card[0] == 295 and active_country == 'su':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_draw', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 298 and active_country == 'su':
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_draw', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_draw', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 366 and active_country == 'us':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_draw', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_draw', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - Beginning of " + function.countryid2name[active_country] + " Draw step:"
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#----------------------Discard Step-----------------------
def status_discard_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_discard_handler_info - ' + country)
s = {'ge': [], 'jp':[], 'it':[], 'uk':[], 'su':[301], 'us':[351], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
active_country = session.handler_list[handler_id].active_country_id
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'jp':
response_count = db.execute("select count(*) from card where location = 'hand' and control ='jp' and type = 'Response';").fetchall()[0][0]
if country == 'su':
ba_count = db.execute("select count(*) from card where location = 'hand' and control ='su' and type = 'Build Army';").fetchall()[0][0]
for card in avaliable_card:
if country == 'su':
if card[0] == 301 and active_country == 'su':
if ba_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_discard', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Build Army in hand', callback_data="['status_discard', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 351 and active_country == 'us':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_discard', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_discard', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - Beginning of " + function.countryid2name[active_country] + " Discard step:"
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#----------------------Build Location-----------------------
def status_build_location(country, db):
print('in status_build_location - ' + country)
s = {'ge': [], 'jp':[], 'it':[], 'uk':[220, 224, 226], 'su':[], 'us':[], 'fr':[228], 'ch':[345]}
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
extra_space_list = []
for card in avaliable_card:
if country == 'uk':
if card[0] == 220:
extra_space_list.append(41)
if card[0] == 224:
extra_space_list.append(32)
if card[0] == 226:
extra_space_list.append(21)
if country == 'fr':
if card[0] == 228:
extra_space_list.append(13)
# Note: 'ch' card 345 is fetched but not mapped to a build space here;
# it only affects status_vp_location below.
return extra_space_list
#----------------------Battle Location-----------------------
def status_battle_location(country, db):
print('in status_battle_location - ' + country)
s = {'ge': [51], 'jp':[], 'it':[], 'uk':[], 'su':[], 'us':[], 'fr':[], 'ch':[]}
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
extra_space_list = []
for card in avaliable_card:
if country == 'ge':
if card[0] == 51:
# Card 51 is the only id the 'ge' query above can return; it
# unlocks space 16 as an extra battle location.
extra_space_list.append(16)
return extra_space_list
#----------------------VP Location-----------------------
def status_vp_location(country, space_list, db):
print('in status_vp_location - ' + country)
s = {'ge': [281], 'jp':[], 'it':[281], 'uk':[225, 226], 'su':[277],'us':[], 'fr':[228], 'ch':[345]}
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
for card in avaliable_card:
if country in ['ge', 'it']:
if card[0] == 281 and 24 in space_list:
space_list.remove(24)
if country == 'uk':
if card[0] == 225:
space_list.append(1)
if card[0] == 226:
space_list.append(21)
if country == 'su':
if card[0] == 277:
space_list.append(30)
if country == 'fr':
if card[0] == 228:
space_list.append(13)
if country == 'ch':
if card[0] == 345:
space_list.append(35)
return space_list
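# --- Usage note (an assumption about callers, not from this file) ---
# status_vp_location mutates the space_list it receives in place
# (remove/append) and returns that same object, so a caller that needs
# the unmodified list afterwards should pass a copy, e.g.:
#     vp_spaces = status_vp_location('uk', list(base_spaces), db)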
#----------------------Supply-----------------------
def status_supply(db):
print('in status_supply')
s = [221, 279, 281, 283]
questionmarks = '?' * len(s)
avaliable_card = db.execute("select cardid, name from card where location in ('played', 'turn') and cardid in ({});".format(','.join(questionmarks)), (s)).fetchall()
if len(avaliable_card) > 0:
for card in avaliable_card:
if card[0] == 221:
db.execute("update piece set supply = 1 where control = 'fr';")
if card[0] == 279:
db.execute("update piece set supply = 1 where control = 'ch' and type = 'army';")
if card[0] == 281:
db.execute("update piece set supply = 0 where control in ('ge','it') and location = '24';")
if card[0] == 283:
db.execute("update piece set supply = 1 where control = 'su' and type = 'army';")
if cardfunction.c62_used:
db.execute("update piece set supply = 1 where control = 'ge';")
db.commit()
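# --- Illustrative sketch (hypothetical schema, not from this module) ---
# status_supply issues several UPDATEs and commits once at the end, so
# all supply-flag changes become visible together. The same pattern in
# isolation:
import sqlite3

def _demo_batched_updates(rules=(('fr', 1), ('su', 1))):
    db = sqlite3.connect(':memory:')
    db.execute("create table piece (control text, supply integer default 0);")
    db.executemany("insert into piece (control) values (?);", [('fr',), ('su',)])
    for control, supply in rules:
        db.execute("update piece set supply = ? where control = ?;", (supply, control))
    db.commit()  # one commit covering every update above
    return db.execute("select control, supply from piece;").fetchall()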
def status_supply_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_supply_handler_info - ' + country)
s = {'ge': [], 'jp':[114], 'it':[], 'uk':[], 'su':[], 'us':[], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
for card in avaliable_card:
if country == 'jp':
if card[0] == 114:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_supply', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_supply', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[country] + " - Supply step:"
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
#----------------------Economic Warfare-----------------------
def status_ew_handler(bot, cardid, active_country, passive_country, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_ew_handler - ' + active_country)
s = [39, 46, 53, 349]
card_name = db.execute("select name from card where cardid = :cardid;", {'cardid':cardid}).fetchall()
questionmarks = '?' * len(s)
avaliable_card = db.execute("select cardid, name from card where location = 'played' and cardid in ({});".format(','.join(questionmarks)), (s)).fetchall()
extra_number = 0
if len(avaliable_card) > 0:
for card in avaliable_card:
if card[0] == 39 and passive_country == 'ge':
extra_number -= 2
if card[0] == 46 and passive_country == 'ge':
cardfunction.c46(bot, active_country, session)
if card[0] == 53 and active_country == 'ge' and 'Submarine' in card_name[0][0]:
if 11 in function.control_space_list('ge', db):
extra_number += 2
else:
extra_number += 1
if card[0] == 349 and active_country == 'us':
extra_number += 1
if cardfunction.c62_used:
extra_number -= 4
cardfunction.c62_used = False
return extra_number
def status_ew_handler_info(country, handler_id, session):
db = sqlite3.connect(session.get_db_dir())
print('in status_ew_handler_info - ' + country)
s = {'ge': [61, 67], 'jp':[123], 'it':[171, 174, 179], 'uk':[], 'su':[], 'us':[367], 'fr':[], 'ch':[]}
chat_id = db.execute("select playerid from country where id = :id;",{'id':country}).fetchall()
passive_country = session.handler_list[handler_id].passive_country_id
active_country = session.handler_list[handler_id].active_country_id
played_card = session.handler_list[handler_id].card_id
card_name = db.execute("select name from card where cardid = :cardid;",{'cardid':played_card}).fetchall()
questionmarks = '?' * len(s[country])
avaliable_card = db.execute("select cardid, name from card where (location = 'played' or (location = 'hand' and type = 'Bolster')) and cardid in ({});".format(','.join(questionmarks)), (s[country])).fetchall()
if len(avaliable_card) > 0:
keyboard = []
if country == 'it':
air_count = db.execute("select count(*) from piece where control ='it' and type = 'air' and location != 'none';").fetchall()[0][0]
for card in avaliable_card:
if country == 'ge':
if card[0] == 61 and active_country == 'ge' and 'Submarine' in card_name[0][0]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_ew', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 67 and active_country == 'ge':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_ew', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'jp':
if card[0] == 123 and passive_country == 'jp' and 38 in function.control_air_space_list('jp', db):
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_ew', '{}', {}, {}]".format(country, handler_id, card[0]))])
if country == 'it':
if card[0] == 171 and passive_country == 'it' and 'Bomb' in card_name[0][0]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_ew', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 174 and active_country == 'ge' and 'Submarine' in card_name[0][0]:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_ew', '{}', {}, {}]".format(country, handler_id, card[0]))])
if card[0] == 179 and passive_country == 'it':
if air_count > 0:
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_ew', '{}', {}, {}]".format(country, handler_id, card[0]))])
else:
keyboard.append([InlineKeyboardButton(card[1] + ' - No Air Force on the board', callback_data="['status_ew', '{}', {}, 'no_play', {}]".format(country, handler_id, card[0]))])
if country == 'us':
if card[0] == 367 and active_country == 'us':
keyboard.append([InlineKeyboardButton(card[1], callback_data="['status_ew', '{}', {}, {}]".format(country, handler_id, card[0]))])
if len(keyboard) > 0:
keyboard.append([InlineKeyboardButton('Pass', callback_data="['status_ew', '{}', {}, 'pass']".format(country, handler_id))])
reply_markup = InlineKeyboardMarkup(keyboard)
text = function.countryid2name[passive_country] + " is attacked by " + card_name[0][0]
else:
reply_markup = None
text = None
else:
reply_markup = None
text = None
return chat_id[0][0], text, reply_markup
info_list = {'Battle':status_battle_handler_info,
'Build':status_build_handler_info,
'Remove':status_remove_handler_info,
'Recruit':status_recuit_handler_info,
'Beginning of Play step':status_before_play_handler_info,
'After Playing a card':status_after_play_handler_info,
'Using Status':status_play_status_handler_info,
'Using Bolster':status_play_bolster_handler_info,
'Beginning of Air step':status_air_handler_info,
'Beginning of Victory step':status_victory_handler_info,
'Beginning of Draw step':status_draw_handler_info,
'Beginning of Discard step':status_discard_handler_info,
'Checking Supply':status_supply_handler_info,
'Economic Warfare':status_ew_handler_info,
'Deploy/Marshal':status_deploy_handler_info
}
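# --- Illustrative sketch (hypothetical names, not part of the module) ---
# info_list is a dispatch table from a step label to the matching
# *_handler_info builder; every entry shares the
# (country, handler_id, session) signature and returns
# (chat_id, text, reply_markup). A caller can therefore resolve and
# invoke uniformly:
def _demo_dispatch(label, country, handler_id, session, table):
    builder = table.get(label)
    if builder is None:
        return None, None, None
    return builder(country, handler_id, session)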
| 75.076753
| 394
| 0.575382
| 12,238
| 101,729
| 4.600752
| 0.03293
| 0.030904
| 0.051719
| 0.06369
| 0.898995
| 0.881323
| 0.859868
| 0.851822
| 0.831646
| 0.811523
| 0
| 0.031938
| 0.245004
| 101,729
| 1,354
| 395
| 75.132201
| 0.701139
| 0.020112
| 0
| 0.63876
| 0
| 0.037209
| 0.186682
| 0.012092
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021705
| false
| 0.044961
| 0.008527
| 0
| 0.051163
| 0.026357
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7df3ff6948463e3298bdb76accca2358d38d5af0
| 117
|
py
|
Python
|
src/core/utils.py
|
brianl9995/payinv
|
7fc2160c2c9bbb9568a659ff3edf2526142d33fc
|
[
"MIT"
] | 2
|
2019-09-21T23:36:49.000Z
|
2019-10-02T23:31:21.000Z
|
src/core/utils.py
|
brianl9995/payinv
|
7fc2160c2c9bbb9568a659ff3edf2526142d33fc
|
[
"MIT"
] | 2
|
2019-10-04T13:51:43.000Z
|
2021-06-10T21:57:55.000Z
|
src/core/utils.py
|
brianl9995/payinv
|
7fc2160c2c9bbb9568a659ff3edf2526142d33fc
|
[
"MIT"
] | 2
|
2019-10-02T23:31:22.000Z
|
2020-06-07T14:57:55.000Z
|
from django.utils.translation import ugettext as _
def yes_or_no(value):
return _('Yes') if value else _('No')
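# --- Compatibility note (an assumption about the Django version in use,
# not from the original file): ugettext was deprecated in Django 3.0 and
# removed in 4.0; on modern Django the equivalent import is
#     from django.utils.translation import gettext as _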
| 19.5
| 50
| 0.717949
| 18
| 117
| 4.388889
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17094
| 117
| 5
| 51
| 23.4
| 0.814433
| 0
| 0
| 0
| 0
| 0
| 0.042735
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
b445225f04869827e06dd8a4a7e09dcfe72ca55e
| 58,642
|
py
|
Python
|
spiketoolkit/validation/quality_metrics.py
|
teristam/spiketoolk
|
0ae7adabce46cf620c3627ee0093d890996ef355
|
[
"MIT"
] | 55
|
2018-11-26T21:57:45.000Z
|
2021-06-14T15:27:50.000Z
|
spiketoolkit/validation/quality_metrics.py
|
teristam/spiketoolk
|
0ae7adabce46cf620c3627ee0093d890996ef355
|
[
"MIT"
] | 364
|
2018-11-26T21:57:08.000Z
|
2021-07-27T12:29:28.000Z
|
spiketoolkit/validation/quality_metrics.py
|
teristam/spiketoolk
|
0ae7adabce46cf620c3627ee0093d890996ef355
|
[
"MIT"
] | 40
|
2018-11-23T12:33:44.000Z
|
2021-09-28T10:27:07.000Z
|
from .quality_metric_classes.metric_data import MetricData
from .quality_metric_classes.amplitude_cutoff import AmplitudeCutoff
from .quality_metric_classes.silhouette_score import SilhouetteScore
from .quality_metric_classes.num_spikes import NumSpikes
from .quality_metric_classes.firing_rate import FiringRate
from .quality_metric_classes.d_prime import DPrime
from .quality_metric_classes.l_ratio import LRatio
from .quality_metric_classes.presence_ratio import PresenceRatio
from .quality_metric_classes.isi_violation import ISIViolation
from .quality_metric_classes.snr import SNR
from .quality_metric_classes.isolation_distance import IsolationDistance
from .quality_metric_classes.noise_overlap import NoiseOverlap
from .quality_metric_classes.nearest_neighbor import NearestNeighbor
from .quality_metric_classes.drift_metric import DriftMetric
from .quality_metric_classes.parameter_dictionaries import update_all_param_dicts_with_kwargs
from collections import OrderedDict
from copy import deepcopy
import pandas
all_metrics_list = ["num_spikes", "firing_rate", "presence_ratio", "isi_violation", "amplitude_cutoff", "snr",
"max_drift", "cumulative_drift", "silhouette_score", "isolation_distance", "l_ratio",
"d_prime", "noise_overlap", "nn_hit_rate", "nn_miss_rate"]
def get_quality_metrics_list():
return all_metrics_list
def compute_num_spikes(
sorting,
sampling_frequency=None,
unit_ids=None,
**kwargs
):
"""
Computes and returns the number of spikes for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
save_property_or_features: bool
If True, the metric is saved as sorting property
verbose: bool
If True, will be verbose in metric computation
Returns
----------
num_spikes: np.ndarray
The number of spikes of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
apply_filter=False, freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=None, verbose=params_dict['verbose'], raise_if_empty=False)
ns = NumSpikes(metric_data=md)
num_spikes = ns.compute_metric(**kwargs)
return num_spikes
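# --- Usage sketch (an assumption, not from this module) ---
# A minimal call pattern, assuming the companion spikeextractors package
# is installed; the spike times and labels below are made up.
#
# import numpy as np
# import spikeextractors as se
# sorting = se.NumpySortingExtractor()
# sorting.set_times_labels(times=np.array([10, 20, 30, 55]),
#                          labels=np.array([0, 0, 1, 1]))
# num_spikes = compute_num_spikes(sorting, sampling_frequency=30000.)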
def compute_firing_rates(
sorting,
duration_in_frames,
sampling_frequency=None,
unit_ids=None,
**kwargs
):
"""
Computes and returns the firing rates for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
duration_in_frames: int
Length of recording (in frames).
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
save_property_or_features: bool
If True, the metric is saved as sorting property
verbose: bool
If True, will be verbose in metric computation
Returns
----------
firing_rates: np.ndarray
The firing rates of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
apply_filter=False, freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=duration_in_frames, verbose=params_dict['verbose'])
fr = FiringRate(metric_data=md)
firing_rates = fr.compute_metric(**kwargs)
return firing_rates
def compute_presence_ratios(
sorting,
duration_in_frames,
sampling_frequency=None,
unit_ids=None,
**kwargs
):
"""
Computes and returns the presence ratios for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
duration_in_frames: int
Length of recording (in frames).
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
save_property_or_features: bool
If True, the metric is saved as sorting property
verbose: bool
If True, will be verbose in metric computation
Returns
----------
presence_ratios: np.ndarray
The presence ratios of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
apply_filter=False, freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=duration_in_frames, verbose=params_dict['verbose'])
pr = PresenceRatio(metric_data=md)
presence_ratios = pr.compute_metric(**kwargs)
return presence_ratios
def compute_isi_violations(
sorting,
duration_in_frames,
isi_threshold=ISIViolation.params['isi_threshold'],
min_isi=ISIViolation.params['min_isi'],
sampling_frequency=None,
unit_ids=None,
**kwargs
):
"""
Computes and returns the isi violations for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
duration_in_frames: int
Length of recording (in frames).
isi_threshold: float
The isi threshold for calculating isi violations
min_isi: float
The minimum expected isi value
sampling_frequency: float
The sampling frequency of the result. If None, will check to see if sampling frequency is in sorting extractor
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
save_property_or_features: bool
If True, the metric is saved as sorting property
verbose: bool
If True, will be verbose in metric computation
Returns
----------
isi_violations: np.ndarray
The isi violations of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=None,
apply_filter=False, freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=duration_in_frames, verbose=params_dict['verbose'])
iv = ISIViolation(metric_data=md)
isi_violations = iv.compute_metric(isi_threshold, min_isi, **kwargs)
return isi_violations
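# --- Illustrative sketch (independent of this module) ---
# A simplified proxy for what this metric measures: the fraction of
# inter-spike intervals shorter than a refractory threshold. The
# library's actual estimator follows a rate-based formulation and
# normalizes differently; the spike times here are hypothetical seconds.
import numpy as np

def _demo_isi_violation_fraction(spike_times_s, isi_threshold_s=0.0015):
    isis = np.diff(np.sort(np.asarray(spike_times_s, dtype=float)))
    if isis.size == 0:
        return 0.0
    return float(np.mean(isis < isi_threshold_s))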
def compute_amplitude_cutoffs(
sorting,
recording,
unit_ids=None,
**kwargs
):
"""
Computes and returns the amplitude cutoffs for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
apply_filter: bool
If True, recording is bandpass-filtered.
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
save_property_or_features: bool
If true, it will save amplitudes in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes.
peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or
both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: float
Frames after peak to compute amplitude
save_property_or_features: bool
If True, the metric is saved as sorting property
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
----------
amplitude_cutoffs: np.ndarray
The amplitude cutoffs of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=None, verbose=params_dict['verbose'])
md.compute_amplitudes(**kwargs)
ac = AmplitudeCutoff(metric_data=md)
amplitude_cutoffs = ac.compute_metric(**kwargs)
return amplitude_cutoffs
def compute_snrs(
sorting,
recording,
snr_mode=SNR.params['snr_mode'],
snr_noise_duration=SNR.params['snr_noise_duration'],
max_spikes_per_unit_for_snr=SNR.params['max_spikes_per_unit_for_snr'],
template_mode=SNR.params['template_mode'],
max_channel_peak=SNR.params['max_channel_peak'],
unit_ids=None,
**kwargs
):
"""
Computes and returns the SNRs in the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
snr_mode: str
Mode to compute noise SNR ('mad' | 'std' - default 'mad')
snr_noise_duration: float
Number of seconds to compute noise level from (default 10.0)
max_spikes_per_unit_for_snr: int
Maximum number of spikes to compute templates from (default 1000)
template_mode: str
Use 'mean' or 'median' to compute templates
max_channel_peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or
both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum channels per waveforms to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
save_property_or_features: bool
If true, it will save features in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
----------
snrs: np.ndarray
The snrs of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
duration_in_frames=None, freq_max=params_dict["freq_max"], unit_ids=unit_ids,
verbose=params_dict['verbose'])
snr = SNR(metric_data=md)
snrs = snr.compute_metric(snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, **kwargs)
return snrs
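# --- Illustrative sketch (independent of this module) ---
# The 'mad' noise mode scales the median absolute deviation to estimate
# the noise standard deviation, and SNR is then the template's peak
# amplitude over that noise level. A simplified numpy version (the
# library's exact computation may differ):
import numpy as np

def _demo_snr(template_1d, noise_trace):
    mad = np.median(np.abs(noise_trace - np.median(noise_trace)))
    noise_std = 1.4826 * mad  # MAD -> std under a Gaussian noise model
    return float(np.max(np.abs(template_1d)) / noise_std)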
def compute_noise_overlaps(
sorting,
recording,
num_channels_to_compare=NoiseOverlap.params['num_channels_to_compare'],
num_features=NoiseOverlap.params['num_features'],
num_knn=NoiseOverlap.params['num_knn'],
max_spikes_per_unit_for_noise_overlap=NoiseOverlap.params['max_spikes_per_unit_for_noise_overlap'],
unit_ids=None,
**kwargs
):
"""
Computes and returns the noise overlaps in the sorted dataset.
Noise overlap estimates the fraction of "noise events" in a cluster, i.e., above-threshold events not associated
with true firings of this or any of the other clustered units. A large noise overlap implies a high false-positive
rate.
Implementation from ml_ms4alg. For more information see https://doi.org/10.1016/j.neuron.2017.08.030
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
num_features: int
Number of features to use for PCA
num_knn: int
Number of nearest neighbors
max_spikes_per_unit_for_noise_overlap: int
Number of waveforms to use for noise overlaps estimation
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or
both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum channels per waveforms to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
save_property_or_features: bool
If true, it will save features in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
----------
noise_overlaps: np.ndarray
The noise_overlaps of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
duration_in_frames=None, freq_max=params_dict["freq_max"], unit_ids=unit_ids,
verbose=params_dict['verbose'])
noise_overlap = NoiseOverlap(metric_data=md)
noise_overlaps = noise_overlap.compute_metric(num_channels_to_compare,
max_spikes_per_unit_for_noise_overlap,
num_features, num_knn, **kwargs)
return noise_overlaps
def compute_silhouette_scores(
sorting,
recording,
max_spikes_for_silhouette=SilhouetteScore.params['max_spikes_for_silhouette'],
unit_ids=None,
**kwargs
):
"""
Computes and returns the silhouette scores in the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
max_spikes_for_silhouette: int
Max spikes to be used for silhouette metric
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or
both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum channels per waveforms to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
save_property_or_features: bool
If true, it will save features in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
----------
silhouette_scores: np.ndarray
The silhouette scores of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
duration_in_frames=None, freq_max=params_dict["freq_max"], unit_ids=unit_ids,
verbose=params_dict['verbose'])
md.compute_pca_scores(**kwargs)
silhouette_score = SilhouetteScore(metric_data=md)
silhouette_scores = silhouette_score.compute_metric(max_spikes_for_silhouette, **kwargs)
return silhouette_scores
def compute_d_primes(
sorting,
recording,
num_channels_to_compare=DPrime.params['num_channels_to_compare'],
max_spikes_per_cluster=DPrime.params['max_spikes_per_cluster'],
unit_ids=None,
**kwargs
):
"""
Computes and returns the d primes in the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or
both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum channels per waveforms to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap object (recommended for long recordings with many channels)
save_property_or_features: bool
If true, it will save features in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
-------
d_primes: np.ndarray
The d-primes of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=None, verbose=params_dict['verbose'])
md.compute_pca_scores(**kwargs)
d_prime = DPrime(metric_data=md)
d_primes = d_prime.compute_metric(num_channels_to_compare, max_spikes_per_cluster, **kwargs)
return d_primes
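# Hedged example (extractors as in the sketch above): the PC-space comparison
# neighborhood can be narrowed by overriding the default channel count:
#
#   d_primes = compute_d_primes(sorting, recording, num_channels_to_compare=7)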
def compute_l_ratios(
sorting,
recording,
num_channels_to_compare=LRatio.params['num_channels_to_compare'],
max_spikes_per_cluster=LRatio.params['max_spikes_per_cluster'],
unit_ids=None,
**kwargs
):
"""
Computes and returns the L-ratios in the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
Whether the maximum channel should be found among negative peaks ('neg'), positive
peaks ('pos'), or both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum number of channels per waveform to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap objects (recommended for long recordings with many channels)
save_property_or_features: bool
If True, features are saved in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
-------
l_ratios: np.ndarray
The L-ratios of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=None, verbose=params_dict['verbose'])
md.compute_pca_scores(**kwargs)
l_ratio = LRatio(metric_data=md)
l_ratios = l_ratio.compute_metric(num_channels_to_compare, max_spikes_per_cluster, **kwargs)
return l_ratios
def compute_isolation_distances(
sorting,
recording,
num_channels_to_compare=IsolationDistance.params['num_channels_to_compare'],
max_spikes_per_cluster=IsolationDistance.params['max_spikes_per_cluster'],
unit_ids=None,
**kwargs
):
"""
Computes and returns the isolation distances in the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
Whether the maximum channel should be found among negative peaks ('neg'), positive
peaks ('pos'), or both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum number of channels per waveform to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap objects (recommended for long recordings with many channels)
save_property_or_features: bool
If True, features are saved in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
-------
isolation_distances: np.ndarray
The isolation distances of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=None, verbose=params_dict['verbose'])
md.compute_pca_scores(**kwargs)
isolation_distance = IsolationDistance(metric_data=md)
isolation_distances = isolation_distance.compute_metric(num_channels_to_compare, max_spikes_per_cluster,
**kwargs)
return isolation_distances
def compute_nn_metrics(
sorting,
recording,
num_channels_to_compare=NearestNeighbor.params['num_channels_to_compare'],
max_spikes_per_cluster=NearestNeighbor.params['max_spikes_per_cluster'],
max_spikes_for_nn=NearestNeighbor.params['max_spikes_for_nn'],
n_neighbors=NearestNeighbor.params['n_neighbors'],
unit_ids=None,
**kwargs
):
"""
Computes and returns the nearest neighbor metrics in the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
max_spikes_for_nn: int
Max spikes to be used for nearest-neighbors calculation
n_neighbors: int
Number of neighbors to compare
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
Whether the maximum channel should be found among negative peaks ('neg'), positive
peaks ('pos'), or both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum number of channels per waveform to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap objects (recommended for long recordings with many channels)
save_property_or_features: bool
If True, features are saved in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
-------
nn_metrics: np.ndarray
The nearest neighbor metrics of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=None, verbose=params_dict['verbose'])
md.compute_pca_scores(**kwargs)
nn = NearestNeighbor(metric_data=md)
nn_metrics = nn.compute_metric(num_channels_to_compare, max_spikes_per_cluster,
max_spikes_for_nn, n_neighbors, **kwargs)
return nn_metrics
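# Note: as used in compute_quality_metrics below, NearestNeighbor.compute_metric
# returns a (hit_rates, miss_rates) pair, so a hedged unpacking sketch is:
#
#   nn_hit_rates, nn_miss_rates = compute_nn_metrics(sorting, recording, n_neighbors=4)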
def compute_drift_metrics(
sorting,
recording,
drift_metrics_interval_s=DriftMetric.params['drift_metrics_interval_s'],
drift_metrics_min_spikes_per_interval=DriftMetric.params['drift_metrics_min_spikes_per_interval'],
unit_ids=None,
**kwargs
):
"""
Computes and returns the drift metrics in the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
drift_metrics_interval_s: float
Time period for evaluating drift.
drift_metrics_min_spikes_per_interval: int
Minimum number of spikes for evaluating drift metrics per interval.
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
Whether the maximum channel should be found among negative peaks ('neg'), positive
peaks ('pos'), or both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum number of channels per waveform to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap objects (recommended for long recordings with many channels)
save_property_or_features: bool
If True, features are saved in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
----------
dm_metrics: np.ndarray
The drift metrics of the sorted units.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=recording.get_sampling_frequency(), recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=None, verbose=params_dict['verbose'])
md.compute_pca_scores(**kwargs)
dm = DriftMetric(metric_data=md)
dm_metrics = dm.compute_metric(drift_metrics_interval_s, drift_metrics_min_spikes_per_interval, **kwargs)
return dm_metrics
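# Note: as used in compute_quality_metrics below, DriftMetric.compute_metric
# returns (max_drifts, cumulative_drifts), so a hedged unpacking sketch is:
#
#   max_drifts, cumulative_drifts = compute_drift_metrics(sorting, recording)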
def compute_quality_metrics(
sorting,
recording=None,
duration_in_frames=None,
sampling_frequency=None,
metric_names=None,
unit_ids=None,
as_dataframe=False,
isi_threshold=ISIViolation.params['isi_threshold'],
min_isi=ISIViolation.params['min_isi'],
snr_mode=SNR.params['snr_mode'],
snr_noise_duration=SNR.params['snr_noise_duration'],
max_spikes_per_unit_for_snr=SNR.params['max_spikes_per_unit_for_snr'],
template_mode=SNR.params['template_mode'],
max_channel_peak=SNR.params['max_channel_peak'],
max_spikes_per_unit_for_noise_overlap=NoiseOverlap.params['max_spikes_per_unit_for_noise_overlap'],
noise_overlap_num_features=NoiseOverlap.params['num_features'],
noise_overlap_num_knn=NoiseOverlap.params['num_knn'],
drift_metrics_interval_s=DriftMetric.params['drift_metrics_interval_s'],
drift_metrics_min_spikes_per_interval=DriftMetric.params['drift_metrics_min_spikes_per_interval'],
max_spikes_for_silhouette=SilhouetteScore.params['max_spikes_for_silhouette'],
num_channels_to_compare=13,
max_spikes_per_cluster=500,
max_spikes_for_nn=NearestNeighbor.params['max_spikes_for_nn'],
n_neighbors=NearestNeighbor.params['n_neighbors'],
**kwargs
):
"""
Computes and returns all specified metrics for the sorted dataset.
Parameters
----------
sorting: SortingExtractor
The sorting result to be evaluated.
recording: RecordingExtractor
The given recording extractor from which to extract amplitudes
duration_in_frames: int
Length of recording (in frames).
sampling_frequency: float
The sampling frequency of the result. If None, it is taken from the sorting extractor when available
metric_names: list
List of metric names to be computed
unit_ids: list
List of unit ids to compute metric for. If not specified, all units are used
as_dataframe: bool
If True, a pandas DataFrame of metrics is returned; otherwise, a dictionary is returned.
isi_threshold: float
The ISI threshold for calculating ISI violations
min_isi: float
The minimum expected ISI value
snr_mode: str
Mode to compute noise SNR ('mad' | 'std' - default 'mad')
snr_noise_duration: float
Number of seconds to compute noise level from (default 10.0)
max_spikes_per_unit_for_snr: int
Maximum number of spikes to compute templates for SNR from (default 1000)
template_mode: str
Use 'mean' or 'median' to compute templates
max_channel_peak: str
Whether the maximum channel should be found among negative peaks ('neg'), positive peaks ('pos'), or both ('both' - default)
max_spikes_per_unit_for_noise_overlap: int
Maximum number of spikes to compute templates for noise overlap from (default 1000)
noise_overlap_num_features: int
Number of features to use for PCA for noise overlap
noise_overlap_num_knn: int
Number of nearest neighbors for noise overlap
drift_metrics_interval_s: float
Time period for evaluating drift.
drift_metrics_min_spikes_per_interval: int
Minimum number of spikes for evaluating drift metrics per interval
max_spikes_for_silhouette: int
Max spikes to be used for silhouette metric
num_channels_to_compare: int
The number of channels to be used for the PC extraction and comparison
max_spikes_per_cluster: int
Max spikes to be used from each unit
max_spikes_for_nn: int
Max spikes to be used for nearest-neighbors calculation
n_neighbors: int
Number of neighbors to compare
**kwargs: keyword arguments
Keyword arguments among the following:
method: str
If 'absolute' (default), absolute amplitudes in uV are returned.
If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes
peak: str
Whether the maximum channel should be found among negative peaks ('neg'), positive
peaks ('pos'), or both ('both' - default)
frames_before: int
Frames before peak to compute amplitude
frames_after: int
Frames after peak to compute amplitude
apply_filter: bool
If True, recording is bandpass-filtered
freq_min: float
High-pass frequency for optional filter (default 300 Hz)
freq_max: float
Low-pass frequency for optional filter (default 6000 Hz)
grouping_property: str
Property to group channels. E.g. if the recording extractor has the 'group' property and
'grouping_property' is 'group', then waveforms are computed group-wise.
ms_before: float
Time period in ms to cut waveforms before the spike events
ms_after: float
Time period in ms to cut waveforms after the spike events
dtype: dtype
The numpy dtype of the waveforms
compute_property_from_recording: bool
If True and 'grouping_property' is given, the property of each unit is assigned as the corresponding
property of the recording extractor channel on which the average waveform is the largest
max_channels_per_waveforms: int or None
Maximum number of channels per waveform to return. If None, all channels are returned
n_jobs: int
Number of parallel jobs (default 1)
memmap: bool
If True, waveforms are saved as memmap objects (recommended for long recordings with many channels)
save_property_or_features: bool
If True, features are saved in the sorting extractor
recompute_info: bool
If True, waveforms are recomputed
max_spikes_per_unit: int
The maximum number of spikes to extract per unit
seed: int
Random seed for reproducibility
verbose: bool
If True, will be verbose in metric computation
Returns
-------
metrics: dict or pandas.DataFrame
Dictionary or DataFrame containing the computed metrics.
"""
params_dict = update_all_param_dicts_with_kwargs(kwargs)
metrics_dict = OrderedDict()
if metric_names is None:
metric_names = all_metrics_list
else:
bad_metrics = []
for m in metric_names:
if m not in all_metrics_list:
bad_metrics.append(m)
if len(bad_metrics) > 0:
raise ValueError(f"Improper feature names: {str(bad_metrics)}. The following features names can be "
f"calculated: {str(all_metrics_list)}")
if unit_ids is None:
unit_ids = sorting.get_unit_ids()
md = MetricData(sorting=sorting, sampling_frequency=sampling_frequency, recording=recording,
apply_filter=params_dict["apply_filter"], freq_min=params_dict["freq_min"],
freq_max=params_dict["freq_max"], unit_ids=unit_ids,
duration_in_frames=duration_in_frames, verbose=params_dict['verbose'])
if "firing_rate" in metric_names or "presence_ratio" in metric_names or "isi_violation" in metric_names:
if recording is None and duration_in_frames is None:
raise ValueError(
"duration_in_frames and recording cannot both be None when computing firing_rate, "
"presence_ratio, and isi_violation")
if "max_drift" in metric_names or "cumulative_drift" in metric_names or "silhouette_score" in metric_names \
or "isolation_distance" in metric_names or "l_ratio" in metric_names or "d_prime" in metric_names \
or "nn_hit_rate" in metric_names or "nn_miss_rate" in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing max_drift, cumulative_drift, "
"silhouette_score isolation_distance, l_ratio, d_prime, nn_hit_rate, or amplitude_cutoff.")
else:
md.compute_pca_scores(**kwargs)
if "amplitude_cutoff" in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing amplitude cutoffs.")
else:
md.compute_amplitudes(**kwargs)
if "snr" in metric_names:
if recording is None:
raise ValueError("The recording cannot be None when computing snr.")
if "num_spikes" in metric_names:
ns = NumSpikes(metric_data=md)
num_spikes = ns.compute_metric(**kwargs)
metrics_dict['num_spikes'] = num_spikes
if "firing_rate" in metric_names:
fr = FiringRate(metric_data=md)
firing_rates = fr.compute_metric(**kwargs)
metrics_dict['firing_rate'] = firing_rates
if "presence_ratio" in metric_names:
pr = PresenceRatio(metric_data=md)
presence_ratios = pr.compute_metric(**kwargs)
metrics_dict['presence_ratio'] = presence_ratios
if "isi_violation" in metric_names:
iv = ISIViolation(metric_data=md)
isi_violations = iv.compute_metric(isi_threshold, min_isi, **kwargs)
metrics_dict['isi_violation'] = isi_violations
if "amplitude_cutoff" in metric_names:
ac = AmplitudeCutoff(metric_data=md)
amplitude_cutoffs = ac.compute_metric(**kwargs)
metrics_dict['amplitude_cutoff'] = amplitude_cutoffs
if "snr" in metric_names:
snr = SNR(metric_data=md)
snrs = snr.compute_metric(snr_mode, snr_noise_duration, max_spikes_per_unit_for_snr,
template_mode, max_channel_peak, **kwargs)
metrics_dict['snr'] = snrs
if "max_drift" in metric_names or "cumulative_drift" in metric_names:
dm = DriftMetric(metric_data=md)
max_drifts, cumulative_drifts = dm.compute_metric(drift_metrics_interval_s,
drift_metrics_min_spikes_per_interval, **kwargs)
if "max_drift" in metric_names:
metrics_dict['max_drift'] = max_drifts
if "cumulative_drift" in metric_names:
metrics_dict['cumulative_drift'] = cumulative_drifts
if "silhouette_score" in metric_names:
silhouette_score = SilhouetteScore(metric_data=md)
silhouette_scores = silhouette_score.compute_metric(max_spikes_for_silhouette, **kwargs)
metrics_dict['silhouette_score'] = silhouette_scores
if "isolation_distance" in metric_names:
isolation_distance = IsolationDistance(metric_data=md)
isolation_distances = isolation_distance.compute_metric(num_channels_to_compare, max_spikes_per_cluster,
**kwargs)
metrics_dict['isolation_distance'] = isolation_distances
if "noise_overlap" in metric_names:
noise_overlap = NoiseOverlap(metric_data=md)
noise_overlaps = noise_overlap.compute_metric(num_channels_to_compare,
max_spikes_per_unit_for_noise_overlap,
noise_overlap_num_features,
noise_overlap_num_knn,
**kwargs)
metrics_dict['noise_overlap'] = noise_overlaps
if "l_ratio" in metric_names:
l_ratio = LRatio(metric_data=md)
l_ratios = l_ratio.compute_metric(num_channels_to_compare, max_spikes_per_cluster, **kwargs)
metrics_dict['l_ratio'] = l_ratios
if "d_prime" in metric_names:
d_prime = DPrime(metric_data=md)
d_primes = d_prime.compute_metric(num_channels_to_compare, max_spikes_per_cluster, **kwargs)
metrics_dict['d_prime'] = d_primes
if "nn_hit_rate" in metric_names or "nn_miss_rate" in metric_names:
nn = NearestNeighbor(metric_data=md)
nn_hit_rates, nn_miss_rates = nn.compute_metric(num_channels_to_compare, max_spikes_per_cluster,
max_spikes_for_nn, n_neighbors, **kwargs)
if "nn_hit_rate" in metric_names:
metrics_dict['nn_hit_rate'] = nn_hit_rates
if "nn_miss_rate" in metric_names:
metrics_dict['nn_miss_rate'] = nn_miss_rates
if as_dataframe:
metrics = pandas.DataFrame.from_dict(metrics_dict)
metrics = metrics.rename(index=dict(enumerate(unit_ids)))
else:
metrics = metrics_dict
return metrics
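# Putting it together -- a hedged sketch that computes a small metrics table
# (extractors as in the earlier sketches; metric names must appear in
# all_metrics_list):
#
#   metrics_df = compute_quality_metrics(sorting, recording,
#                                        metric_names=["num_spikes", "firing_rate", "snr"],
#                                        as_dataframe=True)
#   print(metrics_df)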
| 44.662605
| 120
| 0.652297
| 7,205
| 58,642
| 5.099098
| 0.042748
| 0.02153
| 0.018509
| 0.011323
| 0.883312
| 0.87041
| 0.852555
| 0.848063
| 0.839571
| 0.831487
| 0
| 0.002869
| 0.292811
| 58,642
| 1,312
| 121
| 44.696646
| 0.883027
| 0.582859
| 0
| 0.665037
| 0
| 0
| 0.113447
| 0.024679
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036675
| false
| 0
| 0.04401
| 0.002445
| 0.117359
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b4540eaebd9ffd0e09119f8fb6fe760b4481f50e
| 3,544
|
py
|
Python
|
tests/unit_tests/test_tethys_services/test_models/test_SpatialDatasetService.py
|
ezrajrice/tethys
|
238271ebb09913f1f57b0d127fd5c81bb4780a0a
|
[
"BSD-2-Clause"
] | 79
|
2015-10-05T13:13:28.000Z
|
2022-02-01T12:30:33.000Z
|
tests/unit_tests/test_tethys_services/test_models/test_SpatialDatasetService.py
|
ezrajrice/tethys
|
238271ebb09913f1f57b0d127fd5c81bb4780a0a
|
[
"BSD-2-Clause"
] | 542
|
2015-08-12T22:11:32.000Z
|
2022-03-29T22:18:08.000Z
|
tests/unit_tests/test_tethys_services/test_models/test_SpatialDatasetService.py
|
Aquaveo/tethys
|
15f67c3fb9458d3af2733542be5ea6391f33b222
|
[
"BSD-2-Clause"
] | 71
|
2016-01-16T01:03:41.000Z
|
2022-03-31T17:55:54.000Z
|
from tethys_sdk.testing import TethysTestCase
import tethys_services.models as service_model
from unittest import mock
class SpatialDatasetServiceTests(TethysTestCase):
def set_up(self):
pass
def tear_down(self):
pass
def test__str__(self):
sds = service_model.SpatialDatasetService(
name='test_sds',
)
self.assertEqual('test_sds', sds.__str__())
@mock.patch('tethys_services.models.GeoServerSpatialDatasetEngine')
def test_get_engine_geo_server(self, mock_sds):
sds = service_model.SpatialDatasetService(
name='test_sds',
engine=service_model.SpatialDatasetService.GEOSERVER,
endpoint='http://localhost/geoserver/rest/',
public_endpoint='http://publichost/geoserver/rest/',
username='foo',
password='password'
)
sds.save()
ret = sds.get_engine()
# Check result
mock_sds.assert_called_with(endpoint='http://localhost/geoserver/rest/', password='password', username='foo')
self.assertEqual('http://publichost/geoserver/rest/', ret.public_endpoint)
@mock.patch('tethys_services.models.TDSCatalog')
@mock.patch('tethys_services.models.session_manager')
def test_get_engine_thredds(self, mock_session_manager, mock_TDSCatalog):
sds = service_model.SpatialDatasetService(
name='test_sds',
engine=service_model.SpatialDatasetService.THREDDS,
endpoint='http://localhost/thredds/',
public_endpoint='http://publichost/thredds/',
username='foo',
password='password'
)
sds.save()
ret = sds.get_engine()
mock_session_manager.set_session_options.assert_called_with(auth=('foo', 'password'))
mock_TDSCatalog.assert_called_with('http://localhost/thredds/catalog.xml')
# Check result
self.assertEqual(mock_TDSCatalog(), ret)
@mock.patch('tethys_services.models.TDSCatalog')
@mock.patch('tethys_services.models.session_manager')
def test_get_engine_thredds_no_trailing_slashes(self, mock_session_manager, mock_TDSCatalog):
sds = service_model.SpatialDatasetService(
name='test_sds',
engine=service_model.SpatialDatasetService.THREDDS,
endpoint='http://localhost/thredds',
public_endpoint='http://publichost/thredds',
username='foo',
password='password'
)
sds.save()
ret = sds.get_engine()
mock_session_manager.set_session_options.assert_called_with(auth=('foo', 'password'))
mock_TDSCatalog.assert_called_with('http://localhost/thredds/catalog.xml')
# Check result
self.assertEqual(mock_TDSCatalog(), ret)
@mock.patch('tethys_services.models.TDSCatalog')
@mock.patch('tethys_services.models.session_manager')
def test_get_engine_thredds_no_username_password(self, mock_session_manager, mock_TDSCatalog):
sds = service_model.SpatialDatasetService(
name='test_sds',
engine=service_model.SpatialDatasetService.THREDDS,
endpoint='http://localhost/thredds',
public_endpoint='http://publichost/thredds',
)
sds.save()
ret = sds.get_engine()
mock_session_manager.set_session_options.assert_not_called()
mock_TDSCatalog.assert_called_with('http://localhost/thredds/catalog.xml')
# Check result
self.assertEqual(mock_TDSCatalog(), ret)
| 38.107527
| 117
| 0.672686
| 371
| 3,544
| 6.132075
| 0.169811
| 0.052747
| 0.130549
| 0.070769
| 0.780659
| 0.738022
| 0.738022
| 0.717363
| 0.717363
| 0.717363
| 0
| 0
| 0.217551
| 3,544
| 92
| 118
| 38.521739
| 0.820411
| 0.014391
| 0
| 0.643836
| 0
| 0
| 0.21961
| 0.075975
| 0
| 0
| 0
| 0
| 0.164384
| 1
| 0.09589
| false
| 0.123288
| 0.041096
| 0
| 0.150685
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
b48656e9cfe3f8ee5351f07e325134a7866c72a3
| 40
|
py
|
Python
|
Lib/test/test_compiler/testcorpus/77_class__class__no_class.py
|
diogommartins/cinder
|
79103e9119cbecef3b085ccf2878f00c26e1d175
|
[
"CNRI-Python-GPL-Compatible"
] | 1,886
|
2021-05-03T23:58:43.000Z
|
2022-03-31T19:15:58.000Z
|
Lib/test/test_compiler/testcorpus/77_class__class__no_class.py
|
diogommartins/cinder
|
79103e9119cbecef3b085ccf2878f00c26e1d175
|
[
"CNRI-Python-GPL-Compatible"
] | 70
|
2021-05-04T23:25:35.000Z
|
2022-03-31T18:42:08.000Z
|
Lib/test/test_compiler/testcorpus/77_class__class__no_class.py
|
diogommartins/cinder
|
79103e9119cbecef3b085ccf2878f00c26e1d175
|
[
"CNRI-Python-GPL-Compatible"
] | 52
|
2021-05-04T21:26:03.000Z
|
2022-03-08T18:02:56.000Z
|
def f():
def g():
__class__
| 10
| 17
| 0.425
| 5
| 40
| 2.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.425
| 40
| 3
| 18
| 13.333333
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.666667
| true
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
81f43ae686bbb957c6c9e01f1cb7ae1d6227155c
| 9,817
|
py
|
Python
|
plasma_framework/python_tests/tests/contracts/root_chain/test_challenge_in_flight_exit_not_canonical.py
|
EgoInc/plasma-contracts
|
849d2706164a96079df42771d083e2ba68d448bd
|
[
"Apache-2.0"
] | 76
|
2018-07-09T12:59:39.000Z
|
2020-05-24T09:19:35.000Z
|
plasma_framework/python_tests/tests/contracts/root_chain/test_challenge_in_flight_exit_not_canonical.py
|
EgoInc/plasma-contracts
|
849d2706164a96079df42771d083e2ba68d448bd
|
[
"Apache-2.0"
] | 441
|
2018-07-04T13:31:50.000Z
|
2020-05-28T02:13:55.000Z
|
plasma_framework/python_tests/tests/contracts/root_chain/test_challenge_in_flight_exit_not_canonical.py
|
EgoInc/plasma-contracts
|
849d2706164a96079df42771d083e2ba68d448bd
|
[
"Apache-2.0"
] | 46
|
2018-07-09T12:59:49.000Z
|
2020-04-02T14:17:41.000Z
|
import pytest
from eth_tester.exceptions import TransactionFailed
from plasma_core.constants import NULL_ADDRESS
def test_challenge_in_flight_exit_not_canonical_should_succeed(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
double_spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_1.address, NULL_ADDRESS, 100)],
force_invalid=True)
testlang.start_in_flight_exit(spend_id)
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id, account=owner_2)
in_flight_exit = testlang.get_in_flight_exit(spend_id)
assert in_flight_exit.bond_owner == owner_2.address
assert in_flight_exit.oldest_competitor == double_spend_id
assert not in_flight_exit.is_canonical
def test_challenge_in_flight_exit_not_canonical_wrong_period_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
double_spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_1.address, NULL_ADDRESS, 100)],
force_invalid=True)
testlang.start_in_flight_exit(spend_id)
testlang.forward_to_period(2)
with pytest.raises(TransactionFailed):
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id, account=owner_2)
def test_challenge_in_flight_exit_not_canonical_same_tx_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
double_spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)], force_invalid=True)
testlang.start_in_flight_exit(spend_id)
with pytest.raises(TransactionFailed):
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id, account=owner_2)
@pytest.mark.parametrize("deposit_as_input", [0, 1])
def test_challenge_in_flight_exit_not_canonical_unrelated_tx_should_fail(testlang, deposit_as_input):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id_1 = testlang.deposit(owner_1, amount)
deposit_id_2 = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id_1], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
unrelated_spend_id = testlang.spend_utxo([deposit_id_2], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
spend_tx = testlang.child_chain.get_transaction(spend_id)
unrelated_spend_tx = testlang.child_chain.get_transaction(unrelated_spend_id)
testlang.start_in_flight_exit(spend_id)
proof = testlang.get_merkle_proof(unrelated_spend_id)
signature = unrelated_spend_tx.signatures[0]
if deposit_as_input == 0:
input_tx_id = deposit_id_1
else:
input_tx_id = deposit_id_2
input_tx = testlang.child_chain.get_transaction(input_tx_id)
with pytest.raises(TransactionFailed):
testlang.root_chain.challengeInFlightExitNotCanonical(spend_tx.encoded, 0, unrelated_spend_tx.encoded, 0,
unrelated_spend_id, proof, signature,
input_tx.encoded, input_tx_id,
**{'from': owner_2.address})
def test_challenge_in_flight_exit_not_canonical_wrong_index_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
double_spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_1.address, NULL_ADDRESS, 100)],
force_invalid=True)
spend_tx = testlang.child_chain.get_transaction(spend_id)
double_spend_tx = testlang.child_chain.get_transaction(double_spend_id)
testlang.start_in_flight_exit(spend_id)
proof = testlang.get_merkle_proof(double_spend_id)
signature = double_spend_tx.signatures[0]
input_tx = testlang.child_chain.get_transaction(deposit_id)
with pytest.raises(TransactionFailed):
testlang.root_chain.challengeInFlightExitNotCanonical(spend_tx.encoded, 0, double_spend_tx.encoded, 1,
double_spend_id, proof, signature,
input_tx.encoded, deposit_id,
**{'from': owner_2.address})
def test_challenge_in_flight_exit_not_canonical_invalid_signature_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
double_spend_id = testlang.spend_utxo([deposit_id], [owner_2], [(owner_1.address, NULL_ADDRESS, 100)],
force_invalid=True)
testlang.start_in_flight_exit(spend_id)
with pytest.raises(TransactionFailed):
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id, account=owner_2)
def test_challenge_in_flight_exit_not_canonical_invalid_proof_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
double_spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_1.address, NULL_ADDRESS, 100)],
force_invalid=True)
spend_tx = testlang.child_chain.get_transaction(spend_id)
double_spend_tx = testlang.child_chain.get_transaction(double_spend_id)
testlang.start_in_flight_exit(spend_id)
proof = b''
signature = double_spend_tx.signatures[0]
deposit_tx = testlang.child_chain.get_transaction(deposit_id)
with pytest.raises(TransactionFailed):
testlang.root_chain.challengeInFlightExitNotCanonical(spend_tx.encoded, 0, double_spend_tx.encoded, 0,
double_spend_id, proof, signature,
deposit_tx.encoded, deposit_id,
**{'from': owner_2.address})
def test_challenge_in_flight_exit_not_canonical_same_tx_twice_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
double_spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_1.address, NULL_ADDRESS, 100)],
force_invalid=True)
testlang.start_in_flight_exit(spend_id)
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id, account=owner_2)
with pytest.raises(TransactionFailed):
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id, account=owner_2)
def test_challenge_in_flight_exit_twice_older_position_should_succeed(testlang):
owner_1, owner_2, owner_3, amount = testlang.accounts[0], testlang.accounts[1], testlang.accounts[2], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
double_spend_id_1 = testlang.spend_utxo([deposit_id], [owner_1], [(owner_1.address, NULL_ADDRESS, 100)],
force_invalid=True)
double_spend_id_2 = testlang.spend_utxo([deposit_id], [owner_1], [(owner_1.address, NULL_ADDRESS, 50)],
force_invalid=True)
testlang.start_in_flight_exit(spend_id)
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id_2, account=owner_2)
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id_1, account=owner_3)
in_flight_exit = testlang.get_in_flight_exit(spend_id)
assert in_flight_exit.bond_owner == owner_3.address
assert in_flight_exit.oldest_competitor == double_spend_id_1
assert not in_flight_exit.is_canonical
def test_challenge_in_flight_exit_twice_younger_position_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [(owner_2.address, NULL_ADDRESS, 100)])
double_spend_id_1 = testlang.spend_utxo([deposit_id], [owner_1], [(owner_1.address, NULL_ADDRESS, 100)],
force_invalid=True)
double_spend_id_2 = testlang.spend_utxo([deposit_id], [owner_1], [(owner_1.address, NULL_ADDRESS, 50)],
force_invalid=True)
testlang.start_in_flight_exit(spend_id)
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id_1, account=owner_2)
with pytest.raises(TransactionFailed):
testlang.challenge_in_flight_exit_not_canonical(spend_id, double_spend_id_2, account=owner_2)
| 52.497326
| 126
| 0.706733
| 1,294
| 9,817
| 4.914992
| 0.061824
| 0.073742
| 0.075472
| 0.041509
| 0.93239
| 0.919025
| 0.901572
| 0.858648
| 0.841981
| 0.821698
| 0
| 0.029836
| 0.20108
| 9,817
| 186
| 127
| 52.77957
| 0.781079
| 0
| 0
| 0.651852
| 0
| 0
| 0.002852
| 0
| 0
| 0
| 0
| 0
| 0.044444
| 1
| 0.074074
| false
| 0
| 0.022222
| 0
| 0.096296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c32aa7674a116fe94e2ee08b56abd5e9e9ff9d2f
| 12,971
|
py
|
Python
|
tests/test_roles.py
|
KrishnaKanth1729/API
|
54c295379633f7a434c00f40da4f784c5d43a84f
|
[
"MIT"
] | null | null | null |
tests/test_roles.py
|
KrishnaKanth1729/API
|
54c295379633f7a434c00f40da4f784c5d43a84f
|
[
"MIT"
] | null | null | null |
tests/test_roles.py
|
KrishnaKanth1729/API
|
54c295379633f7a434c00f40da4f784c5d43a84f
|
[
"MIT"
] | null | null | null |
import pytest
from httpx import AsyncClient
from api.models import Role, UserRole
from api.models.permissions import ManageRoles
@pytest.fixture
async def manage_roles_role(db):
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), $1, $2, $3, (SELECT COUNT(*) FROM roles) + 1)
RETURNING *;
"""
record = await Role.pool.fetchrow(
query, "Roles Manager", 0x000, ManageRoles().value
)
yield Role(**record)
await db.execute("DELETE FROM roles WHERE id = $1;", record["id"])
@pytest.mark.db
@pytest.mark.asyncio
@pytest.mark.parametrize(
("data", "status"),
[
({}, 422),
({"name": ""}, 422),
({"permissions": -1}, 422),
({"name": "test1", "color": "0xffffff"}, 422),
({"name": "test1", "color": "-0x000001"}, 422),
({"name": "test2", "color": "0x000000", "permissions": 8}, 403),
({"name": "test2", "color": "0x000000", "permissions": 0}, 201),
({"name": "test2", "color": "0x000000", "permissions": 0}, 409),
({"name": "test3", "color": "black", "permissions": 0}, 201),
({"name": "test4", "color": "#bafc03", "permissions": 0}, 201),
],
)
async def test_role_create(
app: AsyncClient, db, user, token, manage_roles_role, data, status
):
try:
await UserRole.create(user.id, manage_roles_role.id)
res = await app.post(
"/api/v1/roles", json=data, headers={"Authorization": token}
)
assert res.status_code == status
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
if status == 409:
await db.execute("DELETE FROM roles WHERE name = $1", data["name"])
@pytest.mark.db
@pytest.mark.asyncio
async def test_fetch_all_roles(app: AsyncClient):
res = await app.get("/api/v1/roles")
assert res.status_code == 200
assert type(res.json()) == list
@pytest.mark.db
@pytest.mark.asyncio
@pytest.mark.parametrize(
("request_data", "new_data", "status"),
[
({}, {"name": "test update", "permissions": 0, "color": "0x000"}, 204),
(
{"name": ""},
{"name": "test update", "permissions": 0, "color": "0x000"},
422,
),
(
{"permissions": -1},
{"name": "test update", "permissions": 0, "color": "0x000"},
422,
),
(
{"color": "0xffffff"},
{"name": "test update", "permissions": 0, "color": "0x000"},
422,
),
(
{"color": "-0x000001"},
{"name": "test update", "permissions": 0, "color": "0x000"},
422,
),
(
{"color": "0x005", "permissions": 8},
{"name": "test update", "permissions": 0, "color": "0x000"},
403,
),
(
{"color": "black", "permissions": 8},
{"name": "test update", "permissions": 0, "color": "#bafc03"},
403,
),
(
{"color": "0x005", "permissions": ManageRoles().value},
{
"name": "test update",
"permissions": ManageRoles().value,
"color": "0x005",
},
204,
),
],
)
async def test_role_update(
app: AsyncClient, db, user, token, manage_roles_role, request_data, new_data, status
):
try:
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), 'test update', 0, 0, (SELECT COUNT(*) FROM roles) + 1)
RETURNING *;
"""
role = Role(**await Role.pool.fetchrow(query))
await UserRole.create(user.id, manage_roles_role.id)
res = await app.patch(
f"/api/v1/roles/{role.id}",
json=request_data,
headers={"Authorization": token},
)
assert res.status_code == status
role = await Role.fetch(role.id)
data = role.as_dict()
data.pop("id")
data.pop("position")
assert data == new_data
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
await db.execute("DELETE FROM roles WHERE id = $1", role.id)
@pytest.mark.db
@pytest.mark.asyncio
async def test_role_delete(app: AsyncClient, db, user, token, manage_roles_role):
try:
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), 'test delete', 0, 0, (SELECT COUNT(*) FROM roles) + 1)
RETURNING *;
"""
role = Role(**await Role.pool.fetchrow(query))
await UserRole.create(user.id, manage_roles_role.id)
res = await app.delete(
f"/api/v1/roles/{role.id}",
headers={"Authorization": token},
)
assert res.status_code == 204
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
await db.execute("DELETE FROM roles WHERE id = $1", role.id)
@pytest.mark.db
@pytest.mark.asyncio
async def test_role_delete_high_position(
app: AsyncClient, db, user, token, manage_roles_role
):
try:
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), 'test delete', 0, 0, 0)
RETURNING *;
"""
role = Role(**await Role.pool.fetchrow(query))
await UserRole.create(user.id, manage_roles_role.id)
res = await app.delete(
f"/api/v1/roles/{role.id}",
headers={"Authorization": token},
)
assert res.status_code == 403
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
await db.execute("DELETE FROM roles WHERE id = $1", role.id)
@pytest.mark.db
@pytest.mark.asyncio
async def test_role_add(app: AsyncClient, db, user, token, manage_roles_role):
try:
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), 'test add', 0, 0, (SELECT COUNT(*) FROM roles) + 1)
RETURNING *;
"""
role = Role(**await Role.pool.fetchrow(query))
await UserRole.create(user.id, manage_roles_role.id)
res = await app.put(
f"/api/v1/roles/{role.id}/members/{user.id}",
headers={"Authorization": token},
)
assert res.status_code == 204
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
await db.execute("DELETE FROM roles WHERE id = $1", role.id)
@pytest.mark.db
@pytest.mark.asyncio
async def test_role_add_high_position(
app: AsyncClient, db, user, token, manage_roles_role
):
try:
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), 'test add', 0, 0, 0)
RETURNING *;
"""
role = Role(**await Role.pool.fetchrow(query))
await UserRole.create(user.id, manage_roles_role.id)
res = await app.put(
f"/api/v1/roles/{role.id}/members/{user.id}",
headers={"Authorization": token},
)
assert res.status_code == 403
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
await db.execute("DELETE FROM roles WHERE id = $1", role.id)
@pytest.mark.db
@pytest.mark.asyncio
async def test_role_remove(app: AsyncClient, db, user, token, manage_roles_role):
try:
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), 'test remove', 0, 0, (SELECT COUNT(*) FROM roles) + 1)
RETURNING *;
"""
role = Role(**await Role.pool.fetchrow(query))
await UserRole.create(user.id, manage_roles_role.id)
res = await app.delete(
f"/api/v1/roles/{role.id}/members/{user.id}",
headers={"Authorization": token},
)
assert res.status_code == 204
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
await db.execute("DELETE FROM roles WHERE id = $1", role.id)
@pytest.mark.db
@pytest.mark.asyncio
async def test_role_remove_high_position(
app: AsyncClient, db, user, token, manage_roles_role
):
try:
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), 'test remove', 0, 0, 0)
RETURNING *;
"""
role = Role(**await Role.pool.fetchrow(query))
await UserRole.create(user.id, manage_roles_role.id)
res = await app.delete(
f"/api/v1/roles/{role.id}/members/{user.id}",
headers={"Authorization": token},
)
assert res.status_code == 403
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
await db.execute("DELETE FROM roles WHERE id = $1", role.id)
@pytest.mark.db
@pytest.mark.asyncio
async def test_update_role_positions_up(
app: AsyncClient, db, user, token, manage_roles_role
):
try:
roles = []
# manage roles -> 1 -> 3 -> 2 -> 4
role_names = ["1", "3", "2", "4"]
for role_name in role_names:
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), $1, 0, 0, (SELECT COUNT(*) FROM roles) + 1)
RETURNING *;
"""
role = Role(**await Role.pool.fetchrow(query, role_name))
roles.append(role)
await UserRole.create(user.id, manage_roles_role.id)
res = await app.patch(
f"/api/v1/roles/{roles[2].id}",
json={"position": 3},
headers={"Authorization": token},
)
assert res.status_code == 204
res = await app.get("/api/v1/roles")
new_roles = sorted(res.json(), key=lambda x: x["position"])
for i, role in enumerate(new_roles, 1):
assert (
role["position"] == i
) # make sure roles are ordered with no missing positions
for i in range(1, 5):
assert new_roles[i]["name"] == str(i)
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
for role in roles:
await db.execute("DELETE FROM roles WHERE id = $1", role.id)
@pytest.mark.db
@pytest.mark.asyncio
async def test_update_role_positions_down(
app: AsyncClient, db, user, token, manage_roles_role
):
try:
roles = []
# manage roles -> 1 -> 3 -> 2 -> 4
role_names = ["1", "3", "2", "4"]
for role_name in role_names:
query = """
INSERT INTO roles (id, name, color, permissions, position)
VALUES (create_snowflake(), $1, 0, 0, (SELECT COUNT(*) FROM roles) + 1)
RETURNING *;
"""
role = Role(**await Role.pool.fetchrow(query, role_name))
roles.append(role)
await UserRole.create(user.id, manage_roles_role.id)
res = await app.patch(
f"/api/v1/roles/{roles[1].id}",
json={"position": 4},
headers={"Authorization": token},
)
assert res.status_code == 204
res = await app.get("/api/v1/roles")
new_roles = sorted(res.json(), key=lambda x: x["position"])
for i, role in enumerate(new_roles, 1):
assert (
role["position"] == i
) # make sure roles are ordered with no missing positions
for i in range(1, 5):
assert new_roles[i]["name"] == str(i)
finally:
await db.execute(
"DELETE FROM userroles WHERE role_id = $1 AND user_id = $2;",
manage_roles_role.id,
user.id,
)
for role in roles:
await db.execute("DELETE FROM roles WHERE id = $1", role.id)
| 31.636585
| 98
| 0.538355
| 1,509
| 12,971
| 4.523526
| 0.083499
| 0.041313
| 0.068122
| 0.061529
| 0.878406
| 0.862731
| 0.847495
| 0.832699
| 0.802666
| 0.74143
| 0
| 0.033019
| 0.320561
| 12,971
| 409
| 99
| 31.713936
| 0.741518
| 0.013337
| 0
| 0.694915
| 0
| 0.019774
| 0.3208
| 0.022434
| 0
| 0
| 0.008286
| 0
| 0.048023
| 1
| 0
| false
| 0
| 0.011299
| 0
| 0.011299
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f32fb422bdca18fe4dc2aa522b97e6aebe35fc8
| 19,144
|
py
|
Python
|
tests/unit/states/test_win_path.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-01-21T00:18:25.000Z
|
2021-07-11T07:35:26.000Z
|
tests/unit/states/test_win_path.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
tests/unit/states/test_win_path.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-01-05T09:50:42.000Z
|
2019-08-19T01:43:40.000Z
|
# -*- coding: utf-8 -*-
'''
Tests for win_path states
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
Mock,
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.states.win_path as win_path
NAME = 'salt'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinPathTestCase(TestCase, LoaderModuleMockMixin):
'''
Validate the win_path state
'''
def setup_loader_modules(self):
return {win_path: {}}
def test_absent(self):
'''
Test various cases for win_path.absent
'''
ret_base = {'name': NAME, 'result': True, 'changes': {}}
def _mock(retval):
# Return a new MagicMock for each test case
return MagicMock(side_effect=retval)
# We don't really want to run the remove func
with patch.dict(win_path.__salt__, {'win_path.remove': Mock()}):
# Test mode OFF
with patch.dict(win_path.__opts__, {'test': False}):
# Test already absent
with patch.dict(win_path.__salt__, {'win_path.exists': _mock([False])}):
ret = copy.deepcopy(ret_base)
ret['comment'] = '{0} is not in the PATH'.format(NAME)
ret['result'] = True
self.assertDictEqual(win_path.absent(NAME), ret)
# Test successful removal
with patch.dict(win_path.__salt__, {'win_path.exists': _mock([True, False])}):
ret = copy.deepcopy(ret_base)
ret['comment'] = 'Removed {0} from the PATH'.format(NAME)
ret['changes']['removed'] = NAME
ret['result'] = True
self.assertDictEqual(win_path.absent(NAME), ret)
# Test unsuccessful removal
with patch.dict(win_path.__salt__, {'win_path.exists': _mock([True, True])}):
ret = copy.deepcopy(ret_base)
ret['comment'] = 'Failed to remove {0} from the PATH'.format(NAME)
ret['result'] = False
self.assertDictEqual(win_path.absent(NAME), ret)
# Test mode ON
with patch.dict(win_path.__opts__, {'test': True}):
# Test already absent
with patch.dict(win_path.__salt__, {'win_path.exists': _mock([False])}):
ret = copy.deepcopy(ret_base)
ret['comment'] = '{0} is not in the PATH'.format(NAME)
ret['result'] = True
self.assertDictEqual(win_path.absent(NAME), ret)
# Test the test-mode return
with patch.dict(win_path.__salt__, {'win_path.exists': _mock([True])}):
ret = copy.deepcopy(ret_base)
ret['comment'] = '{0} would be removed from the PATH'.format(NAME)
ret['result'] = None
self.assertDictEqual(win_path.absent(NAME), ret)
def test_exists_invalid_index(self):
'''
Tests win_path.exists when a non-integer index is specified.
'''
ret = win_path.exists(NAME, index='foo')
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {},
'result': False,
'comment': 'Index must be an integer'
}
)
def test_exists_add_no_index_success(self):
'''
Tests win_path.exists when the directory isn't already in the PATH and
no index is specified (successful run).
'''
add_mock = MagicMock(return_value=True)
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', 'baz'],
['foo', 'bar', 'baz', NAME]
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': False}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME)
add_mock.assert_called_once_with(NAME, index=None, rehash=False)
self.assert_called_once(rehash_mock)
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {'index': {'old': None, 'new': 3}},
'result': True,
'comment': 'Added {0} to the PATH.'.format(NAME)
}
)
def test_exists_add_no_index_failure(self):
'''
Tests win_path.exists when the directory isn't already in the PATH and
no index is specified (failed run).
'''
add_mock = MagicMock(return_value=False)
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', 'baz'],
['foo', 'bar', 'baz']
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': False}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME)
add_mock.assert_called_once_with(NAME, index=None, rehash=False)
rehash_mock.assert_not_called()
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {},
'result': False,
'comment': 'Failed to add {0} to the PATH.'.format(NAME)
}
)
def test_exists_add_no_index_failure_exception(self):
'''
Tests win_path.exists when the directory isn't already in the PATH and
no index is specified (failed run due to exception).
'''
add_mock = MagicMock(side_effect=Exception('Global Thermonuclear War'))
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', 'baz'],
['foo', 'bar', 'baz']
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': False}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME)
add_mock.assert_called_once_with(NAME, index=None, rehash=False)
rehash_mock.assert_not_called()
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {},
'result': False,
'comment': 'Encountered error: Global Thermonuclear War. '
'Failed to add {0} to the PATH.'.format(NAME)
}
)
def test_exists_change_index_success(self):
'''
Tests win_path.exists when the directory is already in the PATH and
needs to be moved to a different position (successful run).
'''
add_mock = MagicMock(return_value=True)
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', 'baz', NAME],
[NAME, 'foo', 'bar', 'baz']
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': False}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME, index=0)
add_mock.assert_called_once_with(NAME, index=0, rehash=False)
self.assert_called_once(rehash_mock)
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {'index': {'old': 3, 'new': 0}},
'result': True,
'comment': 'Moved {0} from index 3 to 0.'.format(NAME)
}
)
def test_exists_change_negative_index_success(self):
'''
Tests win_path.exists when the directory is already in the PATH and
needs to be moved to a different position (successful run).
This tests a negative index.
'''
add_mock = MagicMock(return_value=True)
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', NAME, 'baz'],
['foo', 'bar', 'baz', NAME]
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': False}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME, index=-1)
add_mock.assert_called_once_with(NAME, index=-1, rehash=False)
self.assert_called_once(rehash_mock)
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {'index': {'old': -2, 'new': -1}},
'result': True,
'comment': 'Moved {0} from index -2 to -1.'.format(NAME)
}
)
def test_exists_change_index_add_exception(self):
'''
Tests win_path.exists when the directory is already in the PATH but an
exception is raised when we attempt to add the key to its new location.
'''
add_mock = MagicMock(side_effect=Exception('Global Thermonuclear War'))
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', 'baz', NAME],
['foo', 'bar', 'baz', NAME],
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': False}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME, index=0)
add_mock.assert_called_once_with(NAME, index=0, rehash=False)
rehash_mock.assert_not_called()
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {},
'result': False,
'comment': 'Encountered error: Global Thermonuclear War. '
'Failed to move {0} from index 3 to 0.'.format(NAME)
}
)
def test_exists_change_negative_index_add_exception(self):
'''
Tests win_path.exists when the directory is already in the PATH but an
exception is raised when we attempt to add the key to its new location.
This tests a negative index.
'''
add_mock = MagicMock(side_effect=Exception('Global Thermonuclear War'))
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', NAME, 'baz'],
['foo', 'bar', NAME, 'baz'],
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': False}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME, index=-1)
add_mock.assert_called_once_with(NAME, index=-1, rehash=False)
rehash_mock.assert_not_called()
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {},
'result': False,
'comment': 'Encountered error: Global Thermonuclear War. '
'Failed to move {0} from index -2 to -1.'.format(NAME)
}
)
def test_exists_change_index_failure(self):
'''
Tests win_path.exists when the directory is already in the PATH and
needs to be moved to a different position (failed run).
'''
add_mock = MagicMock(return_value=False)
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', 'baz', NAME],
['foo', 'bar', 'baz', NAME]
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': False}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME, index=0)
add_mock.assert_called_once_with(NAME, index=0, rehash=False)
rehash_mock.assert_not_called()
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {},
'result': False,
'comment': 'Failed to move {0} from index 3 to 0.'.format(NAME)
}
)
def test_exists_change_negative_index_failure(self):
'''
Tests win_path.exists when the directory is already in the PATH and
needs to be moved to a different position (failed run).
This tests a negative index.
'''
add_mock = MagicMock(return_value=False)
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', NAME, 'baz'],
['foo', 'bar', NAME, 'baz']
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': False}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME, index=-1)
add_mock.assert_called_once_with(NAME, index=-1, rehash=False)
rehash_mock.assert_not_called()
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {},
'result': False,
'comment': 'Failed to move {0} from index -2 to -1.'.format(NAME)
}
)
def test_exists_change_index_test_mode(self):
'''
Tests win_path.exists when the directory is already in the PATH and
needs to be moved to a different position (test mode enabled).
'''
add_mock = Mock()
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', 'baz', NAME],
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': True}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME, index=0)
add_mock.assert_not_called()
rehash_mock.assert_not_called()
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {'index': {'old': 3, 'new': 0}},
'result': None,
'comment': '{0} would be moved from index 3 to 0.'.format(NAME)
}
)
def test_exists_change_negative_index_test_mode(self):
'''
Tests win_path.exists when the directory is already in the PATH and
needs to be moved to a different position (test mode enabled).
'''
add_mock = Mock()
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[
['foo', 'bar', NAME, 'baz'],
]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': True}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME, index=-1)
add_mock.assert_not_called()
rehash_mock.assert_not_called()
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {'index': {'old': -2, 'new': -1}},
'result': None,
'comment': '{0} would be moved from index -2 to -1.'.format(NAME)
}
)
def _test_exists_add_already_present(self, index, test_mode):
'''
Tests win_path.exists when the directory already exists in the PATH.
Helper function to test both with and without an index, and with test
mode both disabled and enabled.
'''
current_path = ['foo', 'bar', 'baz']
if index is None:
current_path.append(NAME)
else:
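# A negative index counts from the end; the +1 offset makes list.insert place
# NAME where that index will point after insertion (e.g. index=-1 appends at the tail).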
pos = index if index >= 0 else len(current_path) + index + 1
current_path.insert(pos, NAME)
add_mock = Mock()
rehash_mock = MagicMock(return_value=True)
dunder_salt = {
'win_path.get_path': MagicMock(side_effect=[current_path]),
'win_path.add': add_mock,
'win_path.rehash': rehash_mock,
}
dunder_opts = {'test': test_mode}
with patch.dict(win_path.__salt__, dunder_salt), \
patch.dict(win_path.__opts__, dunder_opts):
ret = win_path.exists(NAME, index=index)
add_mock.assert_not_called()
rehash_mock.assert_not_called()
self.assertDictEqual(
ret,
{
'name': NAME,
'changes': {},
'result': True,
'comment': '{0} already exists in the PATH{1}.'.format(
NAME,
' at index {0}'.format(index) if index is not None else ''
)
}
)
def test_exists_add_no_index_already_present(self):
self._test_exists_add_already_present(None, False)
def test_exists_add_no_index_already_present_test_mode(self):
self._test_exists_add_already_present(None, True)
def test_exists_add_index_already_present(self):
self._test_exists_add_already_present(1, False)
self._test_exists_add_already_present(2, False)
self._test_exists_add_already_present(-1, False)
self._test_exists_add_already_present(-2, False)
def test_exists_add_index_already_present_test_mode(self):
self._test_exists_add_already_present(1, True)
self._test_exists_add_already_present(2, True)
self._test_exists_add_already_present(-1, True)
self._test_exists_add_already_present(-2, True)
| 35.917448
| 94
| 0.545341
| 2,167
| 19,144
| 4.523766
| 0.073373
| 0.079261
| 0.039172
| 0.052229
| 0.877691
| 0.867592
| 0.860247
| 0.831174
| 0.805978
| 0.794553
| 0
| 0.005483
| 0.342666
| 19,144
| 532
| 95
| 35.984962
| 0.773522
| 0.110792
| 0
| 0.64467
| 0
| 0
| 0.127948
| 0
| 0
| 0
| 0
| 0
| 0.106599
| 1
| 0.050761
| false
| 0
| 0.015228
| 0.005076
| 0.073604
| 0.002538
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6f67d7c26991f6f3e93839fef98d54026e55f7be
| 43
|
py
|
Python
|
deepstack/intelligencelayer/shared/recognition/__init__.py
|
mayop/DeepStack
|
8b05c0a69dce65513638def0a8a21c87fd8409f1
|
[
"Apache-2.0"
] | 1
|
2021-01-03T05:47:42.000Z
|
2021-01-03T05:47:42.000Z
|
deepstack/intelligencelayer/shared/recognition/__init__.py
|
robmarkcole/DeepStack
|
8b05c0a69dce65513638def0a8a21c87fd8409f1
|
[
"Apache-2.0"
] | null | null | null |
deepstack/intelligencelayer/shared/recognition/__init__.py
|
robmarkcole/DeepStack
|
8b05c0a69dce65513638def0a8a21c87fd8409f1
|
[
"Apache-2.0"
] | null | null | null |
from .process import FaceRecognitionModel
| 21.5
| 42
| 0.860465
| 4
| 43
| 9.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 43
| 1
| 43
| 43
| 0.973684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
488f968c43c887f6b352dbe982315b662748dddf
| 708
|
py
|
Python
|
panel/tests/io/test_state.py
|
datalayer-contrib/holoviz-panel
|
c97b57e8eaff4b5f542add41f496395da2483b23
|
[
"BSD-3-Clause"
] | 1,130
|
2019-11-23T09:53:37.000Z
|
2022-03-31T11:30:07.000Z
|
panel/tests/io/test_state.py
|
datalayer-contrib/holoviz-panel
|
c97b57e8eaff4b5f542add41f496395da2483b23
|
[
"BSD-3-Clause"
] | 2,265
|
2019-11-20T17:09:09.000Z
|
2022-03-31T22:09:38.000Z
|
panel/tests/io/test_state.py
|
datalayer-contrib/holoviz-panel
|
c97b57e8eaff4b5f542add41f496395da2483b23
|
[
"BSD-3-Clause"
] | 215
|
2019-11-26T11:49:04.000Z
|
2022-03-30T10:23:11.000Z
|
from panel.io.state import state
def test_as_cached_key_only():
global i
i = 0
def test_fn():
global i
i += 1
return i
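# The first call computes and caches the value under key 'test'; the second
# call hits the cache, so i is incremented only once.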
assert state.as_cached('test', test_fn) == 1
assert state.as_cached('test', test_fn) == 1
state.cache.clear()
def test_as_cached_key_and_kwarg():
global i
i = 0
def test_fn(a):
global i
i += 1
return i
assert state.as_cached('test', test_fn, a=1) == 1
assert state.as_cached('test', test_fn, a=1) == 1
assert state.as_cached('test', test_fn, a=2) == 2
assert state.as_cached('test', test_fn, a=1) == 1
assert state.as_cached('test', test_fn, a=2) == 2
state.cache.clear()
| 20.823529
| 53
| 0.591808
| 118
| 708
| 3.338983
| 0.20339
| 0.182741
| 0.230964
| 0.337563
| 0.822335
| 0.730964
| 0.730964
| 0.639594
| 0.560914
| 0.560914
| 0
| 0.031189
| 0.275424
| 708
| 33
| 54
| 21.454545
| 0.736842
| 0
| 0
| 0.791667
| 0
| 0
| 0.039548
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 1
| 0.166667
| false
| 0
| 0.041667
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
48e419ded9b006397a9539f35919948607e30656
| 103
|
py
|
Python
|
plotly/graph_objs/layout/template/__init__.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/graph_objs/layout/template/__init__.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/graph_objs/layout/template/__init__.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
from ._layout import Layout
from ._data import Data
from plotly.graph_objs.layout.template import data
| 25.75
| 50
| 0.834951
| 16
| 103
| 5.1875
| 0.5
| 0.240964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116505
| 103
| 3
| 51
| 34.333333
| 0.912088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
48fcd2098ed4869efcdf33ce8b0fed6e6a89c44d
| 11,554
|
py
|
Python
|
src/pywink/test/devices/sensor_test.py
|
vickyg3/python-wink
|
1b9f4acd22a6784023ae57c2ff0ef4e26b9a38f7
|
[
"MIT"
] | null | null | null |
src/pywink/test/devices/sensor_test.py
|
vickyg3/python-wink
|
1b9f4acd22a6784023ae57c2ff0ef4e26b9a38f7
|
[
"MIT"
] | null | null | null |
src/pywink/test/devices/sensor_test.py
|
vickyg3/python-wink
|
1b9f4acd22a6784023ae57c2ff0ef4e26b9a38f7
|
[
"MIT"
] | null | null | null |
import json
import os
import unittest
from unittest.mock import MagicMock
from pywink.api import get_devices_from_response_dict
from pywink.devices import types as device_types
from pywink.devices.piggy_bank import WinkPorkfolioBalanceSensor
from pywink.devices.smoke_detector import WinkSmokeDetector, WinkCoDetector, WinkSmokeSeverity, WinkCoSeverity
class SensorTests(unittest.TestCase):
def setUp(self):
super(SensorTests, self).setUp()
self.api_interface = MagicMock()
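# Build a fake API response by loading every JSON fixture in the
# api_responses/ directory next to this test module.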
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_capability_should_not_be_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SENSOR_POD)
for device in devices:
self.assertIsNotNone(device.capability())
def test_tamper_detected_should_be_false(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SENSOR_POD)
for device in devices:
self.assertFalse(device.tamper_detected())
def test_unit_is_valid(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SENSOR_POD)
for device in devices:
if device.unit_type() == "boolean":
self.assertIsNone(device.unit())
else:
self.assertIsNotNone(device.unit())
class EggtrayTests(unittest.TestCase):
def setUp(self):
super(EggtrayTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_state_should_be_2(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.EGGTRAY)
for device in devices:
self.assertEqual(device.state(), 2)
def test_capability_is_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.EGGTRAY)
for device in devices:
self.assertEqual(device.capability(), None)
def test_unit_is_eggs(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.EGGTRAY)
for device in devices:
self.assertEqual(device.unit(), "eggs")
class KeyTests(unittest.TestCase):
def setUp(self):
super(KeyTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_state_should_be_false(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
self.assertEqual(len(devices), 1)
for device in devices:
self.assertFalse(device.state())
def test_parent_id_should_not_be_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
for device in devices:
self.assertIsNotNone(device.parent_id())
def test_available_is_true(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
for device in devices:
self.assertTrue(device.available())
def test_capability_is_activity_detected(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
for device in devices:
self.assertEqual(device.capability(), "activity_detected")
def test_unit_is_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.KEY)
for device in devices:
self.assertIsNone(device.unit())
class PorkfolioTests(unittest.TestCase):
def setUp(self):
super(PorkfolioTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_unit_is_usd(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PIGGY_BANK)
self.assertEqual(len(devices), 2)
for device in devices:
if isinstance(device, WinkPorkfolioBalanceSensor):
self.assertEqual(device.unit(), "USD")
def test_capability_is_balance(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PIGGY_BANK)
for device in devices:
if isinstance(device, WinkPorkfolioBalanceSensor):
self.assertEqual(device.capability(), "balance")
def test_state_is_180(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PIGGY_BANK)
for device in devices:
if isinstance(device, WinkPorkfolioBalanceSensor):
self.assertEqual(device.state(), 180)
def test_available_is_true(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PIGGY_BANK)
for device in devices:
if isinstance(device, WinkPorkfolioBalanceSensor):
self.assertTrue(device.available())
class GangTests(unittest.TestCase):
def setUp(self):
super(GangTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_unit_is_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.GANG)
for device in devices:
self.assertIsNone(device.unit())
class SmokeDetectorTests(unittest.TestCase):
def setUp(self):
super(SmokeDetectorTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_test_activated_is_false(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SMOKE_DETECTOR)
for device in devices:
self.assertFalse(device.test_activated())
def test_unit_is_none(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.SMOKE_DETECTOR)
for device in devices:
if isinstance(device, WinkSmokeDetector):
self.assertIsNone(device.unit())
self.assertEqual(device.unit_type(), "boolean")
if isinstance(device, WinkCoDetector):
self.assertIsNone(device.unit())
self.assertEqual(device.unit_type(), "boolean")
if isinstance(device, WinkSmokeSeverity):
self.assertIsNone(device.unit())
self.assertIsNone(device.unit_type())
if isinstance(device, WinkCoSeverity):
self.assertIsNone(device.unit())
self.assertIsNone(device.unit_type())
class RemoteTests(unittest.TestCase):
def setUp(self):
super(RemoteTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_buttons_press_is_false(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.REMOTE)
remote = devices[0]
self.assertFalse(remote.button_on_pressed())
self.assertFalse(remote.button_off_pressed())
self.assertFalse(remote.button_up_pressed())
self.assertFalse(remote.button_down_pressed())
def test_unit_and_capability(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.REMOTE)
remote = devices[0]
self.assertIsNone(remote.unit())
self.assertEqual(remote.capability(), "opened")
class PropaneTankTests(unittest.TestCase):
def setUp(self):
super(PropaneTankTests, self).setUp()
self.api_interface = MagicMock()
all_devices = os.listdir('{}/api_responses/'.format(os.path.dirname(__file__)))
self.response_dict = {}
device_list = []
for json_file in all_devices:
if os.path.isfile('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
_json_file = open('{}/api_responses/{}'.format(os.path.dirname(__file__), json_file))
device_list.append(json.load(_json_file))
_json_file.close()
self.response_dict["data"] = device_list
def test_unit_and_capability(self):
devices = get_devices_from_response_dict(self.response_dict, device_types.PROPANE_TANK)
tank = devices[0]
self.assertIsNone(tank.unit())
self.assertIsNone(tank.capability())
| 42.951673
| 110
| 0.665484
| 1,360
| 11,554
| 5.292647
| 0.082353
| 0.098361
| 0.082245
| 0.088636
| 0.823145
| 0.801056
| 0.764379
| 0.745346
| 0.740206
| 0.714087
| 0
| 0.001449
| 0.223472
| 11,554
| 268
| 111
| 43.11194
| 0.800825
| 0
| 0
| 0.674208
| 0
| 0
| 0.045872
| 0
| 0
| 0
| 0
| 0
| 0.162896
| 1
| 0.131222
| false
| 0
| 0.036199
| 0
| 0.20362
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d2df9595c11e13f4ebeeb549afe8db56881677da
| 127
|
py
|
Python
|
mmdet/ops/nms/__init__.py
|
kuazhangxiaoai/AerialDetection
|
818d7ad2ffb13059cca09a99a2ac6342b2146eb6
|
[
"Apache-2.0"
] | null | null | null |
mmdet/ops/nms/__init__.py
|
kuazhangxiaoai/AerialDetection
|
818d7ad2ffb13059cca09a99a2ac6342b2146eb6
|
[
"Apache-2.0"
] | null | null | null |
mmdet/ops/nms/__init__.py
|
kuazhangxiaoai/AerialDetection
|
818d7ad2ffb13059cca09a99a2ac6342b2146eb6
|
[
"Apache-2.0"
] | null | null | null |
#from .nms import nms, soft_nms
#from .rnms import py_cpu_nms_poly_fast
#__all__ = ['nms', 'soft_nms', 'py_cpu_nms_poly_fast']
| 31.75
| 54
| 0.755906
| 23
| 127
| 3.565217
| 0.434783
| 0.170732
| 0.243902
| 0.292683
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110236
| 127
| 3
| 55
| 42.333333
| 0.725664
| 0.952756
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9602339d5694e57ecfb3ffbd1f60a320bed142c4
| 9,213
|
py
|
Python
|
test/integration_test/test_real_network.py
|
heshu-by/likelib-ws
|
85987d328dc274622f4b758afa1b6af43d15564f
|
[
"Apache-2.0"
] | null | null | null |
test/integration_test/test_real_network.py
|
heshu-by/likelib-ws
|
85987d328dc274622f4b758afa1b6af43d15564f
|
[
"Apache-2.0"
] | null | null | null |
test/integration_test/test_real_network.py
|
heshu-by/likelib-ws
|
85987d328dc274622f4b758afa1b6af43d15564f
|
[
"Apache-2.0"
] | null | null | null |
from tester import test_case, Node, NodePool
#from tester import test_case, Node, NodePool
import time
import random
""" Второй пул видит только ноды из первого пула.
После запуска двух пулов даётся время на синхронизацию (ожидание),
Во втором пуле ноды делают по одной транзакции каждая, ожидание
Все ноды первого и второго пула проверяют все транзакции
Если ноды во втором пуле не видят друг друга, тест свалится
Если ноды первого пула всё проверили, а во втором ошибки - значит ноды первого пула не передали блоки, считая что во втором пуле ноды уже видят друг друга
Иногда тест падает, если transf_timeout слишком маленький (увеличить его или уменьшить сложность майнинга) """
@test_case("real_network_2_pools_non_stop")
def main(env, logger):
pool_cfg_1 = {'name':"first" , 'nodes':1, 'timeout':1, 'mining_thr':1,
'start_sync_port':20100, 'start_rpc_port':50100}
pool_cfg_2 = {'name':"second", 'nodes':2, 'timeout':1, 'mining_thr':1,
'start_sync_port':20200, 'start_rpc_port':50200}
sync_timeout = 2
transf_timeout = 4
transf_sum = 100
with NodePool.create_every_to_previous(env, logger, pool_cfg_1['start_sync_port'],
pool_cfg_1['start_rpc_port'], pool_cfg_1['nodes'], pool_cfg_1['mining_thr']) as nodes_1:
nodes_1.start_nodes(pool_cfg_1['timeout'])
for node in nodes_1:
node.run_check_test()
with NodePool.create_every_to_custom_list(env,logger, pool_cfg_2['start_sync_port'],
pool_cfg_2['start_rpc_port'], pool_cfg_2['nodes'], nodes_1.ids,
pool_cfg_2['mining_thr']) as nodes_2:
nodes_2.start_nodes(pool_cfg_2['timeout'])
for node in nodes_2:
node.run_check_test()
time.sleep(sync_timeout)
target_addresses = []
for node in nodes_2:
target_addresses.append(node.create_new_address(keys_path="key1"))
node.run_check_balance(target_addresses[-1], 0)
distributor_address = node.load_address(keys_path=Node.DISTRIBUTOR_ADDRESS_PATH)
node.run_check_transfer(to_address=target_addresses[-1], amount=transf_sum,
from_address=distributor_address, fee=0,
timeout=transf_timeout, wait=0)
node.run_check_balance(target_addresses[-1], transf_sum)
time.sleep(sync_timeout)
for node in nodes_1:
for target_addr in target_addresses:
node.run_check_balance(target_addr, transf_sum)
for node in nodes_2:
for target_addr in target_addresses:
node.run_check_balance(target_addr, transf_sum)
return 0
""" Второй пул видит только ноды из первого пула.
После запуска двух пулов даётся время на синхронизацию (ожидание), первый пул отключается, ожидание
Во втором пуле ноды делают по одной транзакции каждая, ожидание
Все ноды второго пула проверяют все транзакции
Если ноды во втором пуле не видят друг друга, тест свалится
Иногда тест падает, если transf_timeout слишком маленький (увеличить его или уменьшить сложность майнинга) """
@test_case("real_network_2_pools_first_pool_stopped")
def main(env, logger):
pool_cfg_1 = {'name':"first" , 'nodes':1, 'timeout':1, 'mining_thr':1,
'start_sync_port':20100, 'start_rpc_port':50100}
pool_cfg_2 = {'name':"second", 'nodes':2, 'timeout':1, 'mining_thr':1,
'start_sync_port':20200, 'start_rpc_port':50200}
sync_timeout = 2
transf_timeout = 4
transf_sum = 100
with NodePool.create_every_to_previous(env, logger, pool_cfg_1['start_sync_port'],
pool_cfg_1['start_rpc_port'], pool_cfg_1['nodes'], pool_cfg_1['mining_thr']) as nodes_1:
nodes_1.start_nodes(pool_cfg_1['timeout'])
for node in nodes_1:
node.run_check_test()
with NodePool.create_every_to_custom_list(env,logger, pool_cfg_2['start_sync_port'],
pool_cfg_2['start_rpc_port'], pool_cfg_2['nodes'], nodes_1.ids,
pool_cfg_2['mining_thr']) as nodes_2:
nodes_2.start_nodes(pool_cfg_2['timeout'])
for node in nodes_2:
node.run_check_test()
time.sleep(sync_timeout)
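# Shut down the entire first pool; from here on the second pool must keep working on its own.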
for node in nodes_1: node.close()
time.sleep(sync_timeout)
target_addresses = []
for node in nodes_2:
target_addresses.append(node.create_new_address(keys_path="key1"))
node.run_check_balance(target_addresses[-1], 0)
distributor_address = node.load_address(keys_path=Node.DISTRIBUTOR_ADDRESS_PATH)
node.run_check_transfer(to_address=target_addresses[-1], amount=transf_sum,
from_address=distributor_address, fee=0,
timeout=transf_timeout, wait=0)
node.run_check_balance(target_addresses[-1], transf_sum)
time.sleep(sync_timeout)
for node in nodes_2:
for target_addr in target_addresses:
node.run_check_balance(target_addr, transf_sum)
return 0
""" Идея для теста - запустить три пула нод:
- Первый достоверный, они всегда работают и грантированно отвечают (сервера)
- Второй переодически отключает поочерёдно ноды,
а потом включает их с сохранением базы (типа юзеры)
- Третий имеет проблемы с сетью (эмулируются задержки и потери пакетов)
или
- Третий - это клисенты которые приходят и уходят, очищая базу
Проводятся транзакции во втором и третьем пуле
После некоторого времени работы, проверяется синхронность нод """
# На данный момент реализован первый и второй пул - как задумано
# Третий пул - просто проводит транзакции (каждая нода)
# Все три пула производят проверку произведённых транзакций
# Если какой то транзакции нет - тест падает
@test_case("real_network_3_pools")
def main(env, logger):
pool_cfg_1 = {'name':"first" , 'nodes':1, 'timeout':1, 'mining_thr':1,
'start_sync_port':20100, 'start_rpc_port':50100, 'clean_db':False}
pool_cfg_2 = {'name':"second", 'nodes':5, 'timeout':1, 'mining_thr':1,
'start_sync_port':20200, 'start_rpc_port':50200, 'clean_db':False}
pool_cfg_3 = {'name':"third", 'nodes':2, 'timeout':1, 'mining_thr':1,
'start_sync_port':20300, 'start_rpc_port':50300, 'clean_db':True}
sync_timeout = 2
transf_timeout = 4
transf_sum = 100
with NodePool.create_every_to_previous(env, logger, pool_cfg_1['start_sync_port'],
pool_cfg_1['start_rpc_port'], pool_cfg_1['nodes'], pool_cfg_1['mining_thr']) as nodes_1:
nodes_1.start_nodes(pool_cfg_1['timeout'])
for node in nodes_1:
node.run_check_test()
with NodePool.create_every_to_custom_list(env,logger, pool_cfg_2['start_sync_port'],
pool_cfg_2['start_rpc_port'], pool_cfg_2['nodes'], nodes_1.ids,
pool_cfg_2['mining_thr']) as nodes_2:
nodes_2.start_nodes(pool_cfg_2['timeout'])
for node in nodes_2:
node.run_check_test()
with NodePool.create_every_to_custom_list(env,logger, pool_cfg_3['start_sync_port'],
pool_cfg_3['start_rpc_port'], pool_cfg_3['nodes'], nodes_1.ids,
pool_cfg_3['mining_thr']) as nodes_3:
nodes_3.start_nodes(pool_cfg_3['timeout'])
for node in nodes_3:
node.run_check_test()
time.sleep(sync_timeout)
# All three pools are up and synchronized
down_nodes = []
target_addresses = []
# Shut down half of the nodes from pool 2 (the smaller half if the count is odd)
for i in range(0, pool_cfg_2['nodes']//2):
node = nodes_2.pop(random.randrange(len(nodes_2)))
node.close()
down_nodes.append(node)
logger.debug("Node: " + str(node.settings.id.listen_rpc_address) + " down")
# Each node in the third pool makes one transaction
for node in nodes_3:
target_addresses.append(node.create_new_address(keys_path="key1"))
node.run_check_balance(target_addresses[-1], 0)
distributor_address = node.load_address(keys_path=Node.DISTRIBUTOR_ADDRESS_PATH)
node.run_check_transfer(to_address=target_addresses[-1], amount=transf_sum,
from_address=distributor_address, fee=0,
timeout=transf_timeout, wait=0)
node.run_check_balance(target_addresses[-1], transf_sum)
time.sleep(sync_timeout)
# Restart all of the stopped nodes
for i in range(0, len(down_nodes)):
node = down_nodes.pop()
node.start_node(pool_cfg_2['timeout'])
nodes_2.append(node)
logger.debug("Node: " + str(node.settings.id.listen_rpc_address) + " started")
for node in nodes_2:
node.run_check_test()
logger.debug("All nodes in nodes_2 started. Synchronyzation")
time.sleep(sync_timeout)
# Check that the nodes are back in sync
for node in nodes_1:
for target_addr in target_addresses:
node.run_check_balance(target_addr, transf_sum)
for node in nodes_3:
for target_addr in target_addresses:
node.run_check_balance(target_addr, transf_sum)
for node in nodes_2:
for target_addr in target_addresses:
node.run_check_balance(target_addr, transf_sum)
return 0
| 46.530303
| 155
| 0.685987
| 1,318
| 9,213
| 4.486343
| 0.173748
| 0.052089
| 0.046677
| 0.042618
| 0.783359
| 0.761035
| 0.753594
| 0.740741
| 0.734652
| 0.726873
| 0
| 0.030673
| 0.217953
| 9,213
| 197
| 156
| 46.766497
| 0.790007
| 0.051666
| 0
| 0.802817
| 0
| 0
| 0.136552
| 0.009408
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021127
| false
| 0
| 0.021127
| 0
| 0.06338
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
82da278c5bdc6d9016f38dcbd8ccaa342dda5f64
| 19
|
py
|
Python
|
project/store/managers/__init__.py
|
aliharby12/Book-Store
|
d2d1d374f58ad1e64e5e470567f6cf347f5cf09a
|
[
"MIT"
] | 1
|
2020-08-26T12:11:52.000Z
|
2020-08-26T12:11:52.000Z
|
project/store/managers/__init__.py
|
aliharby12/Book-Store
|
d2d1d374f58ad1e64e5e470567f6cf347f5cf09a
|
[
"MIT"
] | 1
|
2021-04-30T21:10:01.000Z
|
2021-04-30T21:10:01.000Z
|
project/store/managers/__init__.py
|
aliharby12/Book-Store
|
d2d1d374f58ad1e64e5e470567f6cf347f5cf09a
|
[
"MIT"
] | 2
|
2020-08-26T12:11:55.000Z
|
2020-08-26T13:42:09.000Z
|
from .user import *
| 19
| 19
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d6185ab04188fd1fbac8783ef4101d906fb73cc
| 48
|
py
|
Python
|
nlingua/corpora/__init__.py
|
clueless-skywatcher/polyglossa
|
93bdfe3da457454fd984c0af4bf3a6db724d6b56
|
[
"MIT"
] | null | null | null |
nlingua/corpora/__init__.py
|
clueless-skywatcher/polyglossa
|
93bdfe3da457454fd984c0af4bf3a6db724d6b56
|
[
"MIT"
] | null | null | null |
nlingua/corpora/__init__.py
|
clueless-skywatcher/polyglossa
|
93bdfe3da457454fd984c0af4bf3a6db724d6b56
|
[
"MIT"
] | null | null | null |
from .base import *
from .penn_treebank import *
| 24
| 28
| 0.770833
| 7
| 48
| 5.142857
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 28
| 24
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7d75f688c6ceca2047b63dbd728ca6110c7c8f7c
| 6,222
|
py
|
Python
|
python/cuml/test/dask/test_coordinate_descent.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | 2,743
|
2018-10-11T17:28:58.000Z
|
2022-03-31T19:20:50.000Z
|
python/cuml/test/dask/test_coordinate_descent.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | 4,280
|
2018-10-11T22:29:57.000Z
|
2022-03-31T22:02:44.000Z
|
python/cuml/test/dask/test_coordinate_descent.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | 454
|
2018-10-11T17:40:56.000Z
|
2022-03-25T17:07:09.000Z
|
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from cuml.dask.datasets import make_regression
from cuml.dask.linear_model import ElasticNet
from cuml.dask.linear_model import Lasso
from cuml.metrics import r2_score
from cuml.test.utils import unit_param, quality_param, stress_param
import numpy as np
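# unit_param/quality_param/stress_param (helpers from cuml.test.utils) pick the
# problem size per test tier, so each parametrize below carries one value each
# for the unit, quality, and stress tiers.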
@pytest.mark.mg
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('alpha', [0.001])
@pytest.mark.parametrize('algorithm', ['cyclic', 'random'])
@pytest.mark.parametrize('nrows', [unit_param(50),
quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('column_info', [unit_param([20, 10]),
quality_param([100, 50]),
stress_param([1000, 500])])
@pytest.mark.parametrize('n_parts', [unit_param(4),
quality_param(32),
stress_param(64)])
@pytest.mark.parametrize("delayed", [True, False])
def test_lasso(dtype, alpha, algorithm,
nrows, column_info, n_parts, delayed, client):
ncols, n_info = column_info
X, y = make_regression(n_samples=nrows,
n_features=ncols,
n_informative=n_info,
n_parts=n_parts,
client=client,
dtype=dtype)
lasso = Lasso(alpha=np.array([alpha]), fit_intercept=True,
normalize=False, max_iter=1000,
selection=algorithm, tol=1e-10,
client=client)
lasso.fit(X, y)
y_hat = lasso.predict(X, delayed=delayed)
assert r2_score(y.compute(), y_hat.compute()) >= 0.99
@pytest.mark.mg
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('nrows', [unit_param(50),
quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('column_info', [unit_param([20, 10]),
quality_param([100, 50]),
stress_param([1000, 500])])
@pytest.mark.parametrize('n_parts', [unit_param(16),
quality_param(32),
stress_param(64)])
def test_lasso_default(dtype, nrows, column_info, n_parts, client):
ncols, n_info = column_info
X, y = make_regression(n_samples=nrows,
n_features=ncols,
n_informative=n_info,
client=client,
dtype=dtype)
lasso = Lasso(client=client)
lasso.fit(X, y)
y_hat = lasso.predict(X)
assert r2_score(y.compute(), y_hat.compute()) >= 0.99
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('alpha', [0.5])
@pytest.mark.parametrize('algorithm', ['cyclic', 'random'])
@pytest.mark.parametrize('nrows', [unit_param(500),
quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('column_info', [unit_param([20, 10]),
quality_param([100, 50]),
stress_param([1000, 500])])
@pytest.mark.parametrize('n_parts', [unit_param(16),
quality_param(32),
stress_param(64)])
@pytest.mark.parametrize("delayed", [True, False])
def test_elastic_net(dtype, alpha, algorithm,
nrows, column_info, n_parts, client, delayed):
ncols, n_info = column_info
X, y = make_regression(n_samples=nrows,
n_features=ncols,
n_informative=n_info,
n_parts=n_parts,
client=client,
dtype=dtype)
elasticnet = ElasticNet(alpha=np.array([alpha]), fit_intercept=True,
normalize=False, max_iter=1000,
selection=algorithm, tol=1e-10,
client=client)
elasticnet.fit(X, y)
y_hat = elasticnet.predict(X, delayed=delayed)
# based on differences with scikit-learn 0.22
if alpha == 0.2:
assert r2_score(y.compute(), y_hat.compute()) >= 0.96
else:
assert r2_score(y.compute(), y_hat.compute()) >= 0.80
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('nrows', [unit_param(500),
quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('column_info', [unit_param([20, 10]),
quality_param([100, 50]),
stress_param([1000, 500])])
@pytest.mark.parametrize('n_parts', [unit_param(16),
quality_param(32),
stress_param(64)])
def test_elastic_net_default(dtype, nrows, column_info, n_parts, client):
ncols, n_info = column_info
X, y = make_regression(n_samples=nrows,
n_features=ncols,
n_informative=n_info,
n_parts=n_parts,
client=client,
dtype=dtype)
elasticnet = ElasticNet(client=client)
elasticnet.fit(X, y)
y_hat = elasticnet.predict(X)
assert r2_score(y.compute(), y_hat.compute()) >= 0.96
| 38.407407
| 74
| 0.54677
| 692
| 6,222
| 4.74422
| 0.221098
| 0.073104
| 0.140725
| 0.021322
| 0.774596
| 0.774596
| 0.742309
| 0.742309
| 0.717941
| 0.707889
| 0
| 0.050549
| 0.341851
| 6,222
| 161
| 75
| 38.645963
| 0.75116
| 0.097236
| 0
| 0.761062
| 0
| 0
| 0.03178
| 0
| 0
| 0
| 0
| 0
| 0.044248
| 1
| 0.035398
| false
| 0
| 0.061947
| 0
| 0.097345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7da532d52f74c9ba907eca534f00fb4d1324608f
| 212
|
py
|
Python
|
server/firebase/admin.py
|
andrewlee348/blackjack-strategy
|
58f795c9b13441aa1681a1d47084059fab2b92b2
|
[
"MIT"
] | null | null | null |
server/firebase/admin.py
|
andrewlee348/blackjack-strategy
|
58f795c9b13441aa1681a1d47084059fab2b92b2
|
[
"MIT"
] | null | null | null |
server/firebase/admin.py
|
andrewlee348/blackjack-strategy
|
58f795c9b13441aa1681a1d47084059fab2b92b2
|
[
"MIT"
] | null | null | null |
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
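# credentials.Certificate expects a service-account key file; use it to
# initialize the default Firebase app, then open a Firestore client.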
cred = credentials.Certificate("./credentials.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
| 26.5
| 52
| 0.839623
| 26
| 212
| 6.653846
| 0.5
| 0.300578
| 0.196532
| 0.265896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084906
| 212
| 8
| 53
| 26.5
| 0.891753
| 0
| 0
| 0
| 0
| 0
| 0.084507
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8195df0591b1b1927de4d903624cf976dca60569
| 37,487
|
py
|
Python
|
modules/npcore/layer/objectives.py
|
tuantle/simple_nn_with_numpy
|
4bf5ba23e2df7879030de85eb22b8e30ad9708de
|
[
"MIT"
] | 1
|
2019-01-31T20:24:34.000Z
|
2019-01-31T20:24:34.000Z
|
modules/npcore/layer/objectives.py
|
tuantle/simple_nn_with_numpy
|
4bf5ba23e2df7879030de85eb22b8e30ad9708de
|
[
"MIT"
] | null | null | null |
modules/npcore/layer/objectives.py
|
tuantle/simple_nn_with_numpy
|
4bf5ba23e2df7879030de85eb22b8e30ad9708de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2016-present Tuan Le.
#
# Licensed under the MIT License.
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/mit-license.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
#
# Author Tuan Le (tuan.t.lei@gmail.com)
#
# ------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import abc
import copy
import warnings
import json
import numpy as np
from util.const import CONST
from util.validation import (
MShape,
MType,
OneOfType
)
from npcore.layer.layer import Layer
# ------------------------------------------------------------------------
class OBJECTIVE(CONST):
LABEL = 'objective'
MAE_LOSS_LABEL = 'mae_loss'
MSE_LOSS_LABEL = 'mse_loss'
LOG_COSH_LOSS_LABEL = 'log_cosh_loss'
XTANH_LOSS_LABEL = 'xtanh_loss'
XSIGMOID_LOSS_LABEL = 'xsigmoid_loss'
ALGEBRAIC_LOSS_LABEL = 'algebraic_loss'
SIGMOID_CROSSENTROPY_LOSS = 'sigmoid_crossentropy_loss'
SOFTMAX_CROSSENTROPY_LOSS = 'softmax_crossentropy_loss'
ARRANGEMENT = ('2', '')
# ------------------------------------------------------------------------
class Objective(Layer):
_label = OBJECTIVE.LABEL
_arrangement = OBJECTIVE.ARRANGEMENT
"""
Abstraction of a base objective layer. Manages objective loss.
Arguments:
size: objective size
name: objective name
metric: loss metric
"""
@MType(size=int,
name=str,
metric=(str,))
def __init__(self, *,
size=1,
name='',
metric=('loss',)):
self._y_t = None
self._y_prime_t = None
self._evaluation = {
'count': 0,
'metric': {}
}
self._residue = {}
self._monitor = None
super().__init__(shape=(1, size), name=name)
self.reconfig(metric=metric)
def __str__(self):
return super().__str__() + '_' + OBJECTIVE.LABEL
# ------------------------------------------------------------------------
@property
def inputs(self):
"""
Get objective forward pass input tensor.
Returns:
tensor
"""
if self.has_prev:
return self.prev.outputs
else:
return None
@property
def outputs(self):
"""
Get objective forward pass output tensor
Returns:
tensor
"""
if self._y_t is not None:
return self._y_t.copy()
else:
return None
@property
def evaluation_metric(self):
"""
Get objective evaluation metric
"""
evaluation_count = self._evaluation['count']
evaluation_metric = copy.deepcopy(self._evaluation['metric'])
if evaluation_count > 1:
for key in evaluation_metric.keys():
evaluation_metric[key] /= evaluation_count
return evaluation_metric
def unassign_hooks(self):
"""
Unassign all callback functions
"""
self._monitor = None
@MType(monitor=OneOfType(callable, None))
def assign_hook(self, *,
monitor=None):
"""
Assign callback functions
Arguments:
monitor: callback function to do probing during forward/backward pass
"""
if monitor is not None:
self._monitor = monitor
def reset(self):
"""
Reset internal states.
"""
self._y_t = None
self._y_prime_t = None
self._residue = {}
self._evaluation['count'] = 0
for key in self._evaluation['metric'].keys():
self._evaluation['metric'][key] = 0
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(as_json=bool, beautify_json=bool)
def snapshot(self, *, as_json=False, beautify_json=True):
"""
Return objective as a snapshot dict data
Arguments:
as_json:
beautify_json:
Returns:
snapshot
"""
snapshot = super().snapshot(as_json=False, beautify_json=False)
snapshot.update({
'base_label': Objective.label + '_' + snapshot['base_label'],
'metric': tuple(self._evaluation['metric'].keys())
})
if as_json:
if beautify_json:
return json.dumps(snapshot, indent=4, sort_keys=False)
else:
return json.dumps(snapshot)
else:
return snapshot.copy()
@MType(dict, np.ndarray, residue=dict)
@MShape(axis=1)
def forward(self, stage, a_t, *, residue={}):
"""
Do forward pass method.
Arguments:
stage: forward stage
a_t: post-nonlinearity (a) tensor
residue:
Returns:
layer
"""
self._y_t = a_t # a_t.copy()
self._residue = residue
if self._monitor is not None:
report = {
'pass': 'forward',
'stage': stage,
'inputs': self.inputs,
'outputs': self.outputs,
'residue': residue
}
self._monitor(report)
if self.has_next:
warnings.warn(f'Objective {self.name} layer must be the last in the connection chain. There should be no connection to a next layer.', UserWarning)
return self
@MType(np.ndarray)
@MShape(axis=1)
def evaluate(self, y_prime_t):
"""
Get evaluation metric given the expected truth.
Arguments:
y_prime_t: expected output (y) tensor
Returns:
self
"""
self._evaluation['count'] += 1
self._y_prime_t = y_prime_t # y_prime_t.copy()
evaluation_metric = self._evaluation['metric']
(ly_t, residue) = self.compute_loss(self._y_t, self._y_prime_t, residue=self._residue)
metric = self.compute_evaluation_metric(self._y_t, self._y_prime_t, ly_t, evaluation_metric)
self._evaluation['metric'] = metric
self._residue = residue
return self
@MType(dict)
def backward(self, stage):
"""
Do backward pass by passing the loss gradient tensor back to the prev link.
Arguments:
stage: backward stage
Returns:
layer
"""
if self._y_t is None:
warnings.warn(f'Objective {self.name} cannot do backward pass. Need to run forward pass first.', UserWarning)
return self
elif self._y_prime_t is None:
warnings.warn(f'Objective {self.name} cannot do backward pass. Need to run evaluation first.', UserWarning)
return self
else:
hparam = stage['hparam']
batch_size = hparam['batch_size']
(eyg_t, residue) = self.compute_loss_grad(self._y_t, self._y_prime_t, residue=self._residue)
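# Average the error gradient over the mini-batch so the update magnitude
# does not grow with batch size.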
eyg_t = eyg_t / batch_size if batch_size > 1 else eyg_t
if self._monitor is not None:
report = {
'pass': 'backward',
'stage': stage,
'error': self._y_t - self._y_prime_t,
'grad': {
'error': eyg_t
},
'evaluation': self._evaluation,
'residue': residue
}
self._monitor(report)
if self.has_prev:
return self.prev.backward(stage, eyg_t, residue=residue)
else:
warnings.warn(f'Objective {self.name} connection is incomplete. Missing connection to previous layer.', UserWarning)
return self
@abc.abstractmethod
def compute_evaluation_metric(self):
"""
Compute the evaluation metric.
"""
pass
@abc.abstractmethod
def compute_loss(self):
"""
Compute the loss tensor. Not implemented
"""
pass
@abc.abstractmethod
def compute_loss_grad(self):
"""
Compute the loss gradient tensor for backpropagation. Not implemented
"""
pass
# ------------------------------------------------------------------------
class MAELoss(Objective):
_label = OBJECTIVE.MAE_LOSS_LABEL
"""
Objective using mean absolute error for the loss function
"""
# ------------------------------------------------------------------------
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric or 'accuracy' in metric or 'acc' in metric:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
if 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
warnings.warn(f'Mean absolute error objective only has a loss metric. Ignoring metrics {metric}', UserWarning)
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(np.ndarray, np.ndarray, dict)
def compute_loss(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
ly_t = np.abs(ey_t)
return (ly_t, residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss gradient tensor for gradient descent update.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
eyg_t = np.vectorize(lambda element: (element and 1) or (not element and -1))(y_t > y_prime_t)
return (eyg_t, residue)
@MType(np.ndarray, np.ndarray, np.ndarray, dict)
def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
"""
Compute the evaluation metric.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
ly_t: loss tensor
Returns:
metric
"""
if 'loss' in evaluation_metric:
evaluation_metric['loss'] += ly_t.mean()
return evaluation_metric
# ------------------------------------------------------------------------
class MSELoss(Objective):
_label = OBJECTIVE.MSE_LOSS_LABEL
"""
Objective using mean square error for the loss function.
"""
# ------------------------------------------------------------------------
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
if 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
warnings.warn(f'Mean square error objective only has a loss metric. Ignoring metrics {metric}', UserWarning)
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(np.ndarray, np.ndarray, dict)
def compute_loss(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
ly_t = np.square(ey_t)
return (ly_t, residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss gradient tensor for gradient descent update.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
eyg_t = 2 * ey_t
return (eyg_t, residue)
@MType(np.ndarray, np.ndarray, np.ndarray, dict)
def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
"""
Compute the evaluation metric.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
ly_t: loss tensor
Returns:
metric
"""
if 'loss' in evaluation_metric:
evaluation_metric['loss'] += ly_t.mean()
return evaluation_metric
# ------------------------------------------------------------------------
class LogCoshLoss(Objective):
_label = OBJECTIVE.LOG_COSH_LOSS_LABEL
"""
Objective using log-cosh loss for the loss function.
`log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
like the l2 loss, but will not be so strongly affected by the
occasional wildly incorrect prediction.
"""
# ------------------------------------------------------------------------
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric or 'accuracy' in metric or 'acc' in metric:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
if 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
warnings.warn(f'Log-cosh loss objective only has a loss metric. Ignoring metrics {metric}', UserWarning)
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(np.ndarray, np.ndarray, dict)
def compute_loss(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
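# cosh(e) >= 1, so the 1e-12 term is only a numerical safety margin before the log.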
ly_t = np.log(np.cosh(ey_t) + 1e-12)
return (ly_t, residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss gradient tensor for gradient descent update.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
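# d/de log(cosh(e)) = tanh(e): approximately e near zero (L2-like) and
# saturating to +/-1 for large errors (L1-like).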
eyg_t = np.tanh(ey_t)
return (eyg_t, residue)
@MType(np.ndarray, np.ndarray, np.ndarray, dict)
def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
"""
Compute the evaluation metric.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
ly_t: loss tensor
Returns:
metric
"""
if 'loss' in evaluation_metric:
evaluation_metric['loss'] += ly_t.mean()
return evaluation_metric
# ------------------------------------------------------------------------
class XTanhLoss(Objective):
_label = OBJECTIVE.XTANH_LOSS_LABEL
"""
Arguments:
size: objective size
name: objective name
metric: loss metric
"""
@MType(size=int,
name=str,
metric=(str,))
def __init__(self, *,
size=1,
name='',
metric=('loss',)):
self._cache = None
super().__init__(size=size, name=name)
self.reconfig(metric=metric)
# ------------------------------------------------------------------------
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric or 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
if 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
warnings.warn(f'XTanh loss objective only has a loss metric. Ignoring metrics {metric}.', UserWarning)
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(np.ndarray, np.ndarray, dict)
def compute_loss(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
tanh_of_ey_t = np.tanh(ey_t)
ly_t = np.multiply(ey_t, tanh_of_ey_t)
self._cache = tanh_of_ey_t
return (ly_t, residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss gradient tensor for gradient descent update.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
tanh_of_ey_t = self._cache
eyg_t = tanh_of_ey_t + ey_t * (1 - np.square(tanh_of_ey_t))
return (eyg_t, residue)
@MType(np.ndarray, np.ndarray, np.ndarray, dict)
def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
"""
Compute the evaluation metric.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
ly_t: loss tensor
evaluation_metric:
Returns:
metric
"""
if 'loss' in evaluation_metric:
evaluation_metric['loss'] += ly_t.mean()
return evaluation_metric
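# Sanity check for the gradient above (illustrative sketch; assumes numpy as
# np): d/dx [x * tanh(x)] = tanh(x) + x * (1 - tanh(x) ** 2), which should
# match a central finite difference.
#
# >>> import numpy as np
# >>> f = lambda v: v * np.tanh(v)
# >>> x, h = 0.8, 1e-6
# >>> analytic = np.tanh(x) + x * (1 - np.tanh(x) ** 2)
# >>> bool(np.isclose(analytic, (f(x + h) - f(x - h)) / (2 * h)))
# True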
# ------------------------------------------------------------------------
class XSigmoidLoss(Objective):
_label = OBJECTIVE.XSIGMOID_LOSS_LABEL
"""
Arguments:
size: objective size
name: objective name
metric: loss metric
"""
@MType(size=int,
name=str,
metric=(str,))
def __init__(self, *,
size=1,
name='',
metric=('loss',)):
self._cache = None
super().__init__(size=size, name=name)
self.reconfig(metric=metric)
# ------------------------------------------------------------------------
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric or 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
if 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
warnings.warn(f'XSigmoid loss objective only has a loss metric. Ignoring metrics {metric}.', UserWarning)
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(np.ndarray, np.ndarray, dict)
def compute_loss(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
sigmoid_of_ey_t = np.exp(-np.logaddexp(0, -ey_t + 1e-12))
ly_t = np.multiply(2 * ey_t, sigmoid_of_ey_t) - ey_t
self._cache = sigmoid_of_ey_t
return (ly_t, residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss gradient tensor for gradient descent update.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
sigmoid_of_ey_t = self._cache
eyg_t = 2 * sigmoid_of_ey_t + np.multiply(np.multiply(2 * ey_t, np.exp(-ey_t)), np.square(sigmoid_of_ey_t)) - 1
return (eyg_t, residue)
@MType(np.ndarray, np.ndarray, np.ndarray, dict)
def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
"""
Compute the evaluation metric.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
ly_t: loss tensor
evaluation_metric:
Returns:
metric
"""
if 'loss' in evaluation_metric:
evaluation_metric['loss'] += ly_t.mean()
return evaluation_metric
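# Sanity check for the gradient above (illustrative sketch; assumes numpy as
# np): with s = sigmoid(x), the derivative of 2 * x * s - x is
# 2 * s + 2 * x * exp(-x) * s ** 2 - 1, as assembled in compute_loss_grad.
#
# >>> import numpy as np
# >>> sig = lambda v: 1.0 / (1.0 + np.exp(-v))
# >>> f = lambda v: 2 * v * sig(v) - v
# >>> x, h = 0.5, 1e-6
# >>> analytic = 2 * sig(x) + 2 * x * np.exp(-x) * sig(x) ** 2 - 1
# >>> bool(np.isclose(analytic, (f(x + h) - f(x - h)) / (2 * h)))
# True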
# ------------------------------------------------------------------------
class AlgebraicLoss(Objective):
_label = OBJECTIVE.ALGEBRAIC_LOSS_LABEL
"""
Arguments:
size: objective size
name: objective name
metric: loss metric
"""
@MType(size=int,
name=str,
metric=(str,))
def __init__(self, *,
size=1,
name='',
metric=('loss',)):
self._cache = None
super().__init__(size=size, name=name)
self.reconfig(metric=metric)
# ------------------------------------------------------------------------
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric or 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
if 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
warnings.warn(f'Algebraic loss objective only has a loss metric. Ignoring metrics {metric}.', UserWarning)
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(np.ndarray, np.ndarray, dict)
def compute_loss(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
sqr_of_ey_t = np.square(ey_t)
inv_of_ey_t = 1 / (1 + sqr_of_ey_t)
inv_sqrt_of_ey_t = np.sqrt(inv_of_ey_t)
ly_t = np.multiply(sqr_of_ey_t, inv_sqrt_of_ey_t)
self._cache = (sqr_of_ey_t, inv_of_ey_t, inv_sqrt_of_ey_t)
return (ly_t, residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss gradient tensor for gradient descent update.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
(sqr_of_ey_t, inv_of_ey_t, inv_sqrt_of_ey_t) = self._cache
eyg_t = np.multiply(2 * ey_t + np.multiply(ey_t, sqr_of_ey_t), np.multiply(inv_of_ey_t, inv_sqrt_of_ey_t))
return (eyg_t, residue)
@MType(np.ndarray, np.ndarray, np.ndarray, dict)
def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
"""
Compute the evaluation metric.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
ly_t: loss tensor
evaluation_metric:
Returns:
metric
"""
if 'loss' in evaluation_metric:
evaluation_metric['loss'] += ly_t.mean()
return evaluation_metric
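# Sanity check for the gradient above (illustrative sketch; assumes numpy as
# np): for f(x) = x ** 2 / sqrt(1 + x ** 2) the derivative simplifies to
# (2 * x + x ** 3) * (1 + x ** 2) ** -1.5, which is what compute_loss_grad
# builds from its cached factors.
#
# >>> import numpy as np
# >>> f = lambda v: v ** 2 / np.sqrt(1 + v ** 2)
# >>> x, h = 0.7, 1e-6
# >>> analytic = (2 * x + x ** 3) / (1 + x ** 2) ** 1.5
# >>> bool(np.isclose(analytic, (f(x + h) - f(x - h)) / (2 * h)))
# True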
# ------------------------------------------------------------------------
class SigmoidCrossentropyLoss(Objective):
_label = OBJECTIVE.SIGMOID_CROSSENTROPY_LOSS
"""
Objective using sigmoid (binary) crossentropy for the loss function.
Arguments:
size: objective size
name: objective name
metric: loss and accuracy metrics
"""
@MType(size=int,
name=str,
metric=(str,))
def __init__(self, *,
size=1,
name='',
metric=('loss', 'accuracy')):
super().__init__(size=size, name=name)
self.reconfig(metric=metric)
# ------------------------------------------------------------------------
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric or 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
if 'accuracy' in metric or 'acc' in metric:
self._evaluation['metric']['accuracy'] = 0
if 'recall' in metric or 'rc' in metric:
self._evaluation['metric']['recall'] = 0
if 'precision' in metric or 'prec' in metric:
self._evaluation['metric']['precision'] = 0
if 'f1_score' in metric or 'f1' in metric:
self._evaluation['metric']['f1_score'] = 0
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(dict, np.ndarray, residue=dict)
@MShape(axis=1)
def forward(self, stage, a_t, *, residue={}):
"""
Do the forward pass.
Arguments:
stage: forward stage
a_t: post-nonlinearity (a) tensor
residue:
Returns:
layer
"""
sigmoid_of_a_t = np.exp(-np.logaddexp(0, -a_t + 1e-12))
return super().forward(stage, sigmoid_of_a_t, residue=residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
y_prime_t = y_prime_t.astype(np.float32)
ly_t = -(y_prime_t * np.log(y_t + 1e-12) + (1 - y_prime_t) * np.log((1 - y_t) + 1e-12))
return (ly_t, residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss gradient tensor for gradient descent update.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
eyg_t = ey_t
return (eyg_t, residue)
@MType(np.ndarray, np.ndarray, np.ndarray, dict)
def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
"""
Compute the evaluation metric.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
ly_t: loss tensor
evaluation_metric:
Returns:
metric
"""
if 'loss' in evaluation_metric:
evaluation_metric['loss'] += ly_t.mean()
if 'accuracy' in evaluation_metric:
evaluation_metric['accuracy'] += np.equal(y_prime_t, y_t.round()).astype(np.int8).mean()
if 'recall' in evaluation_metric or 'precision' in evaluation_metric or 'f1_score' in evaluation_metric:
y_t = np.round(y_t)
true_pos = np.sum(np.multiply(y_t, y_prime_t), axis=0).astype(np.float64)
# true_neg = np.sum(np.multiply((1 - y_t), (1 - y_prime_t)), axis=0).astype(np.float64)
false_pos = np.sum(np.multiply(y_t, (1 - y_prime_t)), axis=0).astype(np.float64)
false_neg = np.sum(np.multiply((1 - y_t), y_prime_t), axis=0).astype(np.float64)
recall = true_pos / (true_pos + false_neg + 1e-12)
precision = true_pos / (true_pos + false_pos + 1e-12)
if 'recall' in evaluation_metric:
evaluation_metric['recall'] = recall.mean()
if 'precision' in evaluation_metric:
evaluation_metric['precision'] = precision.mean()
if 'f1_score' in evaluation_metric:
evaluation_metric['f1_score'] = (2 * np.multiply(precision, recall) / (precision + recall + 1e-12)).mean()
return evaluation_metric
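# Why compute_loss_grad can return just y - y': with y = sigmoid(a), the chain
# rule for binary crossentropy w.r.t. the pre-activation a collapses to
# sigmoid(a) - y'. A numerical spot check (illustrative sketch; assumes numpy
# as np):
#
# >>> import numpy as np
# >>> sig = lambda v: 1.0 / (1.0 + np.exp(-v))
# >>> bce = lambda a, t: -(t * np.log(sig(a)) + (1 - t) * np.log(1 - sig(a)))
# >>> a, t, h = 0.3, 1.0, 1e-6
# >>> bool(np.isclose(sig(a) - t, (bce(a + h, t) - bce(a - h, t)) / (2 * h)))
# True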
# ------------------------------------------------------------------------
class SoftmaxCrossentropyLoss(Objective):
_label = OBJECTIVE.SOFTMAX_CROSSENTROPY_LOSS
"""
Objective using softmax (multinomial) crossentropy for the loss function.
Arguments:
size: objective size
name: objective name
metric: loss and accuracy metrics
"""
@MType(size=int,
name=str,
metric=(str,))
def __init__(self, *,
size=1,
name='',
metric=('loss', 'accuracy')):
super().__init__(size=size, name=name)
self.reconfig(metric=metric)
# ------------------------------------------------------------------------
@MType(shape=OneOfType((int,), None),
metric=OneOfType((str,), None))
def reconfig(self, *,
shape=None,
metric=None):
"""
Reconfig objective
Arguments:
shape: objective layer shape
metric: loss metric
"""
if metric is not None:
if 'loss' in metric or 'accuracy' in metric or 'acc' in metric or \
'recall' in metric or 'rc' in metric or \
'precision' in metric or 'prec' in metric or \
'f1_score' in metric or 'f1' in metric:
if 'loss' in metric:
self._evaluation['metric']['loss'] = 0
if 'accuracy' in metric or 'acc' in metric:
self._evaluation['metric']['accuracy'] = 0
if 'recall' in metric or 'rc' in metric:
self._evaluation['metric']['recall'] = 0
if 'precision' in metric or 'prec' in metric:
self._evaluation['metric']['precision'] = 0
if 'f1_score' in metric or 'f1' in metric:
self._evaluation['metric']['f1_score'] = 0
else:
raise TypeError(f'Unknown metric {metric} for objective {self.name}.')
if shape is not None:
super().reconfig(shape=shape)
self.reset()
@MType(dict, np.ndarray, residue=dict)
@MShape(axis=1)
def forward(self, stage, a_t, *, residue={}):
"""
Do the forward pass.
Arguments:
stage: forward stage
a_t: post-nonlinearity (a) tensor
residue:
Returns:
layer
"""
exps_a_t = np.exp(a_t - a_t.max(axis=1, keepdims=True))
softmax_a_t = exps_a_t / exps_a_t.sum(axis=1, keepdims=True)
return super().forward(stage, softmax_a_t, residue=residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
y_prime_t = y_prime_t.astype(np.float32)
ly_t = -np.log(y_t[range(y_t.shape[0]), y_prime_t.argmax(axis=1)] + 1e-12)
return (ly_t, residue)
@MType(np.ndarray, np.ndarray, dict)
def compute_loss_grad(self, y_t, y_prime_t, *, residue={}):
"""
Compute the loss gradient tensor for gradient descent update.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
residue:
Returns:
tuple
"""
ey_t = y_t - y_prime_t
eyg_t = ey_t
return (eyg_t, residue)
@MType(np.ndarray, np.ndarray, np.ndarray, dict)
def compute_evaluation_metric(self, y_t, y_prime_t, ly_t, evaluation_metric):
"""
Compute the evaluation metric.
Arguments:
y_t: output (y) tensor
y_prime_t: expected output (y) tensor
ly_t: loss tensor
evaluation_metric:
Returns:
metric
"""
if 'loss' in evaluation_metric:
evaluation_metric['loss'] += ly_t.mean()
if 'accuracy' in evaluation_metric:
evaluation_metric['accuracy'] += np.equal(y_prime_t.argmax(axis=1), y_t.argmax(axis=1)).astype(np.int8).mean()
if 'recall' in evaluation_metric or 'precision' in evaluation_metric or 'f1_score' in evaluation_metric:
y_t = np.round(y_t)
true_pos = np.sum(np.multiply(y_t, y_prime_t), axis=0).astype(np.float64)
# true_neg = np.sum(np.multiply((1 - y_t), (1 - y_prime_t)), axis=0).astype(np.float64)
false_pos = np.sum(np.multiply(y_t, (1 - y_prime_t)), axis=0).astype(np.float64)
false_neg = np.sum(np.multiply((1 - y_t), y_prime_t), axis=0).astype(np.float64)
recall = true_pos / (true_pos + false_neg + 1e-12)
precision = true_pos / (true_pos + false_pos + 1e-12)
if 'recall' in evaluation_metric:
evaluation_metric['recall'] = recall.mean()
if 'precision' in evaluation_metric:
evaluation_metric['precision'] = precision.mean()
if 'f1_score' in evaluation_metric:
evaluation_metric['f1_score'] = (2 * np.multiply(precision, recall) / (precision + recall + 1e-12)).mean()
return evaluation_metric
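# The max-subtraction in forward() above is the standard numerical-stability
# trick: softmax is invariant to a constant shift per row, and shifting by the
# row max keeps np.exp from overflowing on large logits. Illustrative sketch
# (assumes numpy as np):
#
# >>> import numpy as np
# >>> a = np.array([[1000.0, 1001.0, 1002.0]])
# >>> e = np.exp(a - a.max(axis=1, keepdims=True))
# >>> s = e / e.sum(axis=1, keepdims=True)
# >>> bool(np.isfinite(s).all() and np.isclose(s.sum(), 1.0))
# True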
# ------------------------------------------------------------------------
# [next dataset record] user_profile/tests.py (Python, 3,922 bytes,
# license CC0-1.0, blob 81e2bfbbffa70b330e5871289001707f2fac8b15,
# repos: pwodyk/CI_MilestoneProject4, pawodyk/CI_MilestoneProject4)
# ------------------------------------------------------------------------
from django.test import TestCase
from django.apps import apps
from django.contrib.auth.models import User
from .apps import UserProfileConfig
from .views import *
from issue_tracker.models import Ticket
class TestGamesApps(TestCase):
"""Testing App"""
def test_app_name(self):
self.assertEqual("user_profile", UserProfileConfig.name)
self.assertEqual("user_profile", apps.get_app_config("user_profile").name)
class TestGamesViews(TestCase):
"""Testing Views"""
def test_get_user_profile_page(self):
u = User.objects.create_user(username="test_username", password="test_password")
self.assertTrue(u)
user = self.client.login(username="test_username", password="test_password")
page = self.client.get("/profile/")
self.assertEqual(page.status_code, 200)
self.assertTemplateUsed(page, "profile.html")
def test_get_user_profile_pass_attributes_to_template(self):
u = User.objects.create_user(username="test_username", password="test_password")
self.assertTrue(u)
user = self.client.login(username="test_username", password="test_password")
t = Ticket(name="test ticket", description="test description", ticket_type="B", created_by=u)
t.save()
page = self.client.get("/profile/")
attr = page.context
self.assertTrue(attr['user'])
self.assertTrue(attr['tickets'])
self.assertEqual(page.status_code, 200)
self.assertTemplateUsed(page, "profile.html")
def test_get_user_profile_has_fullname_ticket_type_of_Bug(self):
u = User.objects.create_user(username="test_username", password="test_password")
self.assertTrue(u)
user = self.client.login(username="test_username", password="test_password")
t = Ticket(name="test ticket", description="test description", ticket_type="B", created_by=u)
t.save()
page = self.client.get("/profile/")
tickets = list(page.context['tickets'])
ticket = tickets[0]
self.assertTrue(ticket['type_name'])
self.assertEqual(ticket['type_name'], "Bug")
def test_get_user_profile_has_fullname_ticket_type_of_Feature(self):
u = User.objects.create_user(username="test_username", password="test_password")
self.assertTrue(u)
user = self.client.login(username="test_username", password="test_password")
t = Ticket(name="test ticket", description="test description", ticket_type="F", created_by=u)
t.save()
page = self.client.get("/profile/")
tickets = list(page.context['tickets'])
ticket = tickets[0]
self.assertTrue(ticket['type_name'])
self.assertEqual(ticket['type_name'], "Feature")
def test_get_user_profile_has_fullname_status_of_submitted(self):
u = User.objects.create_user(username="test_username", password="test_password")
self.assertTrue(u)
user = self.client.login(username="test_username", password="test_password")
t = Ticket(name="test ticket", description="test description", ticket_type="F", created_by=u)
t.save()
page = self.client.get("/profile/")
tickets = list(page.context['tickets'])
ticket = tickets[0]
self.assertTrue(ticket['status_name'])
self.assertEqual(ticket['status_name'], "Submitted")
def test_get_user_profile_when_user_is_not_logged_in(self):
page = self.client.get("/profile/")
self.assertEqual(page.status_code, 302)
self.assertRedirects(page, "/accounts/login/?next=/profile/")
# ------------------------------------------------------------------------
# [next dataset record] django/bosstiles/views.py (Python, 11,729 bytes,
# license Apache-2.0, blob f202e8f4a91276adc1ab37b6ec7fcde7cc53e0ea,
# repo: ArnaudGallardo/boss)
# ------------------------------------------------------------------------
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.conf import settings
from boss import utils
from boss.throttling import BossThrottle
from bosscore.request import BossRequest
from bosscore.error import BossError, BossHTTPError, ErrorCodes
import spdb
import bossutils
from .renderers import PNGRenderer, JPEGRenderer
class CutoutTile(APIView):
"""
View to handle spatial cutouts by providing all datamodel fields
* Requires authentication.
"""
renderer_classes = (PNGRenderer, JPEGRenderer)
def __init__(self):
super().__init__()
self.data_type = None
self.bit_depth = None
def get(self, request, collection, experiment, channel, orientation, resolution, x_args, y_args, z_args, t_args=None):
"""
View to handle GET requests for a cuboid of data while providing all params
:param request: DRF Request object
:type request: rest_framework.request.Request
:param collection: Unique Collection identifier, indicating which collection you want to access
:param experiment: Experiment identifier, indicating which experiment you want to access
:param channel: Channel identifier, indicating which channel you want to access
:param orientation: Image plane requested. Valid options include xy, xz or yz
:param resolution: Integer indicating the level in the resolution hierarchy (0 = native)
:param x_args: Python style range indicating the X coordinates of where to post the cuboid (eg. 100:200)
:param y_args: Python style range indicating the Y coordinates of where to post the cuboid (eg. 100:200)
:param z_args: Python style range indicating the Z coordinates of where to post the cuboid (eg. 100:200)
:return:
"""
# Process request and validate
try:
request_args = {
"service": "image",
"collection_name": collection,
"experiment_name": experiment,
"channel_name": channel,
"orientation" : orientation,
"resolution": resolution,
"x_args": x_args,
"y_args": y_args,
"z_args": z_args,
"time_args": t_args
}
req = BossRequest(request, request_args)
except BossError as err:
return err.to_http()
# Define access mode
access_mode = utils.get_access_mode(request)
# Convert to Resource
resource = spdb.project.BossResourceDjango(req)
# Get bit depth
try:
self.bit_depth = resource.get_bit_depth()
except ValueError:
return BossHTTPError("Datatype does not match channel", ErrorCodes.DATATYPE_DOES_NOT_MATCH)
# Make sure cutout request is under 1GB UNCOMPRESSED
total_bytes = req.get_x_span() * req.get_y_span() * req.get_z_span() * len(req.get_time()) * (self.bit_depth/8)
if total_bytes > settings.CUTOUT_MAX_SIZE:
return BossHTTPError("Cutout request is over 1GB when uncompressed. Reduce cutout dimensions.",
ErrorCodes.REQUEST_TOO_LARGE)
# Add metrics to CloudWatch
cost = ( req.get_x_span()
* req.get_y_span()
* req.get_z_span()
* (req.get_time().stop - req.get_time().start)
* self.bit_depth
/ 8
) # Calculating the number of bytes
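# Worked example of the byte count above (illustrative numbers only): a
# 1024 x 1024 x 16 voxel cutout of an 8-bit channel over one time step costs
# 1024 * 1024 * 16 * 1 * (8 / 8) = 16,777,216 bytes (16 MiB), comfortably
# under the CUTOUT_MAX_SIZE guard applied earlier.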
BossThrottle().check('image_egress',
request.user,
cost)
boss_config = bossutils.configuration.BossConfig()
dimensions = [
{'Name': 'User', 'Value': request.user.username},
{'Name': 'Resource', 'Value': '{}/{}/{}'.format(collection,
experiment,
channel)},
{'Name': 'Stack', 'Value': boss_config['system']['fqdn']},
]
session = bossutils.aws.get_session()
client = session.client('cloudwatch')
client.put_metric_data(
Namespace = "BOSS/Image",
MetricData = [{
'MetricName': 'InvokeCount',
'Dimensions': dimensions,
'Value': 1.0,
'Unit': 'Count'
}, {
'MetricName': 'EgressCost',
'Dimensions': dimensions,
'Value': cost,
'Unit': 'Bytes'
}]
)
# Get interface to SPDB cache
cache = spdb.spatialdb.SpatialDB(settings.KVIO_SETTINGS,
settings.STATEIO_CONFIG,
settings.OBJECTIO_CONFIG)
# Get the params to pull data out of the cache
corner = (req.get_x_start(), req.get_y_start(), req.get_z_start())
extent = (req.get_x_span(), req.get_y_span(), req.get_z_span())
# Do a cutout as specified
data = cache.cutout(resource, corner, extent, req.get_resolution(),
[req.get_time().start, req.get_time().stop], access_mode=access_mode)
# Convert the cutout back to an image and return it
if orientation == 'xy':
img = data.xy_image()
elif orientation == 'yz':
img = data.yz_image()
elif orientation == 'xz':
img = data.xz_image()
else:
return BossHTTPError("Invalid orientation: {}".format(orientation),
ErrorCodes.INVALID_CUTOUT_ARGS)
return Response(img)
class Tile(APIView):
"""
View to handle tile interface when accessing via tile indices
* Requires authentication.
"""
renderer_classes = (PNGRenderer, JPEGRenderer)
def __init__(self):
super().__init__()
self.data_type = None
self.bit_depth = None
def get(self, request, collection, experiment, channel, orientation, tile_size, resolution, x_idx, y_idx, z_idx, t_idx=None):
"""
View to handle GET requests for a tile when providing indices. Currently only supports XY plane
:param request: DRF Request object
:type request: rest_framework.request.Request
:param collection: Unique Collection identifier, indicating which collection you want to access
:param experiment: Experiment identifier, indicating which experiment you want to access
:param channel: Channel identifier, indicating which channel you want to access
:param resolution: Integer indicating the level in the resolution hierarchy (0 = native)
:param x_idx: the tile index in the X dimension
:param y_idx: the tile index in the Y dimension
:param z_idx: the tile index in the Z dimension
:param t_idx: the tile index in the T dimension
:return:
"""
# TODO: DMK Merge Tile and Image view once updated request validation is sorted out
# Process request and validate
try:
request_args = {
"service": "tile",
"collection_name": collection,
"experiment_name": experiment,
"channel_name": channel,
"orientation": orientation,
"tile_size": tile_size,
"resolution": resolution,
"x_args": x_idx,
"y_args": y_idx,
"z_args": z_idx,
"time_args": t_idx
}
req = BossRequest(request, request_args)
except BossError as err:
return err.to_http()
# Define access_mode
access_mode = utils.get_access_mode(request)
# Convert to Resource
resource = spdb.project.BossResourceDjango(req)
# Get bit depth
try:
self.bit_depth = resource.get_bit_depth()
except ValueError:
return BossHTTPError("Datatype does not match channel", ErrorCodes.DATATYPE_DOES_NOT_MATCH)
# Make sure cutout request is under 1GB UNCOMPRESSED
total_bytes = req.get_x_span() * req.get_y_span() * req.get_z_span() * len(req.get_time()) * (self.bit_depth/8)
if total_bytes > settings.CUTOUT_MAX_SIZE:
return BossHTTPError("Cutout request is over 1GB when uncompressed. Reduce cutout dimensions.",
ErrorCodes.REQUEST_TOO_LARGE)
# Add metrics to CloudWatch
cost = ( req.get_x_span()
* req.get_y_span()
* req.get_z_span()
* (req.get_time().stop - req.get_time().start)
* self.bit_depth
/ 8
) # Calculating the number of bytes
BossThrottle().check('tile_egress',
request.user,
cost)
boss_config = bossutils.configuration.BossConfig()
dimensions = [
{'Name': 'User', 'Value': request.user.username},
{'Name': 'Resource', 'Value': '{}/{}/{}'.format(collection,
experiment,
channel)},
{'Name': 'Stack', 'Value': boss_config['system']['fqdn']},
]
session = bossutils.aws.get_session()
client = session.client('cloudwatch')
client.put_metric_data(
Namespace = "BOSS/Tile",
MetricData = [{
'MetricName': 'InvokeCount',
'Dimensions': dimensions,
'Value': 1.0,
'Unit': 'Count'
}, {
'MetricName': 'EgressCost',
'Dimensions': dimensions,
'Value': cost,
'Unit': 'Bytes'
}]
)
# Get interface to SPDB cache
cache = spdb.spatialdb.SpatialDB(settings.KVIO_SETTINGS,
settings.STATEIO_CONFIG,
settings.OBJECTIO_CONFIG)
# Get the params to pull data out of the cache
corner = (req.get_x_start(), req.get_y_start(), req.get_z_start())
extent = (req.get_x_span(), req.get_y_span(), req.get_z_span())
# Do a cutout as specified
data = cache.cutout(resource, corner, extent, req.get_resolution(),
[req.get_time().start, req.get_time().stop], access_mode=access_mode)
# Convert the cutout back to an image and return it
if orientation == 'xy':
img = data.xy_image()
elif orientation == 'yz':
img = data.yz_image()
elif orientation == 'xz':
img = data.xz_image()
else:
return BossHTTPError("Invalid orientation: {}".format(orientation),
ErrorCodes.INVALID_CUTOUT_ARGS)
return Response(img)
# ------------------------------------------------------------------------
# [next dataset record] SSD/SSD_FPN_GIoU/utils/detection/__init__.py
# (Python, 31 bytes, license MIT,
# blob f20af8b609aa02c827f80471c29174cfa8770585,
# repos: ForrestPi/ObjectDetection, Yang-Zhaowei/PowerBank)
# ------------------------------------------------------------------------
from .detection import Detect
# ------------------------------------------------------------------------
# [next dataset record]
# airbyte-integrations/bases/airbyte-protocol/airbyte_protocol/models/__init__.py
# (Python, 71 bytes, license MIT,
# blob f21750106913345aed754625d81a14d4de0db439, repo: rajatariya21/airbyte)
# ------------------------------------------------------------------------
# generated by generate-protocol-files
from .airbyte_protocol import *
# ------------------------------------------------------------------------
# [next dataset record] neurora/stats_cal.py (Python, 17,228 bytes,
# license MIT, blob 480d64545892b5009b0dc43889ffa627e70593d5,
# repo: neurora/neurora.io)
# ------------------------------------------------------------------------
# -*- coding: utf-8 -*-
' a module for conducting the statistical analysis '
__author__ = 'Zitong Lu'
import numpy as np
from scipy.stats import ttest_1samp, ttest_rel, ttest_ind
from neurora.stuff import permutation_test
' a function for conducting the statistical analysis for results of EEG-like data '
def stats(corrs, fisherz=True, permutation=True, iter=5000):
"""
Conduct the statistical analysis for results of EEG-like data
Parameters
----------
corrs : array
The correlation coefficients.
The shape of corrs must be [n_subs, n_chls, n_ts, 2]. n_subs, n_chls, n_ts represent the number of subjects, the
number of channels and the number of time-points. 2 represents a r-value and a p-value.
fisherz : bool True or False. Default is True.
Conduct Fisher-Z transform.
permutation : bool True or False. Default is True.
Use permutation test or not.
iter : int. Default is 5000.
The number of iterations for the permutation test.
Returns
-------
stats : array
The statistical results.
The shape of stats is [n_chls, n_ts, 2]. n_chls, n_ts represent the number of channels and the number of
time-points. 2 represents a t-value and a p-value.
Notes
-----
n_subs must >= 6.
This function can be used for the correlation results of NPS, ISC, eeg-like RDMs-correlations.
"""
if len(np.shape(corrs)) != 4:
return "Invalid input!"
# get the number of subjects, channels & time-points
subs, chls, ts = np.shape(corrs)[:3]
# subs>=6
if subs < 6:
return print("the number of subjects is too small!")
# initialize the corrs
stats = np.zeros([chls, ts, 2], dtype=np.float64)
# get r-map
rs = corrs[:, :, :, 0]
if fisherz:
# Fisher r to z
zs = 0.5 * np.log((1 + rs) / (1 - rs))
else:
zs = rs
# calculate the statistical results
for i in range(chls):
for j in range(ts):
# t test
stats[i, j] = ttest_1samp(zs[:, i, j], 0, alternative="greater")
if permutation == True:
stats[i, j, 1] = permutation_test(zs[:, i, j], np.zeros([subs]), iter=iter)
return stats
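# The Fisher r-to-z step above is mathematically identical to np.arctanh,
# which makes for a quick cross-check (illustrative sketch; assumes numpy as
# np):
#
# >>> import numpy as np
# >>> r = np.array([-0.5, 0.0, 0.3, 0.9])
# >>> bool(np.allclose(0.5 * np.log((1 + r) / (1 - r)), np.arctanh(r)))
# True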
' a function for conducting the statistical analysis for results of fMRI data (searchlight) '
def stats_fmri(corrs, fisherz=True, permutation=False, iter=5000):
"""
Conduct the statistical analysis for results of fMRI data (searchlight)
Parameters
----------
corrs : array
The correlation coefficients.
The shape of corrs must be [n_subs, n_x, n_y, n_z, 2]. n_subs, n_x, n_y, n_z represent the number of subjects,
the number of calculation units for searchlight along the x, y, z axis and 2 represents a r-value and a p-value.
fisherz : bool True or False. Default is True.
Conduct Fisher-Z transform.
permutation : bool True or False. Default is False.
Use permutation test or not.
iter : int. Default is 5000.
The number of iterations for the permutation test.
Returns
-------
stats : array
The statistical results.
The shape of stats is [n_x, n_y, n_z, 2]. n_x, n_y, n_z represent the number of calculation units for
searchlight along the x, y, z axis and 2 represents a t-value and a p-value.
Notes
-----
n_subs must >= 6.
This function can be used for the results of searchlight fMRI NPS and searchlight fMRI RDM-correlations.
"""
if len(np.shape(corrs)) != 5:
return "Invalid input!"
# get the number of subjects
subs = np.shape(corrs)[0]
# subs>=6
if subs < 6:
return print("the number of subjects is too small!")
# get the number of the calculation units in the x, y, z directions
n_x, n_y, n_z = np.shape(corrs)[1:4]
# initialize the corrs
stats = np.zeros([n_x, n_y, n_z, 2], dtype=np.float64)
# get r-map
rs = corrs[:, :, :, :, 0]
if fisherz:
zs = 0.5 * np.log((1+rs)/(1-rs))
else:
zs = rs
# calculate the statistical results
for i in range(n_x):
for j in range(n_y):
for k in range(n_z):
# t test
stats[i, j, k] = ttest_1samp(zs[:, i, j, k], 0, alternative="greater")
if permutation == True:
stats[i, j, k, 1] = permutation_test(zs[:, i, j, k], np.zeros([subs]), iter=iter)
return stats
' a function for conducting the statistical analysis for results of fMRI data (searchlight) within group '
def stats_fmri_compare_withingroup(corrs1, corrs2, fisherz=True, permutation=False, iter=5000):
"""
Conduct the statistical analysis for results of fMRI data (searchlight) (within group: corrs1 > corrs2)
Parameters
----------
corrs1 : array
The correlation coefficients under condition 1.
The shape of corrs must be [n_subs, n_x, n_y, n_z, 2]. n_subs, n_x, n_y, n_z represent the number of subjects,
the number of calculation units for searchlight along the x, y, z axis and 2 represents a r-value and a p-value.
corrs2 : array
The correlation coefficients under condition 2.
The shape of corrs must be [n_subs, n_x, n_y, n_z, 2]. n_subs, n_x, n_y, n_z represent the number of subjects,
the number of calculation units for searchlight along the x, y, z axis and 2 represents a r-value and a p-value.
fisherz : bool True or False. Default is True.
Conduct Fisher-Z transform.
permutation : bool True or False. Default is False.
Use permutation test or not.
iter : int. Default is 5000.
The number of iterations for the permutation test.
Returns
-------
stats : array
The statistical results.
The shape of stats is [n_x, n_y, n_z, 2]. n_x, n_y, n_z represent the number of calculation units for
searchlight along the x, y, z axis and 2 represents a t-value and a p-value.
Notes
-----
n_subs must >= 6.
This function can be used for the results of searchlight fMRI NPS and searchlight fMRI RDM-correlations.
"""
if len(np.shape(corrs1)) != 5 or len(np.shape(corrs2)) != 5:
return "Invalid input!"
# get the number of subjects
subs = np.shape(corrs1)[0]
# subs>=6
if subs < 6:
return print("the number of subjects is too small!")
# get the number of the calculation units in the x, y, z directions
n_x, n_y, n_z = np.shape(corrs1)[1:4]
# initialize the corrs
stats = np.zeros([n_x, n_y, n_z, 2], dtype=np.float64)
# get r-map
rs1 = corrs1[:, :, :, :, 0]
rs2 = corrs2[:, :, :, :, 0]
if fisherz:
zs1 = 0.5 * np.log((1+rs1)/(1-rs1))
zs2 = 0.5 * np.log((1+rs2)/(1-rs2))
else:
zs1, zs2 = rs1, rs2
# calculate the statistical results
for i in range(n_x):
for j in range(n_y):
for k in range(n_z):
# t test
stats[i, j, k] = ttest_rel(zs1[:, i, j, k], zs2[:, i, j, k], alternative="greater")
if permutation == True:
stats[i, j, k, 1] = permutation_test(zs1[:, i, j, k], zs2[:, i, j, k], iter=iter)
return stats
' a function for conducting the statistical analysis for results of fMRI data (searchlight) between two groups'
def stats_fmri_compare_betweengroups(corrs1, corrs2, fisherz=True, permutation=False, iter=5000):
"""
Conduct the statistical analysis for results of fMRI data (searchlight) (between 2 groups: group1 > group2)
Parameters
----------
corrs1 : array
The correlation coefficients for group 1.
The shape of corrs must be [n_subs, n_x, n_y, n_z, 2]. n_subs, n_x, n_y, n_z represent the number of subjects,
the number of calculation units for searchlight along the x, y, z axis and 2 represents a r-value and a p-value.
corrs2 : array
The correlation coefficients for group 2.
The shape of corrs must be [n_subs, n_x, n_y, n_z, 2]. n_subs, n_x, n_y, n_z represent the number of subjects,
the number of calculation units for searchlight along the x, y, z axis and 2 represents a r-value and a p-value.
fisherz : bool True or False. Default is True.
Conduct Fisher-Z transform.
permutation : bool True or False. Default is False.
Use permutation test or not.
iter : int. Default is 5000.
The number of iterations for the permutation test.
Returns
-------
stats : array
The statistical results.
The shape of stats is [n_x, n_y, n_z, 2]. n_x, n_y, n_z represent the number of calculation units for
searchlight along the x, y, z axis and 2 represents a t-value and a p-value.
Notes
-----
n_subs must >= 6.
This function can be used for the results of searchlight fMRI NPS and searchlight fMRI RDM-correlations.
"""
if len(np.shape(corrs1)) != 5 or len(np.shape(corrs2)) != 5:
return "Invalid input!"
# get the number of subjects
subs1 = np.shape(corrs1)[0]
subs2 = np.shape(corrs2)[0]
# subs>=6
if subs1 < 6 or subs2 < 6:
return print("the number of subjects is too small!")
# get the number of the calculation units in the x, y, z directions
n_x, n_y, n_z = np.shape(corrs1)[1:4]
# initialize the corrs
stats = np.zeros([n_x, n_y, n_z, 2], dtype=np.float64)
# get r-map
rs1 = corrs1[:, :, :, :, 0]
rs2 = corrs2[:, :, :, :, 0]
if fisherz:
zs1 = 0.5 * np.log((1 + rs1) / (1 - rs1))
zs2 = 0.5 * np.log((1 + rs2) / (1 - rs2))
else:
zs1, zs2 = rs1, rs2
# calculate the statistical results
for i in range(n_x):
for j in range(n_y):
for k in range(n_z):
# t test
stats[i, j, k] = ttest_ind(zs1[:, i, j, k], zs2[:, i, j, k], alternative="greater")
if permutation == True:
stats[i, j, k, 1] = permutation_test(zs1[:, i, j, k], zs2[:, i, j, k], iter = iter)
return stats
' a function for conducting the statistical analysis for results of fMRI data (ISC searchlight) '
def stats_iscfmri(corrs, fisherz=True, permutation=False, iter=5000):
"""
Conduct the statistical analysis for results of fMRI data (ISC searchlight)
Parameters
----------
corrs : array
The correlation coefficients.
The shape of corrs must be [n_ts, n_subs!/(2!*(n_subs-2)!), n_x, n_y, n_z, 2]. n_ts, n_subs, n_x, n_y, n_z
represent the number of subjects, the number of calculation units for searchlight along the x, y, z axis and 2
represents a r-value and a p-value.
fisherz : bool True or False. Default is True.
Conduct Fisher-Z transform.
permutation : bool True or False. Default is False.
Use permutation test or not.
iter : int. Default is 5000.
The number of iterations for the permutation test.
Returns
-------
stats : array
The statistical results.
The shape of stats is [n_ts, n_x, n_y, n_z, 2]. n_ts, n_x, n_y, n_z represent the number of time-points, the
number of calculation units for searchlight along the x, y, z axis and 2 represents a t-value and a p-value.
Notes
-----
n_subs must >= 4 (n_subs!/(2!*(n_subs-2)!) >= 6).
"""
if len(np.shape(corrs)) != 6:
return "Invalid input!"
# get the number of time-points, pairs
ts, npairs = np.shape(corrs)[:2]
# n_subs!/(2!*(n_subs-2)!)>=6
if npairs < 6:
return print("the number of subjects is too small!")
# get the number of the calculation units in the x, y, z directions
n_x, n_y, n_z = np.shape(corrs)[2:5]
# initialize the corrs
stats = np.zeros([ts, n_x, n_y, n_z, 2], dtype=np.float64)
# get r-map
rs = corrs[:, :, :, :, :, 0]
if fisherz:
# Fisher r to z
zs = 0.5 * np.log((1 + rs) / (1 - rs))
else:
zs = rs
# calculate the statistical results
for t in range(ts):
for i in range(n_x):
for j in range(n_y):
for k in range(n_z):
# t test
stats[t, i, j, k] = ttest_1samp(zs[t, :, i, j, k], 0, alternative="greater")
if permutation == True:
stats[t, i, j, k, 1] = permutation_test(zs[t, :, i, j, k], np.zeros([npairs]), iter=iter)
return stats
' a function for conducting the statistical analysis for results of EEG-like data (for STPS) '
def stats_stps(corrs1, corrs2, fisherz=True, permutation=True, iter=5000):
"""
Conduct the statistical analysis for results of EEG-like data (for STPS)
Parameters
----------
corrs1 : array
The correlation coefficients under condition1.
The shape of corrs1 must be [n_subs, n_chls, n_ts]. n_subs, n_chls, n_ts represent the number of subjects, the
number of channels and the number of time-points.
corrs2 : array
The correlation coefficients under condition2.
The shape of corrs2 must be [n_subs, n_chls, n_ts]. n_subs, n_chls, n_ts represent the number of subjects, the
number of channels and the number of time-points.
fisherz : bool True or False. Default is True.
Conduct Fisher-Z transform.
permutation : bool True or False. Default is True.
Use permutation test or not.
iter : int. Default is 5000.
The number of iterations for the permutation test.
Returns
-------
stats : array
The statistical results.
The shape of stats is [n_chls, n_ts, 2]. n_chls, n_ts represent the number of channels and the number of
time-points. 2 represents a t-value and a p-value.
Notes
-----
n_subs must >= 6.
"""
if len(np.shape(corrs1)) != 3 or len(np.shape(corrs2)) != 3 or np.shape(corrs1)[1] != np.shape(corrs2)[1] or \
np.shape(corrs1)[2] != np.shape(corrs2)[2]:
return "Invalid input!"
# get the number of subjects, channels & time-points
subs, chls, ts = np.shape(corrs1)
# subs>=6
if subs < 6:
return print("the number of subjects is too small!")
# initialize the corrs
stats = np.zeros([chls, ts, 2], dtype=np.float64)
# get r-map
rs1 = corrs1
rs2 = corrs2
if fisherz:
# Fisher r to z
zs1 = 0.5 * np.log((1 + rs1) / (1 - rs1))
zs2 = 0.5 * np.log((1 + rs2) / (1 - rs2))
else:
zs1, zs2 = rs1, rs2
# calculate the statistical results
for i in range(chls):
for j in range(ts):
# t test
stats[i, j] = ttest_rel(zs1[:, i, j], zs2[:, i, j])
if permutation == True:
stats[i, j, 1] = permutation_test(zs1[:, i, j], zs2[:, i, j], iter=iter)
return stats
' a function for conducting the statistical analysis for results of fMRI data (STPS searchlight) '
def stats_stpsfmri(corrs1, corrs2, fisherz=True, permutation=False, iter=5000):
"""
Conduct the statistical analysis for results of fMRI data (STPS searchlight)
Parameters
----------
corrs1 : array
The correlation coefficients under condition1.
The shape of corrs1 must be [n_subs, n_x, n_y, n_z]. n_subs, n_x, n_y, n_z represent the number of subjects,
the number of calculation units for searchlight along the x, y, z axis.
corrs2 : array
The correlation coefficients under condition2.
The shape of corrs2 must be [n_subs, n_x, n_y, n_z]. n_subs, n_x, n_y, n_z represent the number of subjects,
the number of calculation units for searchlight along the x, y, z axis.
fisherz : bool True or False. Default is True.
Conduct Fisher-Z transform.
permutation : bool True or False. Default is False.
Use permutation test or not.
iter : int. Default is 5000.
The number of iterations for the permutation test.
Returns
-------
stats : array
The statistical results.
The shape of stats is [n_x, n_y, n_z, 2]. n_x, n_y, n_z represent the number of calculation units for
searchlight along the x, y, z axis and 2 represents a t-value and a p-value.
Notes
-----
n_subs must >= 6.
"""
if len(np.shape(corrs1)) != 4 or len(np.shape(corrs2)) != 4 or np.shape(corrs1)[1] != np.shape(corrs2)[1] \
or np.shape(corrs1)[2] != np.shape(corrs2)[2] or np.shape(corrs1)[3] != np.shape(corrs2)[3]:
return "Invalid input!"
# get the number of subjects
subs = np.shape(corrs1)[0]
# subs>=6
if subs < 6:
return print("the number of subjects is too small!")
# get the number of the calculation units in the x, y, z directions
n_x, n_y, n_z = np.shape(corrs1)[1:]
# initialize the corrs
stats = np.zeros([n_x, n_y, n_z, 2], dtype=np.float64)
# get r-map
rs1 = corrs1
rs2 = corrs2
if fisherz:
# Fisher r to z
zs1 = 0.5 * np.log((1 + rs1) / (1 - rs1))
zs2 = 0.5 * np.log((1 + rs2) / (1 - rs2))
else:
zs1, zs2 = rs1, rs2
# calculate the statistical results
for i in range(n_x):
for j in range(n_y):
for k in range(n_z):
# t test
stats[i, j, k] = ttest_rel(zs1[:, i, j, k], zs2[:, i, j, k])
if permutation == True:
stats[i, j, k, 1] = permutation_test(zs1[:, i, j, k], zs2[:, i, j, k], iter=iter)
return stats
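# Minimal usage sketch for stats() above (illustrative only; the random input
# stands in for real per-subject correlation results of shape
# [n_subs, n_chls, n_ts, 2], and the alternative="greater" keyword used
# internally assumes scipy >= 1.6):
#
# >>> import numpy as np
# >>> corrs = np.zeros([8, 2, 3, 2])
# >>> corrs[..., 0] = np.random.uniform(0.1, 0.5, (8, 2, 3))
# >>> out = stats(corrs, permutation=False)
# >>> out.shape
# (2, 3, 2)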