hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a89bff29c5f953408573b588357469aa0dc0348
| 425
|
py
|
Python
|
webapp/starter/config/settings/partials/AUTH.py
|
somacci/django-sample-docker
|
6033313a76f7444004143ce7d0143633dc12e09d
|
[
"MIT"
] | null | null | null |
webapp/starter/config/settings/partials/AUTH.py
|
somacci/django-sample-docker
|
6033313a76f7444004143ce7d0143633dc12e09d
|
[
"MIT"
] | null | null | null |
webapp/starter/config/settings/partials/AUTH.py
|
somacci/django-sample-docker
|
6033313a76f7444004143ce7d0143633dc12e09d
|
[
"MIT"
] | null | null | null |
# Password strength checks applied by Django's auth system, in evaluation order.
_PASSWORD_VALIDATOR_NAMES = (
    'UserAttributeSimilarityValidator',
    'MinimumLengthValidator',
    'CommonPasswordValidator',
    'NumericPasswordValidator',
)

AUTH_PASSWORD_VALIDATORS = [
    {'NAME': 'django.contrib.auth.password_validation.' + validator_name}
    for validator_name in _PASSWORD_VALIDATOR_NAMES
]
| 26.5625
| 91
| 0.670588
| 31
| 425
| 9
| 0.354839
| 0.215054
| 0.243728
| 0.301075
| 0.55914
| 0.55914
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 425
| 15
| 92
| 28.333333
| 0.820588
| 0
| 0
| 0
| 0
| 0
| 0.653302
| 0.615566
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
8ab413df49fe238447647dd743f2fd11591cd1af
| 337
|
py
|
Python
|
solardatatools/algorithms/__init__.py
|
catzzz/solar-data-tools
|
dc173c1036bc2e3116b302f3fd442b1cb030e0b0
|
[
"BSD-2-Clause"
] | 3
|
2019-02-26T18:06:12.000Z
|
2019-04-16T19:49:27.000Z
|
solardatatools/algorithms/__init__.py
|
catzzz/solar-data-tools
|
dc173c1036bc2e3116b302f3fd442b1cb030e0b0
|
[
"BSD-2-Clause"
] | 1
|
2019-03-28T19:02:37.000Z
|
2019-03-28T19:02:37.000Z
|
solardatatools/algorithms/__init__.py
|
catzzz/solar-data-tools
|
dc173c1036bc2e3116b302f3fd442b1cb030e0b0
|
[
"BSD-2-Clause"
] | 1
|
2019-03-06T17:52:27.000Z
|
2019-03-06T17:52:27.000Z
|
from solardatatools.algorithms.capacity_change import CapacityChange
from solardatatools.algorithms.time_shifts import TimeShift
from solardatatools.algorithms.sunrise_sunset_estimation import SunriseSunset
from solardatatools.algorithms.soiling import soiling_seperation
from solardatatools.algorithms.clipping import ClippingDetection
| 56.166667
| 77
| 0.910979
| 35
| 337
| 8.628571
| 0.514286
| 0.298013
| 0.463576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059347
| 337
| 5
| 78
| 67.4
| 0.952681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0a18c4ab2cb7a99e784a0e26bbf3a3908a4fd997
| 15,683
|
py
|
Python
|
deepFilter/dl_models.py
|
fperdigon/DeepFilter_as_in_Arxiv
|
d340a71942aee29d9655d6298c745390fc501ddc
|
[
"MIT"
] | null | null | null |
deepFilter/dl_models.py
|
fperdigon/DeepFilter_as_in_Arxiv
|
d340a71942aee29d9655d6298c745390fc501ddc
|
[
"MIT"
] | null | null | null |
deepFilter/dl_models.py
|
fperdigon/DeepFilter_as_in_Arxiv
|
d340a71942aee29d9655d6298c745390fc501ddc
|
[
"MIT"
] | null | null | null |
#============================================================
#
# Deep Learning BLW Filtering
# Deep Learning models
#
# author: Francisco Perdigon Romero
# email: fperdigon88@gmail.com
# github id: fperdigon
#
#===========================================================
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Conv1D, Flatten, Dropout, BatchNormalization,\
concatenate, Activation, Input, Conv2DTranspose, Lambda, LSTM, Reshape, Embedding
import keras.backend as K
def Conv1DTranspose(input_tensor, filters, kernel_size, strides=2, activation='relu', padding='same'):
    """1D transposed convolution emulated with Conv2DTranspose.

    https://stackoverflow.com/a/45788699

    input_tensor: tensor with shape (batch_size, time_steps, dims)
    filters: int, output dimension; the result has shape (batch_size, time_steps, filters)
    kernel_size: int, size of the convolution kernel
    strides: int, convolution step size
    padding: 'same' | 'valid'
    """
    # Insert a dummy spatial axis so the 2D transpose op can be applied.
    expanded = Lambda(lambda t: K.expand_dims(t, axis=2))(input_tensor)
    deconv = Conv2DTranspose(filters=filters,
                             kernel_size=(kernel_size, 1),
                             activation=activation,
                             strides=(strides, 1),
                             padding=padding)(expanded)
    # Drop the dummy axis again to get back to (batch, time, filters).
    return Lambda(lambda t: K.squeeze(t, axis=2))(deconv)
##########################################################################
###### MODULES #######
def LFilter_module(x, layers):
    """Inception-like block of four parallel *linear* Conv1D branches.

    Each branch uses layers/4 filters with kernel sizes 3, 5, 9 and 15;
    the branch outputs are concatenated along the channel axis.
    """
    branches = [
        Conv1D(filters=int(layers / 4),
               kernel_size=k,
               activation='linear',
               strides=1,
               padding='same')(x)
        for k in (3, 5, 9, 15)
    ]
    return concatenate(branches)
def NLFilter_module(x, layers):
    """Inception-like block of four parallel *non-linear* (ReLU) Conv1D branches.

    Each branch uses layers/4 filters with kernel sizes 3, 5, 9 and 15;
    the branch outputs are concatenated along the channel axis.
    """
    branches = [
        Conv1D(filters=int(layers / 4),
               kernel_size=k,
               activation='relu',
               strides=1,
               padding='same')(x)
        for k in (3, 5, 9, 15)
    ]
    return concatenate(branches)
def LANLFilter_module(x, layers):
    """Mixed linear-and-non-linear filter block.

    Eight parallel Conv1D branches of layers/8 filters each: one linear and
    one ReLU branch per kernel size (3, 5, 9, 15). Linear branches come first
    in the concatenation, matching the original branch order.
    """
    kernel_sizes = (3, 5, 9, 15)
    linear_branches = [
        Conv1D(filters=int(layers / 8),
               kernel_size=k,
               activation='linear',
               strides=1,
               padding='same')(x)
        for k in kernel_sizes
    ]
    relu_branches = [
        Conv1D(filters=int(layers / 8),
               kernel_size=k,
               activation='relu',
               strides=1,
               padding='same')(x)
        for k in kernel_sizes
    ]
    return concatenate(linear_branches + relu_branches)
def LANLFilter_module_dilated(x, layers):
    """Dilated variant of LANLFilter_module.

    Six parallel Conv1D branches of layers/6 filters each, all with
    dilation_rate=3: one linear and one ReLU branch per kernel size
    (5, 9, 15). Linear branches come first in the concatenation.
    """
    kernel_sizes = (5, 9, 15)
    linear_branches = [
        Conv1D(filters=int(layers / 6),
               kernel_size=k,
               activation='linear',
               dilation_rate=3,
               padding='same')(x)
        for k in kernel_sizes
    ]
    relu_branches = [
        Conv1D(filters=int(layers / 6),
               kernel_size=k,
               activation='relu',
               dilation_rate=3,
               padding='same')(x)
        for k in kernel_sizes
    ]
    return concatenate(linear_branches + relu_branches)
###### MODELS #######
def deep_filter_vanilla_linear():
    """Sequential all-linear Conv1D baseline for 512-sample signals.

    Filter widths taper 64-64-32-32-16-16 and end in a single-channel
    output layer; every layer is linear with kernel size 9.
    """
    model = Sequential()
    # First layer fixes the expected input shape.
    model.add(Conv1D(filters=64,
                     kernel_size=9,
                     activation='linear',
                     input_shape=(512, 1),
                     strides=1,
                     padding='same'))
    for width in (64, 32, 32, 16, 16, 1):
        model.add(Conv1D(filters=width,
                         kernel_size=9,
                         activation='linear',
                         strides=1,
                         padding='same'))
    return model
def deep_filter_vanilla_Nlinear():
    """Sequential ReLU Conv1D baseline for 512-sample signals.

    Same taper as deep_filter_vanilla_linear (64-64-32-32-16-16) but with
    ReLU activations; the final single-channel output layer stays linear.
    """
    model = Sequential()
    # First layer fixes the expected input shape.
    model.add(Conv1D(filters=64,
                     kernel_size=9,
                     activation='relu',
                     input_shape=(512, 1),
                     strides=1,
                     padding='same'))
    for width in (64, 32, 32, 16, 16):
        model.add(Conv1D(filters=width,
                         kernel_size=9,
                         activation='relu',
                         strides=1,
                         padding='same'))
    # Linear output head producing the filtered signal.
    model.add(Conv1D(filters=1,
                     kernel_size=9,
                     activation='linear',
                     strides=1,
                     padding='same'))
    return model
def deep_filter_I_linear():
    """Functional model stacking six linear inception blocks.

    LFilter_module widths taper 64-64-32-32-16-16, followed by a linear
    single-channel Conv1D output head. Accepts variable-length signals.
    """
    signal_in = Input(shape=(None, 1))
    tensor = signal_in
    for width in (64, 64, 32, 32, 16, 16):
        tensor = LFilter_module(tensor, width)
    predictions = Conv1D(filters=1,
                         kernel_size=9,
                         activation='linear',
                         strides=1,
                         padding='same')(tensor)
    return Model(inputs=[signal_in], outputs=predictions)
def deep_filter_I_Nlinear():
    """Functional model stacking six ReLU inception blocks.

    NLFilter_module widths taper 64-64-32-32-16-16, followed by a linear
    single-channel Conv1D output head. Accepts variable-length signals.
    """
    signal_in = Input(shape=(None, 1))
    tensor = signal_in
    for width in (64, 64, 32, 32, 16, 16):
        tensor = NLFilter_module(tensor, width)
    predictions = Conv1D(filters=1,
                         kernel_size=9,
                         activation='linear',
                         strides=1,
                         padding='same')(tensor)
    return Model(inputs=[signal_in], outputs=predictions)
def deep_filter_I_LANL():
    """Functional model stacking six mixed linear/non-linear blocks.

    Each LANLFilter_module (widths 64-64-32-32-16-16) is followed by batch
    normalization; a linear single-channel Conv1D head emits the output.
    """
    signal_in = Input(shape=(None, 1))
    tensor = signal_in
    for width in (64, 64, 32, 32, 16, 16):
        tensor = LANLFilter_module(tensor, width)
        tensor = BatchNormalization()(tensor)
    predictions = Conv1D(filters=1,
                         kernel_size=9,
                         activation='linear',
                         strides=1,
                         padding='same')(tensor)
    return Model(inputs=[signal_in], outputs=predictions)
def deep_filter_model_I_LANL_dilated():
    """Functional model alternating plain and dilated LANL blocks.

    For each width (64, 32, 16) a LANLFilter_module is followed by its
    dilated counterpart, each batch-normalized; a linear single-channel
    Conv1D head emits the output.
    """
    signal_in = Input(shape=(None, 1))
    tensor = signal_in
    for width in (64, 32, 16):
        tensor = LANLFilter_module(tensor, width)
        tensor = BatchNormalization()(tensor)
        tensor = LANLFilter_module_dilated(tensor, width)
        tensor = BatchNormalization()(tensor)
    predictions = Conv1D(filters=1,
                         kernel_size=9,
                         activation='linear',
                         strides=1,
                         padding='same')(tensor)
    return Model(inputs=[signal_in], outputs=predictions)
def FCN_DAE():
    # Implementation of the FCN-DAE approach presented in
    # Chiang, H. T., Hsieh, Y. Y., Fu, S. W., Hung, K. H., Tsao, Y., & Chien, S. Y. (2019).
    # Noise reduction in ECG signals using fully convolutional denoising autoencoders.
    # IEEE Access, 7, 60806-60813.
    #
    # Fully convolutional denoising autoencoder: a strided Conv1D encoder
    # compresses the signal, then a chain of transposed convolutions
    # (Conv1DTranspose helper above) reconstructs it.
    input_shape = (None, 1)
    input = Input(shape=input_shape)
    # --- Encoder: each stride-2 layer halves the temporal resolution ---
    x = Conv1D(filters=40,
               input_shape=(512, 1),
               kernel_size=16,
               activation='elu',
               strides=2,
               padding='same')(input)
    x = BatchNormalization()(x)
    x = Conv1D(filters=20,
               kernel_size=16,
               activation='elu',
               strides=2,
               padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv1D(filters=20,
               kernel_size=16,
               activation='elu',
               strides=2,
               padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv1D(filters=20,
               kernel_size=16,
               activation='elu',
               strides=2,
               padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv1D(filters=40,
               kernel_size=16,
               activation='elu',
               strides=2,
               padding='same')(x)
    x = BatchNormalization()(x)
    # Bottleneck: collapse to a single channel without further downsampling.
    x = Conv1D(filters=1,
               kernel_size=16,
               activation='elu',
               strides=1,
               padding='same')(x)
    x = BatchNormalization()(x)
    # --- Decoder ---
    # Keras has no native 1D transposed convolution; the Conv1DTranspose
    # helper wraps Conv2DTranspose in a mathematically equivalent way.
    # NOTE(review): the decoder filter counts (1, 40, 20, 20, 20, 40) do not
    # exactly mirror the encoder (40, 20, 20, 20, 40, 1) — confirm against
    # the paper's architecture.
    x = Conv1DTranspose(input_tensor=x,
                        filters=1,
                        kernel_size=16,
                        activation='elu',
                        strides=1,
                        padding='same')
    x = BatchNormalization()(x)
    x = Conv1DTranspose(input_tensor=x,
                        filters=40,
                        kernel_size=16,
                        activation='elu',
                        strides=2,
                        padding='same')
    x = BatchNormalization()(x)
    x = Conv1DTranspose(input_tensor=x,
                        filters=20,
                        kernel_size=16,
                        activation='elu',
                        strides=2,
                        padding='same')
    x = BatchNormalization()(x)
    x = Conv1DTranspose(input_tensor=x,
                        filters=20,
                        kernel_size=16,
                        activation='elu',
                        strides=2,
                        padding='same')
    x = BatchNormalization()(x)
    x = Conv1DTranspose(input_tensor=x,
                        filters=20,
                        kernel_size=16,
                        activation='elu',
                        strides=2,
                        padding='same')
    x = BatchNormalization()(x)
    x = Conv1DTranspose(input_tensor=x,
                        filters=40,
                        kernel_size=16,
                        activation='elu',
                        strides=2,
                        padding='same')
    x = BatchNormalization()(x)
    # Linear output head producing the denoised single-channel signal.
    predictions = Conv1DTranspose(input_tensor=x,
                                  filters=1,
                                  kernel_size=16,
                                  activation='linear',
                                  strides=1,
                                  padding='same')
    model = Model(inputs=[input], outputs=predictions)
    return model
def DRRN_denoising():
    """DRNN ECG denoiser.

    Implementation of the DRNN approach presented in
    Antczak, K. (2018). Deep recurrent neural networks for ECG signal
    denoising. arXiv preprint arXiv:1807.11551.

    One LSTM layer over variable-length single-channel input, followed by
    two ReLU dense layers and a linear single-unit output.
    """
    layer_stack = [
        LSTM(64, input_shape=(None, 1), return_sequences=True),
        Dense(64, activation='relu'),
        Dense(64, activation='relu'),
        Dense(1, activation='linear'),
    ]
    return Sequential(layer_stack)
| 30.044061
| 119
| 0.491679
| 1,483
| 15,683
| 5.099798
| 0.12205
| 0.075367
| 0.075367
| 0.092953
| 0.790163
| 0.769668
| 0.766759
| 0.763321
| 0.737274
| 0.640354
| 0
| 0.043769
| 0.389594
| 15,683
| 522
| 120
| 30.044061
| 0.746266
| 0.076197
| 0
| 0.834625
| 0
| 0
| 0.033687
| 0
| 0
| 0
| 0
| 0.001916
| 0
| 1
| 0.033592
| false
| 0
| 0.010336
| 0
| 0.077519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a24f9484512ae6ffcbb08a7e8a2b233806d171b
| 2,701
|
py
|
Python
|
tests/test_childclasses.py
|
basfora/milp_sim
|
963b816c79e992964b4fe65813453fd73f499f30
|
[
"MIT"
] | null | null | null |
tests/test_childclasses.py
|
basfora/milp_sim
|
963b816c79e992964b4fe65813453fd73f499f30
|
[
"MIT"
] | null | null | null |
tests/test_childclasses.py
|
basfora/milp_sim
|
963b816c79e992964b4fe65813453fd73f499f30
|
[
"MIT"
] | null | null | null |
from milp_sim.risk.classes.child_mespp import MyInputs2, MySearcher2, MySolverData2
def get_specs():
    """Build a MyInputs2 specification for a two-searcher, point-estimate run."""
    specs = MyInputs2()
    specs.set_graph(4)
    # solver parameter: central x distributed
    specs.set_solver_type('distributed')
    # target motion
    specs.set_target_motion('static')
    # searchers' detection: capture range and false negatives
    specs.set_capture_range(0)
    specs.set_size_team(2)
    # starting positions
    specs.set_start_searchers([1, 1])
    # initial belief: probability mass split between vertices 6 and 8
    belief = [0.0] * 10
    belief[6] = 0.5
    belief[8] = 0.5
    specs.set_b0(belief)
    # time-step stuff: deadline mission (tau), planning horizon (h), re-plan frequency (theta)
    specs.set_all_times(3)
    specs.set_theta(1)
    # solver timeout (in sec)
    specs.set_timeout(10)
    # danger stuff: thresholds and (identical) true / a-priori danger levels
    specs.set_threshold([3, 4], 'kappa')
    eta = [1, 3, 3, 4, 5, 3, 4, 4, 1]
    specs.set_danger_data(eta, 'true')
    specs.set_danger_data(eta, 'priori')
    return specs
def get_specs3():
    """Build a MyInputs2 specification for a probabilistic-perception run.

    Same base configuration as get_specs(), plus an alpha threshold and
    'prob' danger perception.
    """
    specs = MyInputs2()
    specs.set_graph(4)
    # solver parameter: central x distributed
    specs.set_solver_type('distributed')
    # target motion
    specs.set_target_motion('static')
    # searchers' detection: capture range and false negatives
    specs.set_capture_range(0)
    specs.set_size_team(2)
    # starting positions
    specs.set_start_searchers([1, 1])
    # initial belief: probability mass split between vertices 6 and 8
    belief = [0.0] * 10
    belief[6] = 0.5
    belief[8] = 0.5
    specs.set_b0(belief)
    # time-step stuff: deadline mission (tau), planning horizon (h), re-plan frequency (theta)
    specs.set_all_times(3)
    specs.set_theta(1)
    # solver timeout (in sec)
    specs.set_timeout(10)
    # danger stuff: thresholds and (identical) true / a-priori danger levels
    specs.set_threshold([3, 4], 'kappa')
    specs.set_threshold([0.95, 0.90], 'alpha')
    eta = [1, 3, 3, 4, 5, 3, 4, 4, 1]
    specs.set_danger_data(eta, 'true')
    specs.set_danger_data(eta, 'priori')
    specs.set_danger_perception('prob')
    return specs
def test_myinputs2():
    """Check that get_specs() configures MyInputs2 as expected."""
    specs = get_specs()
    eta = [1, 3, 3, 4, 5, 3, 4, 4, 1]
    assert len(specs.graph.vs) == 9
    assert specs.b0 == [0, 0, 0, 0, 0, 0, 0.5, 0, 0.5, 0]
    assert specs.start_searcher_random is False
    assert specs.start_searcher_v == [1, 1]
    assert specs.horizon == 3
    assert specs.kappa == [3, 4]
    assert specs.danger_true == eta
    assert specs.danger_priori == eta
    assert specs.perception == 'point'
def test_specs_prob():
    """Check the probabilistic-perception settings produced by get_specs3()."""
    specs = get_specs3()
    assert specs.kappa == [3, 4]
    assert specs.alpha == [0.95, 0.90]
    assert specs.perception == 'prob'
| 24.779817
| 94
| 0.636801
| 425
| 2,701
| 3.863529
| 0.218824
| 0.136419
| 0.012789
| 0.009744
| 0.742387
| 0.742387
| 0.738124
| 0.702801
| 0.702801
| 0.702801
| 0
| 0.064158
| 0.232506
| 2,701
| 108
| 95
| 25.009259
| 0.727931
| 0.181044
| 0
| 0.716418
| 0
| 0
| 0.037341
| 0
| 0
| 0
| 0
| 0
| 0.179104
| 1
| 0.059701
| false
| 0
| 0.014925
| 0
| 0.104478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a45cf3e325689b2ec86b81ecc90faca65658410
| 7,136
|
py
|
Python
|
src/CTL/tests/test_CTMRG.py
|
wistaria/CTL
|
ddb0f917369df4e7233b5ab097595c6ce254862e
|
[
"MIT"
] | 11
|
2021-06-23T15:47:03.000Z
|
2022-03-17T03:29:55.000Z
|
src/CTL/tests/test_CTMRG.py
|
wistaria/CTL
|
ddb0f917369df4e7233b5ab097595c6ce254862e
|
[
"MIT"
] | 1
|
2021-10-16T15:10:54.000Z
|
2021-10-16T15:10:54.000Z
|
src/CTL/tests/test_CTMRG.py
|
wistaria/CTL
|
ddb0f917369df4e7233b5ab097595c6ce254862e
|
[
"MIT"
] | 2
|
2021-06-23T09:11:20.000Z
|
2021-10-16T15:35:18.000Z
|
from CTL.tests.packedTest import PackedTest
from CTL.models.Ising import plaquetteIsingTensor, IsingTNFromUndirectedGraph
from CTL.examples.CTMRG import CTMRG
from CTL.funcs.graphFuncs import doubleSquareLatticeFBC
from CTL.tensor.contract.optimalContract import contractAndCostWithSequence
from CTL.examples.MPS import contractWithMPS
import CTL.funcs.funcs as funcs
class TestCTMRG(PackedTest):
    """Tests comparing CTMRG partition functions against exact contraction and MPS results."""

    def __init__(self, methodName = 'runTest'):
        """Register this packed test under the name 'CTMRG'."""
        super().__init__(methodName = methodName, name = 'CTMRG')

    def test_exactCTMRG(self):
        """Cross-check CTMRG Z against exact/greedy contraction and MPS on Ising lattices.

        Cases: non-interacting (weight 0.0), interacting (weight 0.5) at
        several lattice sizes, and weight 0.7; each compares ctmrg.getZ
        with an independently contracted partition function to 1e-10
        relative tolerance.
        """
        # test case for non-interacting Ising model
        weight = 0.0
        doubleLatticeFBC = doubleSquareLatticeFBC(n = 3, m = 3, weight = weight) # 24 tensors
        tensorNetwork = IsingTNFromUndirectedGraph(doubleLatticeFBC)
        # pre-computed contraction sequence for the 3x3 double lattice
        seq = [(2, 15), (14, 2), (5, 2), (9, 20), (21, 9), (6, 9), (16, 6), (11, 23), (22, 11), (8, 11), (19, 8), (10, 8), (6, 8), (7, 6), (18, 6), (2, 6), (17, 2), (4, 2), (1, 2), (13, 1), (3, 1), (12, 1), (0, 1)]
        Z, cost = contractAndCostWithSequence(tensorList = tensorNetwork, seq = seq)
        print('Z = {}, cost = {}'.format(Z.single(), cost))
        # exactZ = 2694263494.5463686 # pre-calculated
        # print('exact Z = {}'.format(exactZ))
        # ZMPS = contractWithMPS(tensorList = tensorNetwork, chi = 16)
        # print('Z from MPS = {}'.format(ZMPS.single()))
        a = plaquetteIsingTensor(weight = weight, diamondForm = True)
        ctmrg = CTMRG(a, chi = 16)
        # for i in range(1, 5):
        #     print('CTMRG Z(L = {}) = {}'.format(i, ctmrg.getZ(L = i)))
        # with self.assertWarns(RuntimeWarning):
        ZCTMRG = ctmrg.getZ(L = 3)
        print('CTMRG Z = {}'.format(ZCTMRG))
        self.assertTrue(funcs.floatRelativeEqual(ZCTMRG, Z.single(), eps = 1e-10))
        # test case for Ising model
        weight = 0.5
        for nn in range(1, 3):
            doubleLatticeFBC = doubleSquareLatticeFBC(n = nn, m = nn, weight = weight) # 24 tensors
            tensorNetwork = IsingTNFromUndirectedGraph(doubleLatticeFBC)
            Z, cost = contractAndCostWithSequence(tensorList = tensorNetwork)
            print('Z for L = {} is {}'.format(nn, Z.single()))
        doubleLatticeFBC = doubleSquareLatticeFBC(n = 3, m = 3, weight = weight) # 24 tensors
        tensorNetwork = IsingTNFromUndirectedGraph(doubleLatticeFBC)
        seq = [(2, 15), (14, 2), (5, 2), (9, 20), (21, 9), (6, 9), (16, 6), (11, 23), (22, 11), (8, 11), (19, 8), (10, 8), (6, 8), (7, 6), (18, 6), (2, 6), (17, 2), (4, 2), (1, 2), (13, 1), (3, 1), (12, 1), (0, 1)]
        Z, cost = contractAndCostWithSequence(tensorList = tensorNetwork, seq = seq)
        print('Z = {}, cost = {}'.format(Z.single(), cost))
        # exactZ = 2694263494.5463686 # pre-calculated
        # print('exact Z = {}'.format(exactZ))
        # ZMPS = contractWithMPS(tensorList = tensorNetwork, chi = 16)
        # print('Z from MPS = {}'.format(ZMPS.single()))
        a = plaquetteIsingTensor(weight = weight, diamondForm = True)
        # for i in range(1, 5):
        #     print('CTMRG Z(L = {}) = {}'.format(i, ctmrg.getSingleZ(L = i)))
        ctmrg = CTMRG(a, chi = 16)
        # for i in range(1, 5):
        #     print('CTMRG Z(L = {}) = {}'.format(i, ctmrg.getZ(L = i)))
        # with self.assertWarns(RuntimeWarning):
        ZCTMRG = ctmrg.getZ(L = 3)
        print('CTMRG Z = {}'.format(ZCTMRG))
        self.assertTrue(funcs.floatRelativeEqual(ZCTMRG, Z.single(), eps = 1e-10))
        # 5x5 lattice: greedy contraction order instead of a fixed sequence
        weight = 0.5
        doubleLatticeFBC = doubleSquareLatticeFBC(n = 5, m = 5, weight = weight) # 24 tensors
        tensorNetwork = IsingTNFromUndirectedGraph(doubleLatticeFBC)
        Z, cost = contractAndCostWithSequence(tensorList = tensorNetwork, seq = None, greedy = True)
        print('Z = {}, cost = {}'.format(Z.single(), cost))
        # ZMPS = contractWithMPS(tensorList = tensorNetwork, chi = 16)
        # print('Z from MPS = {}'.format(ZMPS.single()))
        a = plaquetteIsingTensor(weight = weight, diamondForm = True)
        ctmrg = CTMRG(a, chi = 16)
        ZCTMRG = ctmrg.getZ(L = 5)
        print('CTMRG Z = {}'.format(ZCTMRG))
        self.assertTrue(funcs.floatRelativeEqual(ZCTMRG, Z.single(), eps = 1e-10))
        # 7x7 lattice: exact contraction too costly, compare against MPS instead
        weight = 0.5
        doubleLatticeFBC = doubleSquareLatticeFBC(n = 7, m = 7, weight = weight) # 24 tensors
        tensorNetwork = IsingTNFromUndirectedGraph(doubleLatticeFBC)
        # Z, cost = contractAndCostWithSequence(tensorList = tensorNetwork, seq = None, greedy = True)
        # print('Z = {}, cost = {}'.format(Z.single(), cost))
        ZMPS = contractWithMPS(tensorList = tensorNetwork, chi = 16)
        print('Z from MPS = {}'.format(ZMPS.single()))
        a = plaquetteIsingTensor(weight = weight, diamondForm = True)
        ctmrg = CTMRG(a, chi = 16)
        ZCTMRG = ctmrg.getZ(L = 7)
        print('CTMRG Z = {}'.format(ZCTMRG))
        self.assertTrue(funcs.floatRelativeEqual(ZCTMRG, ZMPS.single(), eps = 1e-10))
        # 6x6 lattice at stronger coupling, again compared against MPS
        weight = 0.7
        doubleLatticeFBC = doubleSquareLatticeFBC(n = 6, m = 6, weight = weight) # 24 tensors
        tensorNetwork = IsingTNFromUndirectedGraph(doubleLatticeFBC)
        # Z, cost = contractAndCostWithSequence(tensorList = tensorNetwork, seq = None, greedy = True)
        # print('Z = {}, cost = {}'.format(Z.single(), cost))
        ZMPS = contractWithMPS(tensorList = tensorNetwork, chi = 16)
        print('Z from MPS = {}'.format(ZMPS.single()))
        a = plaquetteIsingTensor(weight = weight, diamondForm = True)
        ctmrg = CTMRG(a, chi = 16)
        ZCTMRG = ctmrg.getZ(L = 6)
        print('CTMRG Z = {}'.format(ZCTMRG))
        self.assertTrue(funcs.floatRelativeEqual(ZCTMRG, ZMPS.single(), eps = 1e-10))
        # test case for J1-J2 Ising model(not work for current CTMRG assuming symmetry)
        # weight = (0.3, 0.4)
        # doubleLatticeFBC = doubleSquareLatticeFBC(n = 3, m = 3, weight = weight) # 24 tensors
        # tensorNetwork = IsingTNFromUndirectedGraph(doubleLatticeFBC)
        # seq = [(2, 15), (14, 2), (5, 2), (9, 20), (21, 9), (6, 9), (16, 6), (11, 23), (22, 11), (8, 11), (19, 8), (10, 8), (6, 8), (7, 6), (18, 6), (2, 6), (17, 2), (4, 2), (1, 2), (13, 1), (3, 1), (12, 1), (0, 1)]
        # Z, cost = contractAndCostWithSequence(tensorList = tensorNetwork, seq = seq)
        # print('Z = {}, cost = {}'.format(Z.single(), cost))
        # # exactZ = 2694263494.5463686 # pre-calculated
        # # print('exact Z = {}'.format(exactZ))
        # # ZMPS = contractWithMPS(tensorList = tensorNetwork, chi = 16)
        # # print('Z from MPS = {}'.format(ZMPS.single()))
        # a = plaquetteIsingTensor(weight = weight, diamondForm = True)
        # # for i in range(1, 5):
        # #     print('CTMRG Z(L = {}) = {}'.format(i, ctmrg.getSingleZ(L = i)))
        # ctmrg = CTMRG(a, chi = 16)
        # # with self.assertWarns(RuntimeWarning):
        # ZCTMRG = ctmrg.getZ(L = 3)
        # print('CTMRG Z = {}'.format(ZCTMRG))
        # self.assertTrue(funcs.floatRelativeEqual(ZCTMRG, Z.single(), eps = 1e-10))
| 46.947368
| 216
| 0.585762
| 820
| 7,136
| 5.086585
| 0.135366
| 0.037401
| 0.026373
| 0.035243
| 0.833613
| 0.833613
| 0.831935
| 0.82666
| 0.82666
| 0.82666
| 0
| 0.06318
| 0.252522
| 7,136
| 151
| 217
| 47.258278
| 0.718785
| 0.3338
| 0
| 0.647059
| 0
| 0
| 0.036437
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 1
| 0.029412
| false
| 0
| 0.102941
| 0
| 0.147059
| 0.161765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a5605a8d00c5785a20884753f4983402948650a
| 6,364
|
py
|
Python
|
rbac/rbac_flask/app/blueprints/extra/extra_bp.py
|
5GEVE/5G-EVE-PORTAL-BACKEND-rbac
|
33dbf612fd507f35372f850ac6d7e9669eed1bb0
|
[
"MIT"
] | null | null | null |
rbac/rbac_flask/app/blueprints/extra/extra_bp.py
|
5GEVE/5G-EVE-PORTAL-BACKEND-rbac
|
33dbf612fd507f35372f850ac6d7e9669eed1bb0
|
[
"MIT"
] | 2
|
2021-04-30T21:00:46.000Z
|
2021-06-02T00:47:09.000Z
|
rbac/rbac_flask/app/blueprints/extra/extra_bp.py
|
5GEVE/5G-EVE-PORTAL-BACKEND-rbac
|
33dbf612fd507f35372f850ac6d7e9669eed1bb0
|
[
"MIT"
] | null | null | null |
from flask import ( Blueprint, jsonify, request )
from app import oidc, config
#from flask_jwt_extended import ( jwt_optional, get_jwt_identity )
from app.keycloak.keycloak_client import Keycloak
import requests, json, collections
from requests.auth import HTTPBasicAuth
# BLUEPRINT CREATION
bp = Blueprint('extra', __name__, url_prefix='/portal/rbac/extra')
# Keycloak adapter
kc_client = Keycloak()
# Bugzilla URL
BZ_URL = config['bz_url']
# ROUTES DEFINITION
"""
Retrieves available roles
"""
@bp.route('/realmroles', methods=['GET'])
def get_realm_roles():
    # Proxy the Keycloak "available realm roles" lookup; no auth required here.
    code, details = kc_client.get_available_roles()
    return jsonify({"details": details}), code
##########################
## Use cases management ##
##########################
@bp.route('/use-cases', methods=['GET'])
@oidc.accept_token(require_token=True)
def get_use_cases():
    # Resolve the caller from the bearer token, then read their use_cases attribute.
    bearer = str(request.headers['authorization']).split(" ")[1]
    status_code, msg = kc_client.token_to_user(bearer)
    if status_code == requests.codes.ok:
        status_code, msg = kc_client.get_user_attributes(msg['id'], "use_cases")
    # On token failure, msg already carries the Keycloak error payload.
    return jsonify({"details": msg}), status_code
@bp.route('/use-cases', methods=['POST'])
@oidc.accept_token(require_token=True)
def add_use_cases():
    """Append use cases to the caller's Keycloak profile.

    Expects a JSON body of the form {"use_cases": [...]}. Returns 400 for a
    missing/empty/non-list "use_cases" value, otherwise forwards Keycloak's
    status code and payload.
    """
    if not request.is_json:
        return jsonify({"details": "No json provided"}), 400
    data = request.get_json()
    try:
        if not data['use_cases']:
            return jsonify({"details": "No use cases provided"}), 400
    except (KeyError, TypeError):
        # KeyError: key missing; TypeError: body was valid JSON but not an object.
        # (Was a broad `except Exception as e` with an unused binding.)
        return jsonify({"details": "use_cases key not found at the provided JSON"}), 400
    if not isinstance(data['use_cases'], list):
        return jsonify({"details": "Use cases must be provided using a list of elements"}), 400
    token = str(request.headers['authorization']).split(" ")[1]
    status_code, msg = kc_client.token_to_user(token)
    if status_code == requests.codes.ok:
        status_code, msg = kc_client.add_user_attributes(msg['id'], "use_cases", data['use_cases'])
    return jsonify({"details": msg}), status_code
@bp.route('/use-cases', methods=['DELETE'])
@oidc.accept_token(require_token=True)
def delete_use_cases():
    """Remove use cases from the authenticated user.

    Expects a JSON body ``{"use_cases": [...]}``.

    Fix: the original indexed ``data['use_cases']`` without checking the
    key, so a body missing it crashed with a KeyError (HTTP 500) instead
    of returning 400 like the POST sibling.
    """
    if not request.is_json:
        return jsonify({"details": "No json provided"}), 400
    data = request.get_json()
    if not isinstance(data, dict) or 'use_cases' not in data:
        return jsonify({"details": "use_cases key not found at the provided JSON"}), 400
    if not data['use_cases']:
        return jsonify({"details": "No use cases provided"}), 400
    if not isinstance(data['use_cases'], list):
        return jsonify({"details": "Use cases must be provided using a list of elements"}), 400
    token = str(request.headers['authorization']).split(" ")[1]
    status_code, msg = kc_client.token_to_user(token)
    if status_code == requests.codes.ok:
        status_code, msg = kc_client.delete_user_attributes(msg['id'], "use_cases", data['use_cases'])
    return jsonify({"details": msg}), status_code
###################
## Managed sites ##
###################
@bp.route('/managed-sites', methods=['GET'])
@oidc.accept_token(require_token=True)
def get_managed_sites():
    """Return the caller's "managed_sites" attribute.

    Non-SiteManager users always receive an empty list with HTTP 200.
    """
    bearer = str(request.headers['authorization']).split(" ")[1]
    code, payload = kc_client.token_to_user(bearer)
    if code == requests.codes.ok:
        if "SiteManager" not in payload['roles']:
            payload = {"managed_sites": []}
            code = 200
        else:
            code, payload = kc_client.get_user_attributes(payload['id'], "managed_sites")
    return jsonify({"details": payload}), code
@bp.route('/managed-sites', methods=['POST'])
@oidc.accept_token(require_token=True)
def add_managed_sites():
    """Attach managed sites to the authenticated user (SiteManager only).

    Expects a JSON body ``{"managed_sites": [...]}``.

    Fix: the 400 error messages were copy-pasted from the use-cases
    endpoints and wrongly said "use cases"; they now refer to managed sites.
    """
    if not request.is_json:
        return jsonify({"details": "No json provided"}), 400
    data = request.get_json()
    if not isinstance(data, dict) or 'managed_sites' not in data:
        return jsonify({"details": "managed_sites key not found at the provided JSON"}), 400
    if not data['managed_sites']:
        return jsonify({"details": "No managed sites provided"}), 400
    if not isinstance(data['managed_sites'], list):
        return jsonify({"details": "Managed sites must be provided using a list of elements"}), 400
    token = str(request.headers['authorization']).split(" ")[1]
    status_code, msg = kc_client.token_to_user(token)
    if status_code == requests.codes.ok:
        # Only site managers may modify managed sites; others get an empty list.
        if "SiteManager" in msg['roles']:
            status_code, msg = kc_client.add_user_attributes(msg['id'], "managed_sites", data['managed_sites'])
        else:
            msg = {"managed_sites": []}
            status_code = 200
    return jsonify({"details": msg}), status_code
@bp.route('/managed-sites', methods=['DELETE'])
@oidc.accept_token(require_token=True)
def delete_managed_sites():
    """Remove managed sites from the authenticated user (SiteManager only).

    Expects a JSON body ``{"managed_sites": [...]}``.

    Fix: the missing-key and not-a-list messages were copy-pasted from the
    use-cases endpoints and wrongly said "use cases".
    """
    if not request.is_json:
        return jsonify({"details": "No json provided"}), 400
    data = request.get_json()
    if not isinstance(data, dict) or 'managed_sites' not in data:
        return jsonify({"details": "No managed sites provided"}), 400
    if not isinstance(data['managed_sites'], list):
        return jsonify({"details": "Managed sites must be provided using a list of elements"}), 400
    token = str(request.headers['authorization']).split(" ")[1]
    status_code, msg = kc_client.token_to_user(token)
    if status_code == requests.codes.ok:
        # Only site managers may modify managed sites; others get an empty list.
        if "SiteManager" in msg['roles']:
            status_code, msg = kc_client.delete_user_attributes(msg['id'], "managed_sites", data['managed_sites'])
        else:
            msg = {"managed_sites": []}
            status_code = 200
    return jsonify({"details": msg}), status_code
#### For testing purposes ####
@bp.route('/services', methods=['GET'])
@oidc.accept_token(require_token=True)
def services():
    """List the portal services visible to the caller based on realm roles.

    Roles are checked in priority order (admin, experimenter, vnfdev);
    callers with none of them receive a list holding one empty dict.
    """
    bearer = str(request.headers['authorization']).split(" ")[1]
    code, payload = kc_client.token_to_user(bearer)
    if code != requests.codes.ok:
        return payload, code
    role_catalogue = [
        ("5geve_admin", [{'name':'Experiments'}, {'name': 'VNF Storage'}, {'name': 'Services Catalogue'}, {'name': 'Tickets'}]),
        ("5geve_experimenter", [{'name':'Experiments'}, {'name': 'Services Catalogue'}, {'name': 'Tickets'}]),
        ("5geve_vnfdev", [{'name': 'VNF Storage'}, {'name': 'Tickets'}]),
    ]
    visible = [{}]
    for role, offered in role_catalogue:
        if role in payload['roles']:
            visible = offered
            break
    return jsonify({'details': visible}), code
| 33.671958
| 125
| 0.639063
| 808
| 6,364
| 4.846535
| 0.141089
| 0.08427
| 0.11236
| 0.053626
| 0.822778
| 0.806435
| 0.798008
| 0.751788
| 0.751788
| 0.740552
| 0
| 0.011949
| 0.197832
| 6,364
| 188
| 126
| 33.851064
| 0.755142
| 0.029855
| 0
| 0.639344
| 0
| 0
| 0.222278
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.040984
| 0
| 0.295082
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a6ffbb8ba7a5ff75b8ff96954c520a3df8a5e60
| 118
|
py
|
Python
|
cslib/__init__.py
|
Suresoft-GLaDOS/cxbuild
|
1eb568bc11ae8854b1a6025c969ec94c96d6a4a9
|
[
"MIT"
] | 2
|
2021-11-01T02:11:59.000Z
|
2021-11-04T09:19:45.000Z
|
cslib/__init__.py
|
HansolChoe/cxbuild
|
c289e40efdf92f34e7781772b3b84e0a1c7d0af2
|
[
"MIT"
] | 3
|
2021-11-04T06:23:38.000Z
|
2021-11-19T01:54:05.000Z
|
cslib/__init__.py
|
HansolChoe/cxbuild
|
c289e40efdf92f34e7781772b3b84e0a1c7d0af2
|
[
"MIT"
] | 2
|
2021-11-01T03:01:28.000Z
|
2021-11-04T09:19:28.000Z
|
from .csutil import *
from .zip import *
from .lib import *
from .filepattern import fnmatch
from .windowsAPI import *
| 23.6
| 32
| 0.762712
| 16
| 118
| 5.625
| 0.5
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161017
| 118
| 5
| 33
| 23.6
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a665d43dd0c7e99f641d87290bfbdab268de20c
| 7,413
|
py
|
Python
|
PythonCodeChallenge-04/linked-list/tests/test_linked_list.py
|
MusaabShalaldeh/401-data-structures-and-algorithms
|
9fc4bf2011062f9710daa2eb36568392e62f8ab5
|
[
"MIT"
] | null | null | null |
PythonCodeChallenge-04/linked-list/tests/test_linked_list.py
|
MusaabShalaldeh/401-data-structures-and-algorithms
|
9fc4bf2011062f9710daa2eb36568392e62f8ab5
|
[
"MIT"
] | null | null | null |
PythonCodeChallenge-04/linked-list/tests/test_linked_list.py
|
MusaabShalaldeh/401-data-structures-and-algorithms
|
9fc4bf2011062f9710daa2eb36568392e62f8ab5
|
[
"MIT"
] | null | null | null |
from linked_list import __version__
from linked_list.linked_list import Node, LinkedList
import pytest
def test_version():
    """The package version string is pinned at 0.1.0."""
    assert __version__ == '0.1.0'
def test_node_has_int_data():
    """A Node built from an int exposes that int via .data."""
    assert Node(1).data == 1
def test_node_has_str_data():
    """A Node built from a string exposes that string via .data."""
    assert Node("a").data == "a"
def test_node_is_a_Node():
    """The constructor returns an instance whose class name is 'Node'."""
    assert type(Node(1)).__name__ == "Node"
def test_node_without_value():
    """Omitting the required value argument raises TypeError."""
    with pytest.raises(TypeError):
        Node()
def test_new_linked_list_is_empty():
    """A freshly constructed LinkedList has no head node."""
    assert LinkedList().head is None
def test_linked_list_insert():
    """insert() on an empty list places the value at the head."""
    lst = LinkedList()
    lst.insert(1)
    assert lst.head.data == 1
def test_linked_list_insert_twice():
    """insert() prepends: the newest value becomes head, the older shifts to _next."""
    lst = LinkedList()
    lst.insert(1)
    lst.insert(0)
    assert lst.head.data == 0
    assert lst.head._next.data == 1
def test_linked_list_includes():
    """includes() reports True for a value stored in the list."""
    lst = LinkedList()
    for value in (0, 5, 9):
        lst.insert(value)
    assert lst.includes(5) == True
def test_linked_list_includes_fail():
    """includes() reports False for a value never inserted."""
    lst = LinkedList()
    for value in (0, 5, 9):
        lst.insert(value)
    assert lst.includes(20) == False
def test_linked_list_includes_first_item():
    """includes() finds the first value that was inserted."""
    lst = LinkedList()
    for value in (0, 5, 9):
        lst.insert(value)
    assert lst.includes(0) == True
def test_linked_list_includes_last_item():
    """includes() finds the most recently inserted value."""
    lst = LinkedList()
    for value in (0, 5, 9):
        lst.insert(value)
    assert lst.includes(9) == True
def test_linked_list_to_string():
    """str() renders nodes head-to-tail, ending with the NULL marker."""
    lst = LinkedList()
    for value in (0, 5, 9, 29):
        lst.insert(value)
    assert str(lst) == "{ 29 } -> { 9 } -> { 5 } -> { 0 } -> NULL"
def test_linked_empty_list_to_string():
    """str() of an empty list is just the NULL marker."""
    assert str(LinkedList()) == "NULL"
def test_linked_list_append():
    """append() adds at the tail, leaving the existing order intact."""
    lst = LinkedList()
    for value in (0, 5, 9):
        lst.insert(value)
    lst.append(29)
    assert str(lst) == "{ 9 } -> { 5 } -> { 0 } -> { 29 } -> NULL"
def test_linked_list_append_multi_values():
    """Repeated append() preserves insertion order head-to-tail."""
    lst = LinkedList()
    for value in (0, 5, 9, 29):
        lst.append(value)
    assert str(lst) == "{ 0 } -> { 5 } -> { 9 } -> { 29 } -> NULL"
def test_linked_list_append_empty():
    """append() on an empty list creates the sole node."""
    lst = LinkedList()
    lst.append(29)
    assert str(lst) == "{ 29 } -> NULL"
def test_linked_list_insert_before():
    """insert_before() places the new value directly ahead of the target."""
    lst = LinkedList()
    for value in (0, 5, 9):
        lst.insert(value)
    lst.insert_before(5, 29)
    assert str(lst) == "{ 9 } -> { 29 } -> { 5 } -> { 0 } -> NULL"
def test_linked_list_insert_before_not_found():
    """insert_before() with an absent target leaves the list unchanged."""
    lst = LinkedList()
    for value in (0, 7, 9):
        lst.insert(value)
    lst.insert_before(5, 29)
    assert str(lst) == "{ 9 } -> { 7 } -> { 0 } -> NULL"
def test_linked_list_insert_before_first():
    """insert_before() targeting the head puts the new value first."""
    lst = LinkedList()
    lst.insert(1)
    lst.insert_before(1, 29)
    assert str(lst) == "{ 29 } -> { 1 } -> NULL"
def test_linked_list_insert_after():
    """insert_after() places the new value directly behind the target."""
    lst = LinkedList()
    for value in (0, 5, 9):
        lst.insert(value)
    lst.insert_after(5, 25)
    assert str(lst) == "{ 9 } -> { 5 } -> { 25 } -> { 0 } -> NULL"
def test_linked_list_insert_after_last():
    """insert_after() targeting the tail appends at the end."""
    lst = LinkedList()
    for value in (0, 5, 9):
        lst.insert(value)
    lst.insert_after(0, 25)
    assert str(lst) == "{ 9 } -> { 5 } -> { 0 } -> { 25 } -> NULL"
def test_linked_list_kth():
    """kthFromEnd(3) on this four-node fixture yields 8."""
    lst = LinkedList()
    for value in (2, 8, 3, 1):
        lst.insert(value)
    assert lst.kthFromEnd(3) == 8
def test_second_linked_list_kth():
    """kthFromEnd(4) on this four-node fixture yields 2."""
    lst = LinkedList()
    for value in (2, 8, 3, 1):
        lst.insert(value)
    assert lst.kthFromEnd(4) == 2
def test_third_linked_list_kth():
    """kthFromEnd(5) on this six-node fixture yields 10."""
    lst = LinkedList()
    for value in (2, 10, 86, 6, 3, 1):
        lst.insert(value)
    assert lst.kthFromEnd(5) == 10
def test_fourth_linked_list_kth():
    """kthFromEnd(0) on this six-node fixture yields 2."""
    lst = LinkedList()
    for value in (2, 10, 86, 6, 3, 1):
        lst.insert(value)
    assert lst.kthFromEnd(0) == 2
def test_linked_list_kth_greaterthan():
    """kthFromEnd() with k beyond the list length returns None."""
    lst = LinkedList()
    for value in (2, 10, 86, 6, 3, 1):
        lst.insert(value)
    assert lst.kthFromEnd(8) is None
def test_linked_list_kth_same_length():
    """kthFromEnd(6) on this six-node fixture yields 2."""
    lst = LinkedList()
    for value in (2, 10, 86, 6, 3, 1):
        lst.insert(value)
    assert lst.kthFromEnd(6) == 2
def test_linked_list_kth_negative_k():
    """kthFromEnd() with a negative k returns None."""
    lst = LinkedList()
    for value in (2, 10, 86, 6, 3, 1):
        lst.insert(value)
    assert lst.kthFromEnd(-5) is None
def test_linked_list_size_1():
    """kthFromEnd(1) on a single-node list yields that node's value."""
    lst = LinkedList()
    lst.insert(10)
    assert lst.kthFromEnd(1) == 10
def test_linked_list_kth_happy_path():
    """kthFromEnd(3) on this five-node fixture yields 86."""
    lst = LinkedList()
    for value in (2, 10, 86, 6, 3):
        lst.insert(value)
    assert lst.kthFromEnd(3) == 86
def test_linked_list_zip():
    """zipLists() combines two lists into the expected single list."""
    left = LinkedList()
    right = LinkedList()
    for value in (1, 2, 3):
        left.append(value)
    for value in (4, 5, 6):
        right.append(value)
    combined = LinkedList.zipLists(left, right)
    assert str(combined) == "{ 1 } -> { 2 } -> { 3 } -> { 4 } -> { 5 } -> { 6 } -> NULL"
| 15.44375
| 73
| 0.634966
| 1,048
| 7,413
| 4.336832
| 0.092557
| 0.147855
| 0.132013
| 0.165897
| 0.808801
| 0.778218
| 0.757316
| 0.738174
| 0.689769
| 0.607041
| 0
| 0.035469
| 0.235532
| 7,413
| 480
| 74
| 15.44375
| 0.766543
| 0.114798
| 0
| 0.660232
| 0
| 0.003861
| 0.059768
| 0
| 0
| 0
| 0
| 0
| 0.123552
| 1
| 0.123552
| false
| 0
| 0.011583
| 0
| 0.135135
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6a83ba2968e6e0e7097619aa81b3138aa1100508
| 174
|
py
|
Python
|
nighres/filtering/__init__.py
|
atsuch/nighres
|
eb6265befb0b65b99c858ecb1c328d4d63e5a293
|
[
"Apache-2.0"
] | null | null | null |
nighres/filtering/__init__.py
|
atsuch/nighres
|
eb6265befb0b65b99c858ecb1c328d4d63e5a293
|
[
"Apache-2.0"
] | null | null | null |
nighres/filtering/__init__.py
|
atsuch/nighres
|
eb6265befb0b65b99c858ecb1c328d4d63e5a293
|
[
"Apache-2.0"
] | 1
|
2019-01-21T10:53:38.000Z
|
2019-01-21T10:53:38.000Z
|
from filter_ridge_structures import filter_ridge_structures
from bandpass_filtering import bandpass_filtering
from recursive_ridge_diffusion import recursive_ridge_diffusion
| 43.5
| 63
| 0.931034
| 22
| 174
| 6.909091
| 0.409091
| 0.144737
| 0.276316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 174
| 3
| 64
| 58
| 0.938272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
6a845bedd74859215be0c2807e24d85c9123a020
| 25
|
py
|
Python
|
__init__.py
|
xiangxw/pyhook_py3k
|
3a0a1fe8fb190e10761dd80f55a4cf8efd0fb3e3
|
[
"MIT"
] | 82
|
2015-01-18T12:28:33.000Z
|
2022-03-15T19:23:03.000Z
|
portcat/deploy/pyHook/__init__.py
|
Ghostik2005/smallprojects
|
eed57f8b706f810ab5eb7be7c1121cfd0e8f12e4
|
[
"MIT"
] | 12
|
2017-07-23T22:47:13.000Z
|
2022-02-27T14:10:12.000Z
|
portcat/deploy/pyHook/__init__.py
|
Ghostik2005/smallprojects
|
eed57f8b706f810ab5eb7be7c1121cfd0e8f12e4
|
[
"MIT"
] | 51
|
2015-01-17T08:37:40.000Z
|
2021-09-06T01:46:04.000Z
|
from HookManager import *
| 25
| 25
| 0.84
| 3
| 25
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a88eb26fb999cc50f1b80dbaab7e1aa5cfe660e
| 23
|
py
|
Python
|
tests/system/scripts/divide_with_zero.py
|
kmaork/madbg
|
9f6097d510897ddf56eb9d87d3ac82b3a177344a
|
[
"MIT"
] | 48
|
2019-07-05T23:16:42.000Z
|
2022-03-17T09:18:13.000Z
|
tests/system/scripts/divide_with_zero.py
|
kmaork/madbg
|
9f6097d510897ddf56eb9d87d3ac82b3a177344a
|
[
"MIT"
] | 30
|
2020-07-07T13:48:00.000Z
|
2022-03-24T09:19:39.000Z
|
tests/system/scripts/divide_with_zero.py
|
kmaork/madbg
|
9f6097d510897ddf56eb9d87d3ac82b3a177344a
|
[
"MIT"
] | 2
|
2021-08-16T16:30:27.000Z
|
2022-01-27T11:32:20.000Z
|
# NOTE(review): this lives under tests/system/scripts — presumably a fixture
# that must crash with ZeroDivisionError when run; the 1 / 0 is intentional.
# Confirm with the test suite before "fixing" it.
yo = 1
if yo:
    1 / 0
| 7.666667
| 9
| 0.391304
| 6
| 23
| 1.5
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.478261
| 23
| 3
| 9
| 7.666667
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6ac2b9a1da628138a7e6e46a2f9ee0e0bf659efc
| 21
|
py
|
Python
|
rl_with_videos/algorithms/__init__.py
|
simonr98/Reinforcement-Learning-with-Videos
|
e40cec6b8d817276375e940696b290fc4e1e8bc7
|
[
"MIT"
] | 25
|
2020-12-02T23:13:29.000Z
|
2022-02-25T07:57:30.000Z
|
rl_with_videos/algorithms/__init__.py
|
simonr98/Reinforcement-Learning-with-Videos
|
e40cec6b8d817276375e940696b290fc4e1e8bc7
|
[
"MIT"
] | 8
|
2020-12-12T14:11:58.000Z
|
2021-12-10T20:06:04.000Z
|
rl_with_videos/algorithms/__init__.py
|
simonr98/Reinforcement-Learning-with-Videos
|
e40cec6b8d817276375e940696b290fc4e1e8bc7
|
[
"MIT"
] | 8
|
2020-12-25T19:43:15.000Z
|
2021-10-13T02:53:58.000Z
|
from .sac import SAC
| 10.5
| 20
| 0.761905
| 4
| 21
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6ac7d8f6f94f5567228a6a56b4baa65270eff244
| 1,084
|
py
|
Python
|
magni/cs/reconstruction/__init__.py
|
SIP-AAU/Magni
|
6328dc98a273506f433af52e6bd394754a844550
|
[
"BSD-2-Clause"
] | 42
|
2015-02-09T10:17:26.000Z
|
2021-12-21T09:38:04.000Z
|
magni/cs/reconstruction/__init__.py
|
SIP-AAU/Magni
|
6328dc98a273506f433af52e6bd394754a844550
|
[
"BSD-2-Clause"
] | 3
|
2015-03-20T12:00:40.000Z
|
2015-03-20T12:01:16.000Z
|
magni/cs/reconstruction/__init__.py
|
SIP-AAU/Magni
|
6328dc98a273506f433af52e6bd394754a844550
|
[
"BSD-2-Clause"
] | 14
|
2015-04-28T03:08:32.000Z
|
2021-07-24T13:29:24.000Z
|
"""
..
Copyright (c) 2014-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Subpackage providing implementations of generic reconstruction algorithms.
Each subpackage provides a family of generic reconstruction algorithms. Thus
each subpackage has a config module and a run function which provide the
interface of the given family of reconstruction algorithms.
Routine listings
----------------
amp
Subpackage providing implementations of Approximate Message Passing (AMP).
gamp
Subpackage providing implementations of Generalised Approximate Message
Passing (GAMP).
it
Subpackage providing implementations of Iterative Thresholding (IT).
iht
Subpackage providing implementations of Iterative Hard Thresholding (IHT).
(Deprecated)
sl0
Subpackage providing implementations of Smoothed l0 Norm (SL0).
"""
from magni.cs.reconstruction import amp
from magni.cs.reconstruction import gamp
from magni.cs.reconstruction import it
from magni.cs.reconstruction import iht
from magni.cs.reconstruction import sl0
| 30.971429
| 78
| 0.787823
| 131
| 1,084
| 6.519084
| 0.450382
| 0.133489
| 0.238876
| 0.252927
| 0.286885
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0131
| 0.154982
| 1,084
| 34
| 79
| 31.882353
| 0.919214
| 0.805351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6ae4255a565f4acd1ead35ae4081f22ca73f2147
| 3,917
|
py
|
Python
|
pyspedas/stereo/__init__.py
|
MAVENSDC/pyspedas
|
05ede2603acb514bc7803be054016142c0851685
|
[
"MIT"
] | 1
|
2020-07-07T19:52:40.000Z
|
2020-07-07T19:52:40.000Z
|
pyspedas/stereo/__init__.py
|
MAVENSDC/pyspedas
|
05ede2603acb514bc7803be054016142c0851685
|
[
"MIT"
] | null | null | null |
pyspedas/stereo/__init__.py
|
MAVENSDC/pyspedas
|
05ede2603acb514bc7803be054016142c0851685
|
[
"MIT"
] | null | null | null |
from .load import load
def mag(trange=['2013-11-5', '2013-11-6'],
        probe='a',
        datatype='8hz',
        suffix='',
        get_support_data=False,
        varformat=None,
        downloadonly=False,
        notplot=False,
        no_update=False,
        time_clip=False):
    """
    Load data from the magnetometer.

    Parameters:
        trange : list of str
            Time range of interest [starttime, endtime] with the format
            'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
        probe: str
            Spacecraft identifier (default 'a')
        datatype: str
            Data type; Valid options: 8hz, 32hz
        suffix: str
            Suffix appended to every created tplot variable name
            (no suffix by default)
        get_support_data: bool
            If True, variables whose "VAR_TYPE" attribute is "support_data"
            are loaded in addition to those marked "data"
        varformat: str
            File variable formats to load into tplot; wildcard "*" accepted
            (all variables by default)
        downloadonly: bool
            Download the CDF files without creating tplot variables
        notplot: bool
            Return the data in hash tables instead of creating tplot variables
        no_update: bool
            Only load data from the local cache
        time_clip: bool
            Clip the variables to exactly the range given in trange

    Returns:
        List of tplot variables created.
    """
    # Collect the loader arguments in one place, then delegate.
    options = dict(instrument='mag', trange=trange, probe=probe,
                   datatype=datatype, suffix=suffix,
                   get_support_data=get_support_data, varformat=varformat,
                   downloadonly=downloadonly, notplot=notplot,
                   time_clip=time_clip, no_update=no_update)
    return load(**options)
def plastic(trange=['2013-11-5', '2013-11-6'],
            probe='a',
            datatype='1min',
            level='l2',
            suffix='',
            get_support_data=False,
            varformat=None,
            downloadonly=False,
            notplot=False,
            no_update=False,
            time_clip=False):
    """
    Load data from the PLASTIC instrument.

    Parameters:
        trange : list of str
            Time range of interest [starttime, endtime] with the format
            'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
        probe: str
            Spacecraft identifier (default 'a')
        datatype: str
            Data type; Valid options: 1min
        level: str
            Data processing level (default 'l2')
        suffix: str
            Suffix appended to every created tplot variable name
            (no suffix by default)
        get_support_data: bool
            If True, variables whose "VAR_TYPE" attribute is "support_data"
            are loaded in addition to those marked "data"
        varformat: str
            File variable formats to load into tplot; wildcard "*" accepted
            (all variables by default)
        downloadonly: bool
            Download the CDF files without creating tplot variables
        notplot: bool
            Return the data in hash tables instead of creating tplot variables
        no_update: bool
            Only load data from the local cache
        time_clip: bool
            Clip the variables to exactly the range given in trange

    Returns:
        List of tplot variables created.
    """
    # Collect the loader arguments in one place, then delegate.
    options = dict(instrument='plastic', trange=trange, probe=probe,
                   level=level, datatype=datatype, suffix=suffix,
                   get_support_data=get_support_data, varformat=varformat,
                   downloadonly=downloadonly, notplot=notplot,
                   time_clip=time_clip, no_update=no_update)
    return load(**options)
| 34.973214
| 254
| 0.621394
| 516
| 3,917
| 4.643411
| 0.217054
| 0.04591
| 0.046745
| 0.033389
| 0.936561
| 0.936561
| 0.936561
| 0.936561
| 0.936561
| 0.936561
| 0
| 0.012887
| 0.306612
| 3,917
| 111
| 255
| 35.288288
| 0.869293
| 0.655093
| 0
| 0.666667
| 0
| 0
| 0.052535
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.041667
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0a727b7f8741a52e5727c2a36123dd5bc830103b
| 26
|
py
|
Python
|
ui/display/__init__.py
|
opdich/pidrop-thumbdrive
|
50fb02bb4354fc2cc52d65707c43e8b0bb62ae81
|
[
"MIT"
] | null | null | null |
ui/display/__init__.py
|
opdich/pidrop-thumbdrive
|
50fb02bb4354fc2cc52d65707c43e8b0bb62ae81
|
[
"MIT"
] | null | null | null |
ui/display/__init__.py
|
opdich/pidrop-thumbdrive
|
50fb02bb4354fc2cc52d65707c43e8b0bb62ae81
|
[
"MIT"
] | null | null | null |
from .ws_display import *
| 13
| 25
| 0.769231
| 4
| 26
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0aa3a6cb24a36cb953448a0bb6596ef70ee8da62
| 28,461
|
py
|
Python
|
cfgov/v1/migrations/0250_add_fields_to_simplechart_block.py
|
alexandersirris/consumerfinance.gov
|
611bd5d88188177759faa1fbc63ae57deb88cfbd
|
[
"CC0-1.0"
] | 37
|
2020-08-18T19:52:39.000Z
|
2022-03-23T08:08:41.000Z
|
cfgov/v1/migrations/0250_add_fields_to_simplechart_block.py
|
alexandersirris/consumerfinance.gov
|
611bd5d88188177759faa1fbc63ae57deb88cfbd
|
[
"CC0-1.0"
] | 338
|
2020-08-14T20:46:36.000Z
|
2022-03-31T20:49:32.000Z
|
cfgov/v1/migrations/0250_add_fields_to_simplechart_block.py
|
alexandersirris/consumerfinance.gov
|
611bd5d88188177759faa1fbc63ae57deb88cfbd
|
[
"CC0-1.0"
] | 14
|
2020-10-21T15:27:03.000Z
|
2022-03-17T03:16:36.000Z
|
# Generated by Django 2.2.16 on 2021-03-16 15:27
import django.core.validators
from django.db import migrations
import jobmanager.blocks
import v1.atomic_elements.organisms
import v1.blocks
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.16).

    Redefines ``BrowsePage.content`` so its StreamField definition picks up
    the new fields added to the SimpleChart block (see migration filename).
    NOTE(review): the StreamField literal below appears to have been
    line-wrapped mid-string by extraction — confirm against the repository
    copy before editing it by hand.
    """

    # Must be applied after the enforcement-filter migration it builds on.
    dependencies = [
        ('v1', '0249_add_product_filter_to_enforcement_filter_control'),
    ]

    # Single operation: re-declare the full StreamField block tree for
    # BrowsePage.content (Wagtail stores the whole block definition here).
    operations = [
        migrations.AlterField(
            model_name='browsepage',
            name='content',
            field=wagtail.core.fields.StreamField([('full_width_text', wagtail.core.blocks.StreamBlock([('content', wagtail.core.blocks.RichTextBlock(icon='edit')), ('content_with_anchor', wagtail.core.blocks.StructBlock([('content_block', wagtail.core.blocks.RichTextBlock()), ('anchor_link', wagtail.core.blocks.StructBlock([('link_id', wagtail.core.blocks.CharBlock(help_text='\n ID will be auto-generated on save.\n However, you may enter some human-friendly text that\n will be incorporated to make it easier to read.\n ', label='ID for this content block', required=False))]))])), ('heading', wagtail.core.blocks.StructBlock([('text', v1.blocks.HeadingTextBlock(required=False)), ('level', wagtail.core.blocks.ChoiceBlock(choices=[('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')])), ('icon', v1.blocks.HeadingIconBlock(help_text='Input the name of an icon to appear to the left of the heading. E.g., approved, help-round, etc. <a href="https://cfpb.github.io/design-system/foundation/iconography">See full list of icons</a>', required=False))], required=False)), ('image', wagtail.core.blocks.StructBlock([('image', wagtail.core.blocks.StructBlock([('upload', wagtail.images.blocks.ImageChooserBlock(required=False)), ('alt', wagtail.core.blocks.CharBlock(help_text="If the image is decorative (i.e., if a screenreader wouldn't have anything useful to say about it), leave the Alt field blank.", required=False))])), ('image_width', wagtail.core.blocks.ChoiceBlock(choices=[('full', 'full'), (470, '470px'), (270, '270px'), (170, '170px')])), ('image_position', wagtail.core.blocks.ChoiceBlock(choices=[('right', 'right'), ('left', 'left')], help_text='Does not apply if the image is full-width')), ('text', wagtail.core.blocks.RichTextBlock(label='Caption', required=False)), ('is_bottom_rule', wagtail.core.blocks.BooleanBlock(default=True, help_text='Check to add a horizontal rule line to bottom of inset.', label='Has bottom rule line', required=False))])), ('table_block', 
v1.atomic_elements.organisms.AtomicTableBlock(table_options={'renderer': 'html'})), ('quote', wagtail.core.blocks.StructBlock([('body', wagtail.core.blocks.TextBlock()), ('citation', wagtail.core.blocks.TextBlock(required=False)), ('is_large', wagtail.core.blocks.BooleanBlock(required=False))])), ('cta', wagtail.core.blocks.StructBlock([('slug_text', wagtail.core.blocks.CharBlock(required=False)), ('paragraph_text', wagtail.core.blocks.RichTextBlock(required=False)), ('button', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False)), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('regular', 'Regular'), ('large', 'Large Primary')]))]))])), ('related_links', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))])), ('reusable_text', v1.blocks.ReusableTextChooserBlock('v1.ReusableText')), ('email_signup', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Stay informed', required=False)), ('default_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='If selected, heading will be styled as an H5 with green top rule. Deselect to style header as H3.', label='Default heading style', required=False)), ('text', wagtail.core.blocks.CharBlock(help_text='Write a sentence or two about what kinds of emails the user is signing up for, how frequently they will be sent, etc.', required=False)), ('gd_code', wagtail.core.blocks.CharBlock(help_text='Code for the topic (i.e., mailing list) you want people who submit this form to subscribe to. 
Format: USCFPB_###', label='GovDelivery code', required=False)), ('disclaimer_page', wagtail.core.blocks.PageChooserBlock(help_text='Choose the page that the "See Privacy Act statement" link should go to. If in doubt, use "Generic Email Sign-Up Privacy Act Statement".', label='Privacy Act statement', required=False))])), ('well', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.RichTextBlock(label='Well', required=False))])), ('well_with_ask_search', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.RichTextBlock(label='Well', required=False)), ('ask_search', wagtail.core.blocks.StructBlock([('show_label', wagtail.core.blocks.BooleanBlock(default=True, help_text='Whether to show form label.', required=False)), ('placeholder', wagtail.core.blocks.TextBlock(help_text='Text to show for the input placeholder text.', required=False))]))]))])), ('info_unit_group', wagtail.core.blocks.StructBlock([('format', wagtail.core.blocks.ChoiceBlock(choices=[('50-50', '50/50'), ('33-33-33', '33/33/33'), ('25-75', '25/75')], help_text='Choose the number and width of info unit columns.', label='Format')), ('heading', wagtail.core.blocks.StructBlock([('text', v1.blocks.HeadingTextBlock(required=False)), ('level', wagtail.core.blocks.ChoiceBlock(choices=[('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')])), ('icon', v1.blocks.HeadingIconBlock(help_text='Input the name of an icon to appear to the left of the heading. E.g., approved, help-round, etc. 
<a href="https://cfpb.github.io/design-system/foundation/iconography">See full list of icons</a>', required=False))], required=False)), ('intro', wagtail.core.blocks.RichTextBlock(help_text='If this field is not empty, the Heading field must also be set.', required=False)), ('link_image_and_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text="Check this to link all images and headings to the URL of the first link in their unit's list, if there is a link.", required=False)), ('has_top_rule_line', wagtail.core.blocks.BooleanBlock(default=False, help_text='Check this to add a horizontal rule line to top of info unit group.', required=False)), ('lines_between_items', wagtail.core.blocks.BooleanBlock(default=False, help_text='Check this to show horizontal rule lines between info units.', label='Show rule lines between items', required=False)), ('info_units', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('image', wagtail.core.blocks.StructBlock([('upload', wagtail.images.blocks.ImageChooserBlock(required=False)), ('alt', wagtail.core.blocks.CharBlock(help_text="If the image is decorative (i.e., if a screenreader wouldn't have anything useful to say about it), leave the Alt field blank.", required=False))])), ('heading', wagtail.core.blocks.StructBlock([('text', v1.blocks.HeadingTextBlock(required=False)), ('level', wagtail.core.blocks.ChoiceBlock(choices=[('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')])), ('icon', v1.blocks.HeadingIconBlock(help_text='Input the name of an icon to appear to the left of the heading. E.g., approved, help-round, etc. 
<a href="https://cfpb.github.io/design-system/foundation/iconography">See full list of icons</a>', required=False))], default={'level': 'h3'}, required=False)), ('body', wagtail.core.blocks.RichTextBlock(blank=True, required=False)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))]), required=False))]))), ('sharing', wagtail.core.blocks.StructBlock([('shareable', wagtail.core.blocks.BooleanBlock(help_text='If checked, share links will be included below the items.', label='Include sharing links?', required=False)), ('share_blurb', wagtail.core.blocks.CharBlock(help_text='Sets the tweet text, email subject line, and LinkedIn post text.', required=False))]))])), ('simple_chart', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('subtitle', wagtail.core.blocks.CharBlock(required=False)), ('figure', wagtail.core.blocks.CharBlock(required=False)), ('chart_type', wagtail.core.blocks.ChoiceBlock(choices=[('bar', 'Bar'), ('datetime', 'Datetime'), ('line', 'Line'), ('tilemap', 'Tilemap')])), ('data_source', wagtail.core.blocks.TextBlock(help_text="URL of the chart's data source or an array of JSON data", required=True)), ('data_series', wagtail.core.blocks.TextBlock(help_text='A string or array of keys (JSON) or headers (CSV) to include as data in the chart. 
Labels may be included via: {"key": <key>, "label": <label>}', required=False)), ('x_axis_data', wagtail.core.blocks.TextBlock(help_text='A string for a key/column or data array to include as categories or x values, depending on chart type.', required=False)), ('description', wagtail.core.blocks.CharBlock(help_text='Accessible description of the chart content', required=True)), ('y_axis_label', wagtail.core.blocks.CharBlock(help_text='y-axis label', required=True)), ('x_axis_label', wagtail.core.blocks.CharBlock(help_text='x-axis label, if needed', required=False)), ('transform', wagtail.core.blocks.CharBlock(help_text='Name of the javascript function in chart-hooks.js to run on the provided data before handing it to the chart', required=False)), ('filters', wagtail.core.blocks.CharBlock(help_text='Array of JSON objects of the form {"key": <key>, "label": <label>} to filter the underlying chart data on', required=False)), ('style_overrides', wagtail.core.blocks.TextBlock(help_text='A JSON object with style overrides for the underlying Highcharts chart. No object merging is done, nested objects should be referenced with dot notation: {"tooltip.shape": "circle"}', required=False)), ('credits', wagtail.core.blocks.CharBlock(help_text='Attribution for the data source', required=False)), ('date_published', wagtail.core.blocks.CharBlock(help_text='When the underlying data was published', required=False)), ('download_file', wagtail.core.blocks.CharBlock(help_text='Location of a file to download, if different from the data source', required=False)), ('download_text', wagtail.core.blocks.CharBlock(help_text='Custom text for the chart download field', required=False)), ('notes', wagtail.core.blocks.TextBlock(help_text='General chart information', required=False))])), ('expandable_group', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(help_text='Added as an <code><h3></code> at the top of this block. 
Also adds a wrapping <code><div></code> whose <code>id</code> attribute comes from a slugified version of this heading, creating an anchor that can be used when linking to this part of the page.', required=False)), ('body', wagtail.core.blocks.RichTextBlock(required=False)), ('is_accordion', wagtail.core.blocks.BooleanBlock(required=False)), ('has_top_rule_line', wagtail.core.blocks.BooleanBlock(default=False, help_text='Check this to add a horizontal rule line to top of expandable group.', required=False)), ('expandables', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('is_bordered', wagtail.core.blocks.BooleanBlock(required=False)), ('is_midtone', wagtail.core.blocks.BooleanBlock(required=False)), ('is_expanded', wagtail.core.blocks.BooleanBlock(required=False)), ('content', wagtail.core.blocks.StreamBlock([('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('well', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.RichTextBlock(label='Well', required=False))])), ('links', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])), ('email', wagtail.core.blocks.StructBlock([('emails', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.EmailBlock(label='Email address')), ('text', wagtail.core.blocks.CharBlock(label='Link text (optional)', required=False))])))])), ('phone', wagtail.core.blocks.StructBlock([('fax', wagtail.core.blocks.BooleanBlock(default=False, label='Is this number a fax?', required=False)), ('phones', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('number', wagtail.core.blocks.CharBlock(help_text='Do not include spaces or dashes. Ex. 
8554112372', max_length=15, validators=[django.core.validators.RegexValidator(message='Enter a numeric phone number, without punctuation.', regex='^\\d*$')])), ('extension', wagtail.core.blocks.CharBlock(max_length=4, required=False)), ('vanity', wagtail.core.blocks.CharBlock(help_text='A phoneword version of the above number. Include any formatting. Ex. (555) 222-CFPB', max_length=15, required=False)), ('tty', wagtail.core.blocks.CharBlock(help_text='Do not include spaces or dashes. Ex. 8554112372', label='TTY', max_length=15, required=False, validators=[django.core.validators.RegexValidator(message='Enter a numeric phone number, without punctuation.', regex='^\\d*$')])), ('tty_ext', wagtail.core.blocks.CharBlock(label='TTY Extension', max_length=4, required=False))])))])), ('address', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('title', wagtail.core.blocks.CharBlock(required=False)), ('street', wagtail.core.blocks.CharBlock(required=False)), ('city', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('state', wagtail.core.blocks.CharBlock(max_length=25, required=False)), ('zip_code', wagtail.core.blocks.CharBlock(max_length=15, required=False))]))], blank=True))])))])), ('expandable', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('is_bordered', wagtail.core.blocks.BooleanBlock(required=False)), ('is_midtone', wagtail.core.blocks.BooleanBlock(required=False)), ('is_expanded', wagtail.core.blocks.BooleanBlock(required=False)), ('content', wagtail.core.blocks.StreamBlock([('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('well', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.RichTextBlock(label='Well', required=False))])), ('links', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])), ('email', 
wagtail.core.blocks.StructBlock([('emails', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('url', wagtail.core.blocks.EmailBlock(label='Email address')), ('text', wagtail.core.blocks.CharBlock(label='Link text (optional)', required=False))])))])), ('phone', wagtail.core.blocks.StructBlock([('fax', wagtail.core.blocks.BooleanBlock(default=False, label='Is this number a fax?', required=False)), ('phones', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('number', wagtail.core.blocks.CharBlock(help_text='Do not include spaces or dashes. Ex. 8554112372', max_length=15, validators=[django.core.validators.RegexValidator(message='Enter a numeric phone number, without punctuation.', regex='^\\d*$')])), ('extension', wagtail.core.blocks.CharBlock(max_length=4, required=False)), ('vanity', wagtail.core.blocks.CharBlock(help_text='A phoneword version of the above number. Include any formatting. Ex. (555) 222-CFPB', max_length=15, required=False)), ('tty', wagtail.core.blocks.CharBlock(help_text='Do not include spaces or dashes. Ex. 
8554112372', label='TTY', max_length=15, required=False, validators=[django.core.validators.RegexValidator(message='Enter a numeric phone number, without punctuation.', regex='^\\d*$')])), ('tty_ext', wagtail.core.blocks.CharBlock(label='TTY Extension', max_length=4, required=False))])))])), ('address', wagtail.core.blocks.StructBlock([('label', wagtail.core.blocks.CharBlock(required=False)), ('title', wagtail.core.blocks.CharBlock(required=False)), ('street', wagtail.core.blocks.CharBlock(required=False)), ('city', wagtail.core.blocks.CharBlock(max_length=50, required=False)), ('state', wagtail.core.blocks.CharBlock(max_length=25, required=False)), ('zip_code', wagtail.core.blocks.CharBlock(max_length=15, required=False))]))], blank=True))])), ('well', wagtail.core.blocks.StructBlock([('content', wagtail.core.blocks.RichTextBlock(label='Well', required=False))])), ('video_player', wagtail.core.blocks.StructBlock([('video_id', wagtail.core.blocks.RegexBlock(error_messages={'invalid': 'The YouTube video ID is in the wrong format.'}, help_text='Enter the YouTube video ID, which is located at the end of the video URL, after "v=". For example, the video ID for https://www.youtube.com/watch?v=1V0Ax9OIc84 is 1V0Ax9OIc84.', label='YouTube video ID', regex='^[\\w-]{11}$', required=False)), ('thumbnail_image', wagtail.images.blocks.ImageChooserBlock(help_text='Optional thumbnail image to show before and after the video plays. 
If the thumbnail image is not set here, the video player will default to showing the thumbnail that was set in (or automatically chosen by) YouTube.', required=False))])), ('snippet_list', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('body', wagtail.core.blocks.RichTextBlock(required=False)), ('has_top_rule_line', wagtail.core.blocks.BooleanBlock(default=False, help_text='Check this to add a horizontal rule line above this block.', required=False)), ('image', wagtail.core.blocks.StructBlock([('upload', wagtail.images.blocks.ImageChooserBlock(required=False)), ('alt', wagtail.core.blocks.CharBlock(help_text="If the image is decorative (i.e., if a screenreader wouldn't have anything useful to say about it), leave the Alt field blank.", required=False))])), ('actions_column_width', wagtail.core.blocks.ChoiceBlock(choices=[('70', '70%'), ('66', '66%'), ('60', '60%'), ('50', '50%'), ('40', '40%'), ('33', '33%'), ('30', '30%')], help_text='Choose the width in % that you wish to set the Actions column in a resource list.', label='Width of "Actions" column', required=False)), ('show_thumbnails', wagtail.core.blocks.BooleanBlock(help_text="If selected, each resource in the list will include a 150px-wide image from the resource's thumbnail field.", required=False)), ('actions', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('link_label', wagtail.core.blocks.CharBlock(help_text='E.g., "Download" or "Order free prints"')), ('snippet_field', wagtail.core.blocks.ChoiceBlock(choices=[('related_file', 'Related file'), ('alternate_file', 'Alternate file'), ('link', 'Link'), ('alternate_link', 'Alternate link')], help_text='The field that the action link should point to'))]))), ('tags', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(label='Tag'), help_text='Enter tag names to filter the snippets. 
For a snippet to match and be output in the list, it must have been tagged with all of the tag names listed here. The tag names are case-insensitive.'))])), ('table_block', v1.atomic_elements.organisms.AtomicTableBlock(table_options={'renderer': 'html'})), ('feedback', wagtail.core.blocks.StructBlock([('was_it_helpful_text', wagtail.core.blocks.CharBlock(default='Was this page helpful to you?', help_text='Use this field only for feedback forms that use "Was this helpful?" radio buttons.', required=False)), ('intro_text', wagtail.core.blocks.CharBlock(help_text='Optional feedback intro', required=False)), ('question_text', wagtail.core.blocks.CharBlock(help_text='Optional expansion on intro', required=False)), ('radio_intro', wagtail.core.blocks.CharBlock(help_text='Leave blank unless you are building a feedback form with extra radio-button prompts, as in /owning-a-home/help-us-improve/.', required=False)), ('radio_text', wagtail.core.blocks.CharBlock(default='This information helps us understand your question better.', required=False)), ('radio_question_1', wagtail.core.blocks.CharBlock(default='How soon do you expect to buy a home?', required=False)), ('radio_question_2', wagtail.core.blocks.CharBlock(default='Do you currently own a home?', required=False)), ('button_text', wagtail.core.blocks.CharBlock(default='Submit')), ('contact_advisory', wagtail.core.blocks.RichTextBlock(help_text='Use only for feedback forms that ask for a contact email', required=False))])), ('raw_html_block', wagtail.core.blocks.RawHTMLBlock(label='Raw HTML block')), ('conference_registration_form', wagtail.core.blocks.StructBlock([('govdelivery_code', wagtail.core.blocks.CharBlock(help_text='Conference registrants will be subscribed to this GovDelivery topic.', label='GovDelivery code')), ('govdelivery_question_id', wagtail.core.blocks.RegexBlock(error_messages={'invalid': 'GovDelivery question ID must be 5 digits.'}, help_text='Enter the ID of the question in GovDelivery that is being 
used to track registration for this conference. It is the number in the question URL, e.g., the <code>12345</code> in <code>https://admin.govdelivery.com/questions/12345/edit</code>.', label='GovDelivery question ID', regex='^\\d{5,}$', required=False)), ('govdelivery_answer_id', wagtail.core.blocks.RegexBlock(error_messages={'invalid': 'GovDelivery answer ID must be 5 digits.'}, help_text='Enter the ID of the affirmative answer for the above question. To find it, right-click on the answer in the listing on a page like <code>https://admin.govdelivery.com/questions/12345/answers</code>, inspect the element, and look around in the source for a five-digit ID associated with that answer. <strong>Required if Govdelivery question ID is set.</strong>', label='GovDelivery answer ID', regex='^\\d{5,}$', required=False)), ('capacity', wagtail.core.blocks.IntegerBlock(help_text='Enter the (physical) conference attendance limit as a number.')), ('success_message', wagtail.core.blocks.RichTextBlock(help_text='Enter a message that will be shown on successful registration.')), ('at_capacity_message', wagtail.core.blocks.RichTextBlock(help_text='Enter a message that will be shown when the event is at capacity.')), ('failure_message', wagtail.core.blocks.RichTextBlock(help_text='Enter a message that will be shown if the GovDelivery subscription fails.'))])), ('chart_block', wagtail.core.blocks.StructBlock([('title', wagtail.core.blocks.CharBlock(required=True)), ('chart_type', wagtail.core.blocks.ChoiceBlock(choices=[('bar', 'Bar | % y-axis values'), ('line', 'Line | millions/billions y-axis values'), ('line-index', 'Line-Index | integer y-axis values'), ('tile_map', 'Tile Map | grid-like USA map')])), ('color_scheme', wagtail.core.blocks.ChoiceBlock(choices=[('blue', 'Blue'), ('gold', 'Gold'), ('green', 'Green'), ('navy', 'Navy'), ('neutral', 'Neutral'), ('purple', 'Purple'), ('teal', 'Teal')], help_text='Chart\'s color scheme. 
See "https://github.com/cfpb/cfpb-chart-builder#createchart-options-".', required=False)), ('data_source', wagtail.core.blocks.CharBlock(help_text='Location of the chart\'s data source relative to "https://files.consumerfinance.gov/data/". For example,"consumer-credit-trends/auto-loans/num_data_AUT.csv".', required=True)), ('date_published', wagtail.core.blocks.DateBlock(help_text='Automatically generated when CCT cron job runs')), ('description', wagtail.core.blocks.CharBlock(help_text='Briefly summarize the chart for visually impaired users.', required=True)), ('has_top_rule_line', wagtail.core.blocks.BooleanBlock(default=False, help_text='Check this to add a horizontal rule line to top of chart block.', required=False)), ('last_updated_projected_data', wagtail.core.blocks.DateBlock(help_text='Month of latest entry in dataset')), ('metadata', wagtail.core.blocks.CharBlock(help_text='Optional metadata for the chart to use. For example, with CCT this would be the chart\'s "group".', required=False)), ('note', wagtail.core.blocks.CharBlock(help_text='Text to display as a footnote. For example, "Data from the last six months are not final."', required=False)), ('y_axis_label', wagtail.core.blocks.CharBlock(help_text='Custom y-axis label. 
NOTE: Line-Index chart y-axis is not overridable with this field!', required=False))])), ('mortgage_chart_block', wagtail.core.blocks.StructBlock([('content_block', wagtail.core.blocks.RichTextBlock()), ('title', wagtail.core.blocks.CharBlock(form_classname='title', required=True)), ('description', wagtail.core.blocks.CharBlock(help_text='Chart summary for visually impaired users.', required=False)), ('note', wagtail.core.blocks.CharBlock(help_text='Text for "Note" section of footnotes.', required=False)), ('has_top_rule_line', wagtail.core.blocks.BooleanBlock(default=False, help_text='Check this to add a horizontal rule line to top of chart block.', required=False))])), ('mortgage_map_block', wagtail.core.blocks.StructBlock([('content_block', wagtail.core.blocks.RichTextBlock()), ('title', wagtail.core.blocks.CharBlock(form_classname='title', required=True)), ('description', wagtail.core.blocks.CharBlock(help_text='Chart summary for visually impaired users.', required=False)), ('note', wagtail.core.blocks.CharBlock(help_text='Text for "Note" section of footnotes.', required=False)), ('has_top_rule_line', wagtail.core.blocks.BooleanBlock(default=False, help_text='Check this to add a horizontal rule line to top of chart block.', required=False))])), ('mortgage_downloads_block', wagtail.core.blocks.StructBlock([('show_archives', wagtail.core.blocks.BooleanBlock(default=False, help_text='Check this box to allow the archival section to display. No section will appear if there are no archival downloads.', required=False))])), ('data_snapshot', wagtail.core.blocks.StructBlock([('market_key', wagtail.core.blocks.CharBlock(help_text='Market identifier, e.g. AUT', max_length=20, required=True)), ('num_originations', wagtail.core.blocks.CharBlock(help_text='Number of originations, e.g. 1.2 million', max_length=20)), ('value_originations', wagtail.core.blocks.CharBlock(help_text='Total dollar value of originations, e.g. 
$3.4 billion', max_length=20)), ('year_over_year_change', wagtail.core.blocks.CharBlock(help_text='Percentage change, e.g. 5.6% increase', max_length=20)), ('last_updated_projected_data', wagtail.core.blocks.DateBlock(help_text='Month of latest entry in dataset')), ('num_originations_text', wagtail.core.blocks.CharBlock(help_text='Descriptive sentence, e.g. Auto loans originated', max_length=100)), ('value_originations_text', wagtail.core.blocks.CharBlock(help_text='Descriptive sentence, e.g. Dollar volume of new loans', max_length=100)), ('year_over_year_change_text', wagtail.core.blocks.CharBlock(help_text='Descriptive sentence, e.g. In year-over-year originations', max_length=100)), ('inquiry_month', wagtail.core.blocks.DateBlock(help_text='Month of latest entry in dataset for inquiry data', max_length=20, required=False)), ('inquiry_year_over_year_change', wagtail.core.blocks.CharBlock(help_text='Percentage change, e.g. 5.6% increase', max_length=20, required=False)), ('inquiry_year_over_year_change_text', wagtail.core.blocks.CharBlock(help_text='Descriptive sentence, e.g. In year-over-year inquiries', max_length=100, required=False)), ('tightness_month', wagtail.core.blocks.DateBlock(help_text='Month of latest entry in dataset for credit tightness data', max_length=20, required=False)), ('tightness_year_over_year_change', wagtail.core.blocks.CharBlock(help_text='Percentage change, e.g. 5.6% increase', max_length=20, required=False)), ('tightness_year_over_year_change_text', wagtail.core.blocks.CharBlock(help_text='Descriptive sentence, e.g. 
In year-over-year credit tightness', max_length=100, required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(icon='image', required=False))])), ('job_listing_table', jobmanager.blocks.JobListingTable()), ('yes_checklist', wagtail.core.blocks.StructBlock([('checklist', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('item', wagtail.core.blocks.CharBlock(help_text='Short description for a checkbox item')), ('details', wagtail.core.blocks.RichTextBlock(help_text='Deeper explanation of the item', required=False))])))]))], blank=True),
        ),
    ]
| 1,094.653846
| 27,911
| 0.756649
| 3,960
| 28,461
| 5.356061
| 0.158333
| 0.127063
| 0.194767
| 0.116455
| 0.648986
| 0.61273
| 0.558039
| 0.51801
| 0.491513
| 0.458887
| 0
| 0.010878
| 0.07621
| 28,461
| 25
| 27,912
| 1,138.44
| 0.795831
| 0.001616
| 0
| 0
| 1
| 1.368421
| 0.437441
| 0.065428
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.421053
| 0
| 0.578947
| 0.052632
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0aa68315c1c0a24b069c75fd053530f5560e09eb
| 5,398
|
py
|
Python
|
pubsub/google/cloud/pubsub_v1/gapic/subscriber_client_config.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 2
|
2021-11-26T07:08:43.000Z
|
2022-03-07T20:20:04.000Z
|
pubsub/google/cloud/pubsub_v1/gapic/subscriber_client_config.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | null | null | null |
pubsub/google/cloud/pubsub_v1/gapic/subscriber_client_config.py
|
DaveCheez/google-cloud-python
|
fc03d4d41f13e9d13db7206438163b3a471fdabd
|
[
"Apache-2.0"
] | 1
|
2020-04-14T10:47:41.000Z
|
2020-04-14T10:47:41.000Z
|
# Client-side retry/timeout configuration for the google.pubsub.v1.Subscriber
# API surface, in the standard GAPIC client_config shape.


def _retry_params(initial_rpc_timeout_millis, max_rpc_timeout_millis):
    """Build one retry_params table; only the RPC timeouts differ between tables."""
    return {
        "initial_retry_delay_millis": 100,
        "retry_delay_multiplier": 1.3,
        "max_retry_delay_millis": 60000,
        "initial_rpc_timeout_millis": initial_rpc_timeout_millis,
        "rpc_timeout_multiplier": 1.0,
        "max_rpc_timeout_millis": max_rpc_timeout_millis,
        "total_timeout_millis": 600000,
    }


# (method name, timeout_millis, retry_codes_name, retry_params_name)
_METHOD_TABLE = (
    ("CreateSubscription", 60000, "idempotent", "default"),
    ("GetSubscription", 60000, "idempotent", "default"),
    ("UpdateSubscription", 60000, "non_idempotent", "default"),
    ("ListSubscriptions", 60000, "idempotent", "default"),
    ("DeleteSubscription", 60000, "non_idempotent", "default"),
    ("ModifyAckDeadline", 60000, "non_idempotent", "default"),
    ("Acknowledge", 60000, "non_idempotent", "messaging"),
    ("Pull", 60000, "idempotent", "messaging"),
    ("StreamingPull", 900000, "none", "streaming_messaging"),
    ("ModifyPushConfig", 60000, "non_idempotent", "default"),
    ("ListSnapshots", 60000, "idempotent", "default"),
    ("CreateSnapshot", 60000, "non_idempotent", "default"),
    ("UpdateSnapshot", 60000, "non_idempotent", "default"),
    ("DeleteSnapshot", 60000, "non_idempotent", "default"),
    ("Seek", 60000, "idempotent", "default"),
    ("SetIamPolicy", 60000, "non_idempotent", "default"),
    ("GetIamPolicy", 60000, "idempotent", "default"),
    ("TestIamPermissions", 60000, "non_idempotent", "default"),
)

config = {
    "interfaces": {
        "google.pubsub.v1.Subscriber": {
            # Which gRPC status codes each retry class considers retryable.
            "retry_codes": {
                "idempotent": ["ABORTED", "UNAVAILABLE", "UNKNOWN"],
                "non_idempotent": ["UNAVAILABLE"],
                "none": [],
            },
            # Backoff/timeout profiles referenced by name from the methods below.
            "retry_params": {
                "default": _retry_params(60000, 60000),
                "messaging": _retry_params(25000, 25000),
                "streaming_messaging": _retry_params(600000, 600000),
            },
            # Per-RPC settings expanded from the compact table above.
            "methods": {
                name: {
                    "timeout_millis": timeout_millis,
                    "retry_codes_name": retry_codes_name,
                    "retry_params_name": retry_params_name,
                }
                for name, timeout_millis, retry_codes_name, retry_params_name in _METHOD_TABLE
            },
        }
    }
}
| 40.586466
| 68
| 0.421638
| 352
| 5,398
| 6.014205
| 0.153409
| 0.165801
| 0.161549
| 0.184695
| 0.731223
| 0.722721
| 0.722721
| 0.722721
| 0.722721
| 0.66462
| 0
| 0.062632
| 0.473509
| 5,398
| 132
| 69
| 40.893939
| 0.682266
| 0
| 0
| 0.5
| 0
| 0
| 0.388292
| 0.082808
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ab27da3f1b8ec67fcf976c9a3cffba870946ea5
| 3,285
|
py
|
Python
|
econtools/metrics/tests/test_ols.py
|
fqueiro/econtools
|
cae1cfc82e02cb9081c247b530b10dc68ee18820
|
[
"BSD-3-Clause"
] | 93
|
2018-02-12T17:21:39.000Z
|
2022-03-11T23:14:18.000Z
|
econtools/metrics/tests/test_ols.py
|
fqueiro/econtools
|
cae1cfc82e02cb9081c247b530b10dc68ee18820
|
[
"BSD-3-Clause"
] | 5
|
2018-09-05T02:10:05.000Z
|
2022-01-07T17:07:23.000Z
|
econtools/metrics/tests/test_ols.py
|
fqueiro/econtools
|
cae1cfc82e02cb9081c247b530b10dc68ee18820
|
[
"BSD-3-Clause"
] | 25
|
2018-06-06T07:35:03.000Z
|
2021-12-10T06:59:06.000Z
|
from os import path
import pandas as pd
from econtools.metrics.util.testing import RegCompare
from econtools.metrics.api import reg
from econtools.metrics.tests.data.src_ols import (ols_std, ols_robust, ols_hc2,
ols_hc3, ols_cluster)
class TestOLS_std(RegCompare):
    """OLS with default (spherical) standard errors vs. Stata output."""
    @classmethod
    def setup_class(cls):
        """Stata reg output from `sysuse auto; reg price mpg length`"""
        cls.init(cls)
        # VCE only matches Stata to 6 decimal places for this spec
        cls.precision['vce'] = 6
        test_path = path.split(path.relpath(__file__))[0]
        auto_path = path.join(test_path, 'data', 'auto.dta')
        autodata = pd.read_stata(auto_path)
        y = 'price'
        x = ['mpg', 'length']
        cls.result = reg(autodata, y, x, addcons=True)
        cls.expected = ols_std
class TestOLS_std_y_list(RegCompare):
    """Same spec as TestOLS_std, but `y` passed as a one-element list.

    Verifies `reg` accepts a list-valued dependent-variable argument.
    """
    @classmethod
    def setup_class(cls):
        """Stata reg output from `sysuse auto; reg price mpg length`"""
        cls.init(cls)
        # VCE only matches Stata to 6 decimal places for this spec
        cls.precision['vce'] = 6
        test_path = path.split(path.relpath(__file__))[0]
        auto_path = path.join(test_path, 'data', 'auto.dta')
        autodata = pd.read_stata(auto_path)
        y = ['price']
        x = ['mpg', 'length']
        cls.result = reg(autodata, y, x, addcons=True)
        cls.expected = ols_std
class TestOLS_hc1(RegCompare):
    """OLS with HC1 (Stata `robust`) standard errors."""
    @classmethod
    def setup_class(cls):
        """Stata reg output from `sysuse auto; reg price mpg length, robust`"""
        cls.init(cls)
        test_path = path.split(path.relpath(__file__))[0]
        auto_path = path.join(test_path, 'data', 'auto.dta')
        autodata = pd.read_stata(auto_path)
        y = 'price'
        x = ['mpg', 'length']
        cls.result = reg(autodata, y, x, vce_type='hc1', addcons=True)
        cls.expected = ols_robust
class TestOLS_hc2(RegCompare):
    """OLS with HC2 heteroskedasticity-robust standard errors."""
    @classmethod
    def setup_class(cls):
        """Stata reg output from `sysuse auto; reg price mpg length, vce(hc2)`"""
        cls.init(cls)
        test_path = path.split(path.relpath(__file__))[0]
        auto_path = path.join(test_path, 'data', 'auto.dta')
        autodata = pd.read_stata(auto_path)
        y = 'price'
        x = ['mpg', 'length']
        cls.result = reg(autodata, y, x, vce_type='hc2', addcons=True)
        cls.expected = ols_hc2
class TestOLS_hc3(RegCompare):
    """OLS with HC3 heteroskedasticity-robust standard errors."""
    @classmethod
    def setup_class(cls):
        """Stata reg output from `sysuse auto; reg price mpg length, vce(hc3)`"""
        cls.init(cls)
        test_path = path.split(path.relpath(__file__))[0]
        auto_path = path.join(test_path, 'data', 'auto.dta')
        autodata = pd.read_stata(auto_path)
        y = 'price'
        x = ['mpg', 'length']
        cls.result = reg(autodata, y, x, vce_type='hc3', addcons=True)
        cls.expected = ols_hc3
class TestOLS_cluster(RegCompare):
    """OLS with standard errors clustered on `gear_ratio`."""
    @classmethod
    def setup_class(cls):
        """Stata output from `sysuse auto; reg price mpg length, vce(cluster gear_ratio)`"""
        cls.init(cls)
        test_path = path.split(path.relpath(__file__))[0]
        auto_path = path.join(test_path, 'data', 'auto.dta')
        autodata = pd.read_stata(auto_path)
        y = 'price'
        x = ['mpg', 'length']
        cls.result = reg(autodata, y, x, cluster='gear_ratio', addcons=True)
        cls.expected = ols_cluster
if __name__ == '__main__':
    # Allow running this test module directly (outside a pytest invocation)
    import pytest
    pytest.main()
| 30.990566
| 79
| 0.602435
| 434
| 3,285
| 4.347926
| 0.147465
| 0.050874
| 0.076312
| 0.09221
| 0.81982
| 0.766826
| 0.766826
| 0.766826
| 0.766826
| 0.766826
| 0
| 0.00745
| 0.264536
| 3,285
| 105
| 80
| 31.285714
| 0.773593
| 0.092846
| 0
| 0.688312
| 0
| 0
| 0.06409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077922
| false
| 0
| 0.077922
| 0
| 0.233766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ad9a66f32e1c97d292f41da047191a0b12596eb
| 67
|
py
|
Python
|
__init__.py
|
18F/codetalker
|
1c665de99a804d5abda1ddbccb055e05e3efa5e5
|
[
"CC0-1.0"
] | 1
|
2017-12-27T21:21:09.000Z
|
2017-12-27T21:21:09.000Z
|
__init__.py
|
18F/codetalker
|
1c665de99a804d5abda1ddbccb055e05e3efa5e5
|
[
"CC0-1.0"
] | null | null | null |
__init__.py
|
18F/codetalker
|
1c665de99a804d5abda1ddbccb055e05e3efa5e5
|
[
"CC0-1.0"
] | 2
|
2019-05-21T18:53:21.000Z
|
2021-02-18T11:11:28.000Z
|
from codetalker.main.api import app
def runserver():
    """Start the codetalker web application's built-in server (blocking call)."""
    app.run()
| 16.75
| 35
| 0.716418
| 10
| 67
| 4.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164179
| 67
| 4
| 36
| 16.75
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0add5f395fc530905745818ff27f859a903ecf5b
| 145
|
py
|
Python
|
j1708/__init__.py
|
grimm-co/stm32-j1708
|
5ca0b1961752ba49f816ce22cbc6aaefb4ae95dc
|
[
"BSD-3-Clause"
] | 3
|
2021-11-05T21:09:56.000Z
|
2021-11-19T03:16:41.000Z
|
j1708/__init__.py
|
grimm-co/stm32-j1708
|
5ca0b1961752ba49f816ce22cbc6aaefb4ae95dc
|
[
"BSD-3-Clause"
] | null | null | null |
j1708/__init__.py
|
grimm-co/stm32-j1708
|
5ca0b1961752ba49f816ce22cbc6aaefb4ae95dc
|
[
"BSD-3-Clause"
] | null | null | null |
from .iface import *
from .msg import *
from .pids import J1708PID
from .mids import J1708MID
from .pid_types import *
from .exceptions import *
| 20.714286
| 26
| 0.765517
| 21
| 145
| 5.238095
| 0.52381
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066116
| 0.165517
| 145
| 6
| 27
| 24.166667
| 0.842975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0ae4e4c1a6372b4cf31e4efbb1321fefb0ad204d
| 333
|
py
|
Python
|
python_modules/dagster/dagster/core/storage/event_log/__init__.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 1
|
2020-08-10T23:03:37.000Z
|
2020-08-10T23:03:37.000Z
|
python_modules/dagster/dagster/core/storage/event_log/__init__.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 7
|
2022-03-16T06:55:04.000Z
|
2022-03-18T07:03:25.000Z
|
python_modules/dagster/dagster/core/storage/event_log/__init__.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 1
|
2020-08-20T14:20:31.000Z
|
2020-08-20T14:20:31.000Z
|
from .base import AssetAwareEventLogStorage, EventLogStorage
from .in_memory import InMemoryEventLogStorage
from .schema import SqlEventLogStorageMetadata, SqlEventLogStorageTable
from .sql_event_log import AssetAwareSqlEventLogStorage, SqlEventLogStorage
from .sqlite import ConsolidatedSqliteEventLogStorage, SqliteEventLogStorage
| 55.5
| 76
| 0.900901
| 27
| 333
| 11
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072072
| 333
| 5
| 77
| 66.6
| 0.961165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e40a7d69d316cb7515e268b4763f820bb7d33bd5
| 47
|
py
|
Python
|
vanirio/__init__.py
|
vaniriovanhalteren/sdk-python
|
947b08fbe046d46275bf39bc95984fbf3edc0e6c
|
[
"MIT"
] | null | null | null |
vanirio/__init__.py
|
vaniriovanhalteren/sdk-python
|
947b08fbe046d46275bf39bc95984fbf3edc0e6c
|
[
"MIT"
] | null | null | null |
vanirio/__init__.py
|
vaniriovanhalteren/sdk-python
|
947b08fbe046d46275bf39bc95984fbf3edc0e6c
|
[
"MIT"
] | 1
|
2022-02-08T08:15:07.000Z
|
2022-02-08T08:15:07.000Z
|
from vanirio.module.interface import Interface
| 23.5
| 46
| 0.87234
| 6
| 47
| 6.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7c3c76ea23eb59c959fa2075932d96ee456288c9
| 158
|
py
|
Python
|
app/models/__init__.py
|
Jotasenpai/DigitalMediaStoreRESTfull
|
bb776d398e1756b1ff2fd4f392b80479ae29847d
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
Jotasenpai/DigitalMediaStoreRESTfull
|
bb776d398e1756b1ff2fd4f392b80479ae29847d
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
Jotasenpai/DigitalMediaStoreRESTfull
|
bb776d398e1756b1ff2fd4f392b80479ae29847d
|
[
"MIT"
] | null | null | null |
from .albums import Album # noqa:F401
from .artists import Artist # noqa:F401
from .genres import Genre # noqa:F401
from .tracks import Track # noqa:F401
| 31.6
| 40
| 0.746835
| 24
| 158
| 4.916667
| 0.5
| 0.271186
| 0.305085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092308
| 0.177215
| 158
| 4
| 41
| 39.5
| 0.815385
| 0.246835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7c5d9a3bfc798cd28299893025ba6e5dedc7b045
| 22,556
|
py
|
Python
|
ecogdata/datasource/tests/test_mapped_source.py
|
miketrumpis/ecogdata
|
ff65820198e69608634c12686a86b97ac3a77558
|
[
"BSD-3-Clause"
] | null | null | null |
ecogdata/datasource/tests/test_mapped_source.py
|
miketrumpis/ecogdata
|
ff65820198e69608634c12686a86b97ac3a77558
|
[
"BSD-3-Clause"
] | null | null | null |
ecogdata/datasource/tests/test_mapped_source.py
|
miketrumpis/ecogdata
|
ff65820198e69608634c12686a86b97ac3a77558
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import pytest
import numpy as np
from ecogdata.datasource.array_abstractions import HDF5Buffer
from ecogdata.datasource.memmap import MappedSource, MemoryBlowOutError
from ecogdata.datasource.basic import PlainArraySource
from .test_array_abstractions import _create_hdf5, _create_buffer, _create_binder
def test_basic_construction():
    """MappedSource from a buffer: shape, channel mask, and aligned arrays."""
    aux_arrays = ('test1', 'test2')
    buffer, data = _create_buffer(aux_arrays=aux_arrays)
    # hacky way to get h5py.File object...
    hdf = buffer.file_array.file
    aligned = dict([(k, HDF5Buffer(hdf[k])) for k in aux_arrays])
    map_source = MappedSource(buffer, aligned_arrays=aligned)
    shape = data.shape
    assert map_source.shape == shape, 'Shape wrong'
    assert map_source.binary_channel_mask.sum() == shape[0], 'Wrong number of active channels'
    for field in aux_arrays:
        assert hasattr(map_source, field), 'Aux field {} not preserved'.format(field)
        assert getattr(map_source, field).shape[1] == shape[1], 'aligned field {} wrong length'.format(field)
    # repeat for transpose
    map_source = MappedSource(buffer, aligned_arrays=aligned, transpose=True)
    assert map_source.shape == shape[::-1], 'Shape wrong in transpose'
    assert map_source.binary_channel_mask.sum() == shape[1], 'Wrong number of active channels in transpose'
def test_basic_construction_binder():
    """MappedSource built from a multi-file binder: shape and channel mask."""
    buffer, data = _create_binder(axis=1)
    map_source = MappedSource(buffer)
    shape = data.shape
    assert map_source.shape == shape, 'Shape wrong'
    assert map_source.binary_channel_mask.sum() == shape[0], 'Wrong number of active channels'
    # repeat for transpose
    map_source = MappedSource(buffer, transpose=True)
    assert map_source.shape == shape[::-1], 'Shape wrong in transpose'
    assert map_source.binary_channel_mask.sum() == shape[1], 'Wrong number of active channels in transpose'
def test_construction_from_single_source():
    """`from_hdf_sources` with one HDF5 file preserves shape and aligned fields."""
    aux_arrays = ('test1', 'test2')
    f = _create_hdf5(aux_arrays=aux_arrays)
    shape = f['data'].shape
    map_source = MappedSource.from_hdf_sources(f, 'data', aligned_arrays=aux_arrays)
    assert map_source.shape == shape, 'Shape wrong'
    assert map_source.binary_channel_mask.sum() == shape[0], 'Wrong number of active channels'
    for field in aux_arrays:
        assert hasattr(map_source, field), 'Aux field {} not preserved'.format(field)
        assert getattr(map_source, field).shape[1] == shape[1], 'aligned field {} wrong length'.format(field)
    # repeat for transpose
    map_source = MappedSource.from_hdf_sources(f, 'data', aligned_arrays=aux_arrays, transpose=True)
    assert map_source.shape == shape[::-1], 'Shape wrong in transpose'
    assert map_source.binary_channel_mask.sum() == shape[1], 'Wrong number of active channels in transpose'
def test_construction_from_sources():
    """`from_hdf_sources` with several files concatenates along the sample axis."""
    aux_arrays = ('test1', 'test2')
    files = [_create_hdf5(aux_arrays=aux_arrays) for i in range(3)]
    shape = files[0]['data'].shape
    # three identical files stacked on the sample (column) axis
    shape = (shape[0], 3 * shape[1])
    map_source = MappedSource.from_hdf_sources(files, 'data', aligned_arrays=aux_arrays)
    assert map_source.shape == shape, 'Shape wrong'
    assert map_source.binary_channel_mask.sum() == shape[0], 'Wrong number of active channels'
    for field in aux_arrays:
        assert hasattr(map_source, field), 'Aux field {} not preserved'.format(field)
        assert getattr(map_source, field).shape[1] == shape[1], 'aligned field {} wrong length'.format(field)
    # repeat for transpose: now sources are stacked on axis=0, but the resulting shape is transposed per vector
    # timeseries convention (channels X samples)
    shape = files[0]['data'].shape
    shape = (shape[0] * 3, shape[1])
    map_source = MappedSource.from_hdf_sources(files, 'data', aligned_arrays=aux_arrays, transpose=True)
    assert map_source.shape == shape[::-1], 'Shape wrong in transpose'
    assert map_source.binary_channel_mask.sum() == shape[1], 'Wrong number of active channels in transpose'
    for field in aux_arrays:
        assert hasattr(map_source, field), 'Aux field {} not preserved'.format(field)
        assert getattr(map_source, field).shape[0] == shape[0], 'aligned field {} wrong length'.format(field)
def test_joining():
    """Joining multi-file and single-file sources works in both orders."""
    aux_arrays = ('test1', 'test2')
    files = [_create_hdf5(aux_arrays=aux_arrays) for i in range(3)]
    map_source1 = MappedSource.from_hdf_sources(files, 'data', aligned_arrays=aux_arrays)
    next_file = _create_hdf5(aux_arrays=aux_arrays)
    map_source2 = MappedSource.from_hdf_sources(next_file, 'data', aligned_arrays=aux_arrays)
    full_map = map_source1.join(map_source2)
    assert full_map.shape == (len(map_source1), map_source1.shape[1] + map_source2.shape[1]), 'binder to buffer appending failed'
    full_map = map_source2.join(map_source1)
    assert full_map.shape == (len(map_source1), map_source1.shape[1] + map_source2.shape[1]), 'buffer to binder appending failed'
def test_joiningT():
    """Same as test_joining, with transposed on-disk sources."""
    aux_arrays = ('test1', 'test2')
    files = [_create_hdf5(aux_arrays=aux_arrays) for i in range(3)]
    map_source1 = MappedSource.from_hdf_sources(files, 'data', aligned_arrays=aux_arrays, transpose=True)
    next_file = _create_hdf5(aux_arrays=aux_arrays)
    map_source2 = MappedSource.from_hdf_sources(next_file, 'data', aligned_arrays=aux_arrays, transpose=True)
    full_map = map_source1.join(map_source2)
    assert full_map.shape == (len(map_source1), map_source1.shape[1] + map_source2.shape[1]), 'binder to buffer appending failed'
    full_map = map_source2.join(map_source1)
    assert full_map.shape == (len(map_source1), map_source1.shape[1] + map_source2.shape[1]), 'buffer to binder appending failed'
def test_direct_mapped():
    """`is_direct_map` is true only when no channel subset is selected."""
    f = _create_hdf5()
    mapped_source = MappedSource.from_hdf_sources(f, 'data')
    assert mapped_source.is_direct_map, 'direct map should be true'
    mapped_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=range(4))
    assert not mapped_source.is_direct_map, 'direct map should be false'
    # for transposed disk arrays
    f = _create_hdf5(transpose=True)
    mapped_source = MappedSource.from_hdf_sources(f, 'data', transpose=True)
    assert mapped_source.is_direct_map, 'direct map should be true'
    mapped_source = MappedSource.from_hdf_sources(f, 'data', transpose=True, electrode_channels=range(4))
    assert not mapped_source.is_direct_map, 'direct map should be false'
def test_scaling():
    """Scalar and affine `units_scale` conversions are applied on read.

    With a scalar, values read back as stored * scale; with a 2-tuple
    the read is (stored + offset) * scale -- presumably (offset, gain),
    matching the (value - 100) * 2 expectation below.
    """
    f = _create_hdf5()
    float_data = f['data'][:, 500:1000].astype('d')
    map_source = MappedSource.from_hdf_sources(f, 'data', units_scale=2.0)
    # original used `np.all(...).all()` -- the trailing .all() was a
    # redundant reduction of an already-scalar result
    assert np.array_equal(map_source[:, 500:1000], float_data * 2), 'scalar scaling wrong'
    map_source = MappedSource.from_hdf_sources(f, 'data', units_scale=(-100, 2.0))
    assert np.array_equal(map_source[:, 500:1000], (float_data - 100) * 2), 'affine scaling wrong'
def test_electrode_subset():
    """Selecting a subset of electrode channels maps only those rows."""
    f = _create_hdf5()
    electrode_channels = [2, 4, 6, 8]
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
    data = f['data'][:, :][electrode_channels]
    assert np.all(data[:, 100:200] == map_source[:, 100:200]), 'electrode subset failed'
def test_electrode_subsetT():
    """Channel subsetting with a transposed on-disk array."""
    f = _create_hdf5(transpose=True)
    electrode_channels = [2, 4, 6, 8]
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels, transpose=True)
    data = f['data'][:, :][:, electrode_channels].T
    assert np.all(data[:, 100:200] == map_source[:, 100:200]), 'electrode subset failed in transpose'
def test_channel_map():
    """Masking channels hides them from reads; a None mask restores all."""
    f = _create_hdf5()
    electrode_channels = list(range(10))
    binary_mask = np.ones(10, '?')
    binary_mask[:5] = False
    # so channels 5, 6, 7, 8, 9 should be active
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
    map_source.set_channel_mask(binary_mask)
    assert (map_source.binary_channel_mask == binary_mask).all(), 'binary mask wrong'
    data = f['data'][:, :][electrode_channels, :]
    assert np.all(data[5:, 100:200] == map_source[:, 100:200]), 'channel masking failed'
    # unmask
    map_source.set_channel_mask(None)
    binary_mask[:] = True
    assert (map_source.binary_channel_mask == binary_mask).all(), 'binary mask wrong'
    data = f['data'][:, :][electrode_channels, :]
    assert np.all(data[:, 100:200] == map_source[:, 100:200]), 'channel masking failed'
def test_channel_mapT():
    """Channel masking with a transposed on-disk array."""
    f = _create_hdf5(transpose=True)
    electrode_channels = list(range(10))
    binary_mask = np.ones(10, '?')
    binary_mask[:5] = False
    # so channels 5, 6, 7, 8, 9 should be active
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels, transpose=True)
    map_source.set_channel_mask(binary_mask)
    assert (map_source.binary_channel_mask == binary_mask).all(), 'binary mask wrong in transpose'
    data = f['data'][:, :][:, electrode_channels].T
    assert np.all(data[5:, 100:200] == map_source[:, 100:200]), 'channel masking failed in transpose'
    # unmask
    map_source.set_channel_mask(None)
    binary_mask[:] = True
    assert (map_source.binary_channel_mask == binary_mask).all(), 'binary mask wrong'
    data = f['data'][:, :][:, electrode_channels].T
    assert np.all(data[:, 100:200] == map_source[:, 100:200]), 'channel masking failed'
def test_channel_slicing():
    """`channels_are_maps(True)` makes a channel slice return a new map."""
    f = _create_hdf5()
    electrode_channels = list(range(6, 17))
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels, units_scale=5.0)
    data_first_channels = map_source[:3, :]
    with map_source.channels_are_maps(True):
        first_channels = map_source[:3]
        assert isinstance(first_channels, MappedSource), 'slice did not return new map'
        assert np.array_equal(data_first_channels, first_channels[:, :]), 'new map data mis-mapped'
    # outside the context, slicing yields a plain array
    first_channels = map_source[:3]
    assert isinstance(first_channels, np.ndarray), 'slice-as-array failed'
    assert np.array_equal(data_first_channels, first_channels), 'slice-as-array wrong data'
def test_channel_slicingT():
    """Channel slicing as maps, with a transposed on-disk array."""
    f = _create_hdf5(transpose=True)
    electrode_channels = list(range(6, 17))
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels, transpose=True, units_scale=5.0)
    data_first_channels = map_source[:3, :]
    with map_source.channels_are_maps(True):
        first_channels = map_source[:3]
        assert isinstance(first_channels, MappedSource), 'slice did not return new map'
        assert np.array_equal(data_first_channels, first_channels[:, :]), 'new map data mis-mapped'
    first_channels = map_source[:3]
    assert isinstance(first_channels, np.ndarray), 'slice-as-array failed'
    assert np.array_equal(data_first_channels, first_channels), 'slice-as-array wrong data'
def test_channel_slicing_with_mask():
    """Channel slicing as maps interacts correctly with an active mask."""
    f = _create_hdf5()
    electrode_channels = list(range(6, 17))
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
    mask = map_source.binary_channel_mask
    mask[:5] = False
    map_source.set_channel_mask(mask)
    data_first_channels = map_source[:3, :]
    with map_source.channels_are_maps(True):
        first_channels = map_source[:3]
        assert isinstance(first_channels, MappedSource), 'slice did not return new map'
        assert np.array_equal(data_first_channels, first_channels[:, :]), 'new map data mis-mapped'
    first_channels = map_source[:3]
    assert isinstance(first_channels, np.ndarray), 'slice-as-array failed'
    assert np.array_equal(data_first_channels, first_channels), 'slice-as-array wrong data'
def test_big_slicing_exception():
    """A full-array read over the memory limit raises MemoryBlowOutError.

    The memory limit is temporarily overridden to half the array's byte
    size so the full slice must blow out; the override is always popped.
    """
    import ecogdata.expconfig.global_config as globalconfig
    f = _create_hdf5()
    data = f['data']
    globalconfig.OVERRIDE['memory_limit'] = data.size * data.dtype.itemsize / 2.0
    map_source = MappedSource.from_hdf_sources(f, 'data')
    with pytest.raises(MemoryBlowOutError):
        try:
            map_source[:, :]
        finally:
            # the original also had `except Exception as e: raise e`,
            # which was a no-op; try/finally alone restores the override
            globalconfig.OVERRIDE.pop('memory_limit')
def test_big_slicing_allowed():
    """The `big_slices(True)` context permits an over-limit read."""
    import ecogdata.expconfig.global_config as globalconfig
    f = _create_hdf5()
    data = f['data']
    # force the limit below the full-array size
    globalconfig.OVERRIDE['memory_limit'] = data.size * data.dtype.itemsize / 2.0
    map_source = MappedSource.from_hdf_sources(f, 'data')
    try:
        with map_source.big_slices(True):
            _ = map_source[:, :]
    except MemoryBlowOutError as e:
        assert False, 'Big slicing context failed'
    finally:
        globalconfig.OVERRIDE.pop('memory_limit')
def test_big_slicing_allowed_always():
    """`raise_on_big_slice=False` permanently disables the blow-out check."""
    import ecogdata.expconfig.global_config as globalconfig
    f = _create_hdf5()
    data = f['data']
    globalconfig.OVERRIDE['memory_limit'] = data.size * data.dtype.itemsize / 2.0
    map_source = MappedSource.from_hdf_sources(f, 'data', raise_on_big_slice=False)
    try:
        _ = map_source[:, :]
    except MemoryBlowOutError as e:
        assert False, 'Big slicing context failed'
    finally:
        globalconfig.OVERRIDE.pop('memory_limit')
def test_write():
    """Writes through the map round-trip, with and without a channel mask."""
    f = _create_hdf5()
    electrode_channels = list(range(10))
    binary_mask = np.ones(10, '?')
    binary_mask[:5] = False
    # so channels 5, 6, 7, 8, 9 should be active
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
    shp = map_source.shape
    rand_pattern = np.random.randint(0, 100, size=(2, shp[1]))
    map_source[:2] = rand_pattern
    # use full-slice syntax to get data
    assert np.array_equal(map_source[:2, :], rand_pattern), 'write failed (map subset)'
    map_source.set_channel_mask(binary_mask)
    # write again
    map_source[:2] = rand_pattern
    assert np.array_equal(map_source[:2, :], rand_pattern), 'write failed (map subset and mask)'
def test_write_to_binder():
    """A write spanning the boundary between two underlying files round-trips."""
    files = [_create_hdf5() for i in range(3)]
    electrode_channels = list(range(10))
    binary_mask = np.ones(10, '?')
    binary_mask[:5] = False
    # so channels 5, 6, 7, 8, 9 should be active
    map_source = MappedSource.from_hdf_sources(files, 'data', electrode_channels=electrode_channels)
    # make a write that spans buffers
    single_length = files[0]['data'].shape[1]
    rand_pattern = np.random.randint(0, 100, size=(2, 205))
    sl = np.s_[:2, single_length - 100: single_length + 105]
    map_source[sl] = rand_pattern
    # use full-slice syntax to get data
    assert np.array_equal(map_source[sl], rand_pattern), 'write failed to binder (map subset)'
    map_source.set_channel_mask(binary_mask)
    # write again
    map_source[sl] = rand_pattern
    assert np.array_equal(map_source[sl], rand_pattern), 'write failed to binder (map subset and mask)'
def test_iter():
    """Block iteration yields forward and reversed sample blocks."""
    f = _create_hdf5()
    electrode_channels = [2, 4, 6, 8]
    data = f['data'][:]
    # block size chosen so the data splits into exactly two blocks
    block_size = data.shape[1] // 2 + 100
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
    blocks = list(map_source.iter_blocks(block_size))
    assert (data[electrode_channels][:, :block_size] == blocks[0]).all(), 'first block wrong'
    assert (data[electrode_channels][:, block_size:] == blocks[1]).all(), 'second block wrong'
    blocks = list(map_source.iter_blocks(block_size, reverse=True))
    assert (data[electrode_channels][:, block_size:][:, ::-1] == blocks[0]).all(), 'first rev block wrong'
    assert (data[electrode_channels][:, :block_size][:, ::-1] == blocks[1]).all(), 'second rev block wrong'
def test_iter_binder():
    """Block iteration over a multi-file (binder) source."""
    files = [_create_hdf5(n_cols=100) for i in range(3)]
    electrode_channels = [2, 4, 6, 8]
    data = np.concatenate([f['data'][:] for f in files], axis=1)
    block_size = data.shape[1] // 2 + 20
    map_source = MappedSource.from_hdf_sources(files, 'data', electrode_channels=electrode_channels)
    blocks = list(map_source.iter_blocks(block_size))
    assert (data[electrode_channels][:, :block_size] == blocks[0]).all(), 'first block wrong'
    assert (data[electrode_channels][:, block_size:] == blocks[1]).all(), 'second block wrong'
    blocks = list(map_source.iter_blocks(block_size, reverse=True))
    assert (data[electrode_channels][:, block_size:][:, ::-1] == blocks[0]).all(), 'first rev block wrong'
    assert (data[electrode_channels][:, :block_size][:, ::-1] == blocks[1]).all(), 'second rev block wrong'
def test_iter_overlap():
    """Overlapping block iteration, forward and reversed, incl. partial tail."""
    f = _create_hdf5(n_cols=100)
    data = f['data'][:]
    block_size = 20
    overlap = 10
    map_source = MappedSource.from_hdf_sources(f, 'data')
    blocks = list(map_source.iter_blocks(block_size, overlap=overlap))
    assert (data[:, :block_size] == blocks[0]).all(), 'first block wrong'
    assert (data[:, (block_size - overlap):(2 * block_size - overlap)] == blocks[1]).all(), 'second block wrong'
    # last block is a partial, starting at index 90
    assert (data[:, -10:] == blocks[-1]).all(), 'last block wrong'
    blocks = list(map_source.iter_blocks(block_size, reverse=True, overlap=overlap))
    assert (data[:, :block_size] == blocks[-1][:, ::-1]).all(), 'first block wrong'
    assert (data[:, (block_size - overlap):(2 * block_size - overlap)] == blocks[-2][:, ::-1]).all(), 'second block wrong'
    assert (data[:, -10:] == blocks[0][:, ::-1]).all(), 'last block wrong'
def test_iter_overlap_binder():
    """Overlapping block iteration over a multi-file (binder) source."""
    files = [_create_hdf5(n_cols=100) for i in range(3)]
    data = np.concatenate([f['data'][:] for f in files], axis=1)
    block_size = 20
    overlap = 10
    map_source = MappedSource.from_hdf_sources(files, 'data')
    blocks = list(map_source.iter_blocks(block_size, overlap=overlap))
    assert (data[:, :block_size] == blocks[0]).all(), 'first block wrong'
    assert (data[:, (block_size - overlap):(2 * block_size - overlap)] == blocks[1]).all(), 'second block wrong'
    # last block is a partial, starting at index 90
    assert (data[:, -10:] == blocks[-1]).all(), 'last block wrong'
    blocks = list(map_source.iter_blocks(block_size, reverse=True, overlap=overlap))
    assert (data[:, :block_size] == blocks[-1][:, ::-1]).all(), 'first block wrong'
    assert (data[:, (block_size - overlap):(2 * block_size - overlap)] == blocks[-2][:, ::-1]).all(), 'second block wrong'
    assert (data[:, -10:] == blocks[0][:, ::-1]).all(), 'last block wrong'
def test_iterT():
    """Block iteration with a transposed on-disk array."""
    f = _create_hdf5(transpose=True)
    electrode_channels = [2, 4, 6, 8]
    data = f['data'][:].T
    block_size = data.shape[1] // 2 + 100
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels, transpose=True)
    blocks = list(map_source.iter_blocks(block_size))
    assert (data[electrode_channels][:, :block_size] == blocks[0]).all(), 'first block wrong in transpose'
    assert (data[electrode_channels][:, block_size:] == blocks[1]).all(), 'second block wrong in transpose'
def test_iterT_binder():
    """Block iteration over transposed multi-file (binder) sources."""
    files = [_create_hdf5(transpose=True, n_cols=100) for i in range(3)]
    data = np.concatenate([f['data'][:] for f in files], axis=0).T
    electrode_channels = [2, 4, 6, 8]
    block_size = data.shape[1] // 2 + 20
    map_source = MappedSource.from_hdf_sources(files, 'data', electrode_channels=electrode_channels, transpose=True)
    blocks = list(map_source.iter_blocks(block_size))
    assert (data[electrode_channels][:, :block_size] == blocks[0]).all(), 'first block wrong in transpose'
    assert (data[electrode_channels][:, block_size:] == blocks[1]).all(), 'second block wrong in transpose'
def test_iter_channels():
    """Channel-wise iteration yields channel groups matching array_split."""
    f = _create_hdf5(n_rows=10, n_cols=100)
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=[2, 4, 6, 8, 9])
    data = f['data'][:]
    channel_blocks = []
    for chans in map_source.iter_channels(chans_per_block=2):
        channel_blocks.append(chans)
    # 5 channels at 2 per block -> 3 groups, matching np.array_split
    for n, chans in enumerate(np.array_split(data[[2, 4, 6, 8, 9]], 3)):
        assert np.array_equal(channel_blocks[n], chans), 'channel block {} not equal'.format(n)
def test_iter_channelsT():
    """Channel-wise iteration with a transposed on-disk array."""
    f = _create_hdf5(n_rows=10, n_cols=100, transpose=True)
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=[2, 4, 6, 8, 9], transpose=True)
    data = f['data'][:].T
    channel_blocks = []
    for chans in map_source.iter_channels(chans_per_block=2):
        channel_blocks.append(chans)
    for n, chans in enumerate(np.array_split(data[[2, 4, 6, 8, 9]], 3)):
        assert np.array_equal(channel_blocks[n], chans), 'channel block {} not equal'.format(n)
def _clean_up_hdf_files(temp_files):
for f in temp_files:
name = f.filename
f.close()
if os.path.exists(name):
os.unlink(name)
def test_basic_mirror():
    """Mirror a mapped source to a new mapped file and to memory.

    Checks mirrored shape, writeability, and the result types for
    mapped vs. un-mapped mirrors; temporary HDF5 files are cleaned up
    even when an assertion fails.
    """
    # Bind before the try block: in the original, an exception raised
    # before `temp_files = []` made the finally clause fail with a
    # NameError that masked the real error.
    temp_files = []
    try:
        f = _create_hdf5(n_rows=25, n_cols=500)
        electrode_channels = [2, 4, 6, 8]
        map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
        clone1 = map_source.mirror(new_rate_ratio=None, writeable=True, mapped=True, channel_compatible=False,
                                   filename='foo.h5')
        temp_files.append(clone1.data_buffer._array.file)
        assert clone1.shape == (len(electrode_channels), 500), 'wrong # of channels'
        assert clone1.writeable, 'Should be writeable'
        assert isinstance(clone1, MappedSource), 'Clone is not a MappedSource'
        clone2 = map_source.mirror(new_rate_ratio=None, mapped=False, channel_compatible=False)
        assert isinstance(clone2, PlainArraySource), 'Not-mapped file should be PlainArraySource'
    # the original's `except Exception as e: raise e` was a no-op and is dropped
    finally:
        _clean_up_hdf_files(temp_files)
def test_mirror_modes():
    """Mirror shape options: channel-subset vs. channel-compatible buffers."""
    f = _create_hdf5(n_rows=25, n_cols=500)
    electrode_channels = [2, 4, 6, 8]
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels)
    clone1 = map_source.mirror(writeable=True, mapped=True, channel_compatible=False)
    assert clone1.shape == (len(electrode_channels), 500), 'wrong # of samples'
    clone2 = map_source.mirror(writeable=True, mapped=True, channel_compatible=True)
    assert clone2.data_buffer.shape == (25, 500), 'wrong # of channels for channel-compat'
    f = _create_hdf5(n_rows=25, n_cols=500, transpose=True)
    map_source = MappedSource.from_hdf_sources(f, 'data', electrode_channels=electrode_channels, transpose=True)
    clone3 = map_source.mirror(mapped=True, channel_compatible=True)
    assert clone3.data_buffer.shape == (25, 500), 'mapped mirror did not reverse the source transpose'
| 49.356674
| 129
| 0.701055
| 3,159
| 22,556
| 4.767015
| 0.076923
| 0.07112
| 0.046683
| 0.063882
| 0.875423
| 0.863338
| 0.848463
| 0.826483
| 0.821768
| 0.791354
| 0
| 0.027744
| 0.165854
| 22,556
| 456
| 130
| 49.464912
| 0.772628
| 0.030014
| 0
| 0.665768
| 0
| 0
| 0.12683
| 0
| 0
| 0
| 0
| 0
| 0.256065
| 1
| 0.083558
| false
| 0
| 0.026954
| 0
| 0.110512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c6a1975415d303fbd157872be29baae94924538
| 77,326
|
py
|
Python
|
tests/e2e/test_api_assembly_strategy_combinations_book_then_lang.py
|
danparisd/InterleavedResourcesGenerator
|
150e6c223e2eb9f63ebb41c6ba4b7c5d3337e1dc
|
[
"MIT"
] | null | null | null |
tests/e2e/test_api_assembly_strategy_combinations_book_then_lang.py
|
danparisd/InterleavedResourcesGenerator
|
150e6c223e2eb9f63ebb41c6ba4b7c5d3337e1dc
|
[
"MIT"
] | null | null | null |
tests/e2e/test_api_assembly_strategy_combinations_book_then_lang.py
|
danparisd/InterleavedResourcesGenerator
|
150e6c223e2eb9f63ebb41c6ba4b7c5d3337e1dc
|
[
"MIT"
] | 1
|
2021-09-10T20:37:07.000Z
|
2021-09-10T20:37:07.000Z
|
import os
import pathlib
import re
import bs4
import pytest
import requests
from document.config import settings
from document.entrypoints.app import app
from fastapi.testclient import TestClient
##################################################
## Tests for assembly strategy book -hen-language
def check_finished_document_with_verses_success(
    response: requests.Response, finished_document_path: str
) -> None:
    """
    Helper to keep tests DRY.

    Check that the finished_document_path exists and also check that
    the HTML file associated with it exists and includes verses_html.

    :param response: the /documents POST response whose JSON payload is verified.
    :param finished_document_path: PDF filename (relative to settings.output_dir()).
    :raises AssertionError: when the PDF/HTML files are missing, the JSON payload
        differs, the HTML lacks a body or verse spans, or the response failed.
    """
    finished_document_path = os.path.join(settings.output_dir(), finished_document_path)
    assert os.path.isfile(finished_document_path)
    # Use splitext rather than split(".")[0]: a dot anywhere in the output
    # directory (e.g. "./out" or "out.v2") would otherwise truncate the path.
    html_file = "{}.html".format(os.path.splitext(finished_document_path)[0])
    assert os.path.isfile(html_file)
    assert response.json() == {
        "finished_document_request_key": pathlib.Path(finished_document_path).stem,
        "message": settings.SUCCESS_MESSAGE,
    }
    with open(html_file, "r") as fin:
        html = fin.read()
        parser = bs4.BeautifulSoup(html, "html.parser")
        # NOTE: the correct module path is bs4.element (singular); the old
        # bs4.elements annotation named a non-existent module.
        body: bs4.element.ResultSet = parser.find_all("body")
        assert body
        verses_html: bs4.element.ResultSet = parser.find_all(
            "span", attrs={"class": "v-num"}
        )
        assert verses_html
    assert response.ok
def check_finished_document_with_body_success(
    response: requests.Response, finished_document_path: str
) -> None:
    """
    Helper to keep tests DRY.

    Check that the finished_document_path exists and also check that
    the HTML file associated with it exists and includes body.

    :param response: the /documents POST response whose JSON payload is verified.
    :param finished_document_path: PDF filename (relative to settings.output_dir()).
    :raises AssertionError: when the PDF/HTML files are missing, the JSON payload
        differs, the HTML lacks a body, or the response failed.
    """
    finished_document_path = os.path.join(settings.output_dir(), finished_document_path)
    assert os.path.isfile(finished_document_path)
    # Use splitext rather than split(".")[0]: a dot anywhere in the output
    # directory (e.g. "./out" or "out.v2") would otherwise truncate the path.
    html_file = "{}.html".format(os.path.splitext(finished_document_path)[0])
    assert os.path.isfile(html_file)
    assert response.json() == {
        "finished_document_request_key": pathlib.Path(finished_document_path).stem,
        "message": settings.SUCCESS_MESSAGE,
    }
    with open(html_file, "r") as fin:
        html = fin.read()
        parser = bs4.BeautifulSoup(html, "html.parser")
        # NOTE: bs4.element (singular) is the real module; bs4.elements was wrong.
        body: bs4.element.ResultSet = parser.find_all("body")
        assert body
    assert response.ok
def check_finished_document_without_verses_success(
    response: requests.Response, finished_document_path: str
) -> None:
    """
    Helper to keep tests DRY.

    Check that the finished_document_path exists and also check that
    the HTML file associated with it exists and includes body but not
    verses_html.

    :param response: the /documents POST response.
    :param finished_document_path: PDF filename (relative to settings.output_dir()).
    :raises AssertionError: when files are missing, the HTML lacks a body,
        verse spans ARE present, or the response failed.
    """
    finished_document_path = os.path.join(settings.output_dir(), finished_document_path)
    assert os.path.exists(finished_document_path)
    # Use splitext rather than split(".")[0]: a dot anywhere in the output
    # directory (e.g. "./out" or "out.v2") would otherwise truncate the path.
    html_file = "{}.html".format(os.path.splitext(finished_document_path)[0])
    assert os.path.exists(html_file)
    with open(html_file, "r") as fin:
        html = fin.read()
        parser = bs4.BeautifulSoup(html, "html.parser")
        # NOTE: bs4.element (singular) is the real module; bs4.elements was wrong.
        body: bs4.element.ResultSet = parser.find_all("body")
        assert body
        verses_html: bs4.element.ResultSet = parser.find_all(
            "span", attrs={"class": "v-num"}
        )
        # reg is malformed and udb does not exist, thus there is
        # no html generated
        assert not verses_html
    assert response.ok
def test_en_ulb_wa_col_en_tn_wa_col_en_tq_wa_col_en_tw_wa_col_fr_f10_col_fr_tn_col_fr_tq_col_fr_tw_col_book_language_order() -> None:
    """English WA suite plus French f10/tn/tq/tw for Colossians."""
    requested_resources = [
        ("en", "ulb-wa", "col"),
        ("en", "tn-wa", "col"),
        ("en", "tq-wa", "col"),
        ("en", "tw-wa", "col"),
        ("fr", "f10", "col"),
        ("fr", "tn", "col"),
        ("fr", "tq", "col"),
        ("fr", "tw", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-ulb-wa-col_en-tn-wa-col_en-tq-wa-col_en-tw-wa-col_fr-f10-col_fr-tn-col_fr-tq-col_fr-tw-col_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tn_wa_col_en_tq_wa_col_en_tw_wa_col_pt_br_ulb_col_pt_br_tn_col_pt_br_tq_col_pt_br_tw_col_book_language_order() -> None:
    """English WA suite plus Brazilian Portuguese ulb/tn/tq/tw for Colossians."""
    requested_resources = [
        ("en", "ulb-wa", "col"),
        ("en", "tn-wa", "col"),
        ("en", "tq-wa", "col"),
        ("en", "tw-wa", "col"),
        ("pt-br", "ulb", "col"),
        ("pt-br", "tn", "col"),
        ("pt-br", "tq", "col"),
        ("pt-br", "tw", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-ulb-wa-col_en-tn-wa-col_en-tq-wa-col_en-tw-wa-col_pt-br-ulb-col_pt-br-tn-col_pt-br-tq-col_pt-br-tw-col_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_pt_br_ulb_col_pt_br_tn_col_pt_br_tq_col_pt_br_tw_col_book_language_order() -> None:
    """Brazilian Portuguese ulb/tn/tq/tw for Colossians."""
    requested_resources = [
        ("pt-br", "ulb", "col"),
        ("pt-br", "tn", "col"),
        ("pt-br", "tq", "col"),
        ("pt-br", "tw", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "pt-br-ulb-col_pt-br-tn-col_pt-br-tq-col_pt-br-tw-col_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_fr_f10_col_fr_tn_col_fr_tq_col_fr_tw_col_book_language_order() -> None:
    """French f10/tn/tq/tw for Colossians."""
    requested_resources = [
        ("fr", "f10", "col"),
        ("fr", "tn", "col"),
        ("fr", "tq", "col"),
        ("fr", "tw", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "fr-f10-col_fr-tn-col_fr-tq-col_fr-tw-col_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tn_wa_col_en_tq_wa_col_en_tw_wa_col_tl_ulb_col_tl_tn_col_tl_tq_col_tl_tw_col_tl_udb_col_book_language_order() -> None:
    """English WA suite plus Tagalog ulb/tn/tq/tw/udb for Colossians."""
    requested_resources = [
        ("en", "ulb-wa", "col"),
        ("en", "tn-wa", "col"),
        ("en", "tq-wa", "col"),
        ("en", "tw-wa", "col"),
        ("tl", "ulb", "col"),
        ("tl", "tn", "col"),
        ("tl", "tq", "col"),
        ("tl", "tw", "col"),
        ("tl", "udb", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-ulb-wa-col_en-tn-wa-col_en-tq-wa-col_en-tw-wa-col_tl-ulb-col_tl-tn-col_tl-tq-col_tl-tw-col_tl-udb-col_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_tit_en_tn_wa_tit_book_language_order() -> None:
    """English ulb-wa and tn-wa for the book of Titus (tit)."""
    requested_resources = [
        ("en", "ulb-wa", "tit"),
        ("en", "tn-wa", "tit"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        # This test checks the PDF and response payload inline rather than
        # through one of the shared helpers.
        finished_document_path = os.path.join(
            settings.output_dir(),
            "en-ulb-wa-tit_en-tn-wa-tit_book_language_order.pdf",
        )
        assert os.path.isfile(finished_document_path)
        assert response.json() == {
            "finished_document_request_key": pathlib.Path(finished_document_path).stem,
            "message": settings.SUCCESS_MESSAGE,
        }
def test_sw_ulb_col_sw_tn_col_book_language_order() -> None:
    """Swahili ulb and tn for Colossians."""
    requested_resources = [
        ("sw", "ulb", "col"),
        ("sw", "tn", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "sw-ulb-col_sw-tn-col_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_sw_ulb_col_sw_tn_col_sw_ulb_tit_sw_tn_tit_book_language_order() -> None:
    """Swahili ulb and tn for both Colossians and Titus."""
    requested_resources = [
        ("sw", "ulb", "col"),
        ("sw", "tn", "col"),
        ("sw", "ulb", "tit"),
        ("sw", "tn", "tit"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "sw-ulb-col_sw-tn-col_sw-ulb-tit_sw-tn-tit_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tn_wa_col_sw_ulb_col_sw_tn_col_sw_ulb_tit_sw_tn_tit_book_language_order() -> None:
    """English WA ulb/tn for Colossians plus Swahili ulb/tn for Colossians and Titus."""
    requested_resources = [
        ("en", "ulb-wa", "col"),
        ("en", "tn-wa", "col"),
        ("sw", "ulb", "col"),
        ("sw", "tn", "col"),
        ("sw", "ulb", "tit"),
        ("sw", "tn", "tit"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-ulb-wa-col_en-tn-wa-col_sw-ulb-col_sw-tn-col_sw-ulb-tit_sw-tn-tit_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tn_wa_col_en_tq_wa_col_sw_ulb_col_sw_tn_col_sw_tq_col_sw_ulb_tit_sw_tn_tit_sw_tq_tit_book_language_order() -> None:
    """English WA ulb/tn/tq for Colossians plus Swahili ulb/tn/tq for Colossians and Titus."""
    requested_resources = [
        ("en", "ulb-wa", "col"),
        ("en", "tn-wa", "col"),
        ("en", "tq-wa", "col"),
        ("sw", "ulb", "col"),
        ("sw", "tn", "col"),
        ("sw", "tq", "col"),
        ("sw", "ulb", "tit"),
        ("sw", "tn", "tit"),
        ("sw", "tq", "tit"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-ulb-wa-col_en-tn-wa-col_en-tq-wa-col_sw-ulb-col_sw-tn-col_sw-tq-col_sw-ulb-tit_sw-tn-tit_sw-tq-tit_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_col_en_tq_wa_col_sw_ulb_col_sw_tq_col_sw_ulb_tit_sw_tq_tit_book_language_order() -> None:
    """English WA ulb/tq for Colossians plus Swahili ulb/tq for Colossians and Titus."""
    requested_resources = [
        ("en", "ulb-wa", "col"),
        ("en", "tq-wa", "col"),
        ("sw", "ulb", "col"),
        ("sw", "tq", "col"),
        ("sw", "ulb", "tit"),
        ("sw", "tq", "tit"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-ulb-wa-col_en-tq-wa-col_sw-ulb-col_sw-tq-col_sw-ulb-tit_sw-tq-tit_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_en_tn_wa_col_en_tq_wa_col_en_tw_wa_col_sw_tn_col_sw_tq_col_sw_tw_col_sw_tn_tit_sw_tq_tit_sw_tw_tit_book_language_order() -> None:
    """Helps-only request (no USFM): English WA tn/tq/tw plus Swahili tn/tq/tw for two books."""
    requested_resources = [
        ("en", "tn-wa", "col"),
        ("en", "tq-wa", "col"),
        ("en", "tw-wa", "col"),
        ("sw", "tn", "col"),
        ("sw", "tq", "col"),
        ("sw", "tw", "col"),
        ("sw", "tn", "tit"),
        ("sw", "tq", "tit"),
        ("sw", "tw", "tit"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-tn-wa-col_en-tq-wa-col_en-tw-wa-col_sw-tn-col_sw-tq-col_sw-tw-col_sw-tn-tit_sw-tq-tit_sw-tw-tit_book_language_order.pdf"
        check_finished_document_with_body_success(response, finished_document_path)
def test_en_tn_wa_col_en_tw_wa_col_sw_tn_col_sw_tw_col_sw_tn_tit_sw_tw_tit_book_language_order() -> None:
    """Helps-only request: English WA tn/tw plus Swahili tn/tw for two books."""
    requested_resources = [
        ("en", "tn-wa", "col"),
        ("en", "tw-wa", "col"),
        ("sw", "tn", "col"),
        ("sw", "tw", "col"),
        ("sw", "tn", "tit"),
        ("sw", "tw", "tit"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-tn-wa-col_en-tw-wa-col_sw-tn-col_sw-tw-col_sw-tn-tit_sw-tw-tit_book_language_order.pdf"
        check_finished_document_with_body_success(response, finished_document_path)
def test_en_tq_wa_col_en_tw_wa_col_sw_tq_col_sw_tw_col_sw_tq_tit_sw_tw_tit_book_language_order() -> None:
    """Helps-only request: English WA tq/tw plus Swahili tq/tw for Colossians."""
    requested_resources = [
        ("en", "tq-wa", "col"),
        ("en", "tw-wa", "col"),
        ("sw", "tq", "col"),
        ("sw", "tw", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "en-tq-wa-col_en-tw-wa-col_sw-tq-col_sw-tw-col_book_language_order.pdf"
        )
        check_finished_document_with_body_success(response, finished_document_path)
def test_en_tw_wa_col_sw_tw_col_sw_tw_tit_book_language_order() -> None:
    """Helps-only request: English WA tw plus Swahili tw for Colossians."""
    requested_resources = [
        ("en", "tw-wa", "col"),
        ("sw", "tw", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-tw-wa-col_sw-tw-col_book_language_order.pdf"
        check_finished_document_with_body_success(response, finished_document_path)
def test_en_tn_wa_col_en_tq_wa_col_sw_tn_col_sw_tq_col_sw_tn_tit_sw_tq_tit_book_language_order() -> None:
    """Helps-only request: English WA tn/tq plus Swahili tn/tq for Colossians."""
    requested_resources = [
        ("en", "tn-wa", "col"),
        ("en", "tq-wa", "col"),
        ("sw", "tn", "col"),
        ("sw", "tq", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "en-tn-wa-col_en-tq-wa-col_sw-tn-col_sw-tq-col_book_language_order.pdf"
        )
        check_finished_document_with_body_success(response, finished_document_path)
def test_en_tq_wa_col_sw_tq_col_sw_tq_tit_book_language_order() -> None:
    """Helps-only request: English WA tq plus Swahili tq for Colossians."""
    requested_resources = [
        ("en", "tq-wa", "col"),
        ("sw", "tq", "col"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "en-tq-wa-col_sw-tq-col_book_language_order.pdf"
        check_finished_document_with_body_success(response, finished_document_path)
def test_en_tn_wa_col_sw_tn_col_sw_tn_tit_book_language_order() -> None:
    """Helps-only request: English WA tn plus Swahili tn for Colossians and Titus."""
    requested_resources = [
        ("en", "tn-wa", "col"),
        ("sw", "tn", "col"),
        ("sw", "tn", "tit"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "en-tn-wa-col_sw-tn-col_sw-tn-tit_book_language_order.pdf"
        )
        check_finished_document_with_body_success(response, finished_document_path)
def test_en_ulb_wa_col_sw_ulb_col_sw_ulb_tit_book_language_order() -> None:
    """USFM-only request: English WA ulb plus Swahili ulb for Colossians and Titus."""
    requested_resources = [
        ("en", "ulb-wa", "col"),
        ("sw", "ulb", "col"),
        ("sw", "ulb", "tit"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "en-ulb-wa-col_sw-ulb-col_sw-ulb-tit_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_gu_ulb_mrk_gu_tn_mrk_gu_tq_mrk_gu_tw_mrk_gu_udb_mrk_book_language_order() -> None:
    """Gujarati ulb/tn/tq/tw/udb for Mark."""
    requested_resources = [
        ("gu", "ulb", "mrk"),
        ("gu", "tn", "mrk"),
        ("gu", "tq", "mrk"),
        ("gu", "tw", "mrk"),
        ("gu", "udb", "mrk"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "gu-ulb-mrk_gu-tn-mrk_gu-tq-mrk_gu-tw-mrk_gu-udb-mrk_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_mr_ulb_mrk_mr_tn_mrk_mr_tq_mrk_mr_tw_mrk_mr_udb_mrk_book_language_order() -> None:
    """Marathi ulb/tn/tq/tw/udb for Mark."""
    requested_resources = [
        ("mr", "ulb", "mrk"),
        ("mr", "tn", "mrk"),
        ("mr", "tq", "mrk"),
        ("mr", "tw", "mrk"),
        ("mr", "udb", "mrk"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "mr-ulb-mrk_mr-tn-mrk_mr-tq-mrk_mr-tw-mrk_mr-udb-mrk_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_mr_ulb_mrk_mr_tn_mrk_mr_tq_mrk_mr_udb_mrk_book_language_order() -> None:
    """Marathi ulb/tn/tq/udb for Mark."""
    requested_resources = [
        ("mr", "ulb", "mrk"),
        ("mr", "tn", "mrk"),
        ("mr", "tq", "mrk"),
        ("mr", "udb", "mrk"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "mr-ulb-mrk_mr-tn-mrk_mr-tq-mrk_mr-udb-mrk_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_mr_ulb_mrk_mr_tn_mrk_mr_tw_mrk_mr_udb_mrk_book_language_order() -> None:
    """Marathi ulb/tn/tw/udb for Mark."""
    requested_resources = [
        ("mr", "ulb", "mrk"),
        ("mr", "tn", "mrk"),
        ("mr", "tw", "mrk"),
        ("mr", "udb", "mrk"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "mr-ulb-mrk_mr-tn-mrk_mr-tw-mrk_mr-udb-mrk_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_mr_ulb_mrk_mr_tn_mrk_mr_udb_mrk_book_language_order() -> None:
    """Marathi ulb/tn/udb for Mark."""
    requested_resources = [
        ("mr", "ulb", "mrk"),
        ("mr", "tn", "mrk"),
        ("mr", "udb", "mrk"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "mr-ulb-mrk_mr-tn-mrk_mr-udb-mrk_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_mr_ulb_mrk_mr_tq_mrk_mr_udb_mrk_book_language_order() -> None:
    """Marathi ulb/tq/udb for Mark."""
    requested_resources = [
        ("mr", "ulb", "mrk"),
        ("mr", "tq", "mrk"),
        ("mr", "udb", "mrk"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "mr-ulb-mrk_mr-tq-mrk_mr-udb-mrk_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
@pytest.mark.skip
def test_gu_ulb_mic_gu_tn_mic_gu_tq_mic_gu_tw_mic_gu_ta_mic_book_language_order() -> None:
    """Gujarati ulb/tn/tq/tw/ta for Micah (currently skipped)."""
    requested_resources = [
        ("gu", "ulb", "mic"),
        ("gu", "tn", "mic"),
        ("gu", "tq", "mic"),
        ("gu", "tw", "mic"),
        ("gu", "ta", "mic"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "gu-ulb-mic_gu-tn-mic_gu-tq-mic_gu-tw-mic_gu-ta-mic_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_tl_ulb_gen_tl_udb_gen_book_language_order() -> None:
    """Tagalog ulb and udb for Genesis."""
    requested_resources = [
        ("tl", "ulb", "gen"),
        ("tl", "udb", "gen"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "tl-ulb-gen_tl-udb-gen_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_gu_tn_mat_gu_tq_mat_gu_tw_mat_gu_udb_mat_book_language_order() -> None:
    """Gujarati tn/tq/tw/udb for Matthew."""
    requested_resources = [
        ("gu", "tn", "mat"),
        ("gu", "tq", "mat"),
        ("gu", "tw", "mat"),
        ("gu", "udb", "mat"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "gu-tn-mat_gu-tq-mat_gu-tw-mat_gu-udb-mat_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_gu_tn_mat_gu_tq_mat_gu_udb_mat_book_language_order() -> None:
    """Gujarati tn/tq/udb for Matthew."""
    requested_resources = [
        ("gu", "tn", "mat"),
        ("gu", "tq", "mat"),
        ("gu", "udb", "mat"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "gu-tn-mat_gu-tq-mat_gu-udb-mat_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_tl_tn_gen_tl_tw_gen_tl_udb_gen_book_language_order() -> None:
    """Tagalog tn/tw/udb for Genesis."""
    requested_resources = [
        ("tl", "tn", "gen"),
        ("tl", "tw", "gen"),
        ("tl", "udb", "gen"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = (
            "tl-tn-gen_tl-tw-gen_tl-udb-gen_book_language_order.pdf"
        )
        check_finished_document_with_verses_success(response, finished_document_path)
def test_tl_tq_gen_tl_udb_gen_book_language_order() -> None:
    """Tagalog tq and udb for Genesis."""
    requested_resources = [
        ("tl", "tq", "gen"),
        ("tl", "udb", "gen"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "tl-tq-gen_tl-udb-gen_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_tl_tw_gen_tl_udb_gen_book_language_order() -> None:
    """Tagalog tw and udb for Genesis."""
    requested_resources = [
        ("tl", "tw", "gen"),
        ("tl", "udb", "gen"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "tl-tw-gen_tl-udb-gen_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_tl_udb_gen_book_language_order() -> None:
    """Tagalog udb alone for Genesis."""
    requested_resources = [
        ("tl", "udb", "gen"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "tl-udb-gen_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_fr_ulb_rev_fr_tn_rev_fr_tq_rev_fr_tw_rev_fr_udb_rev_book_language_order() -> None:
    """Demonstrate listing unfound resources, in this case fr-udb-rev."""
    requested_resources = [
        ("fr", "ulb", "rev"),
        ("fr", "tn", "rev"),
        ("fr", "tq", "rev"),
        ("fr", "tw", "rev"),
        ("fr", "udb", "rev"),
    ]
    with TestClient(app=app, base_url=settings.api_test_url()) as client:
        response: requests.Response = client.post(
            "/documents",
            json={
                "email_address": settings.TO_EMAIL_ADDRESS,
                "assembly_strategy_kind": "book_language_order",
                "resource_requests": [
                    {"lang_code": lang, "resource_type": rtype, "resource_code": book}
                    for lang, rtype, book in requested_resources
                ],
            },
        )
        finished_document_path = "fr-ulb-rev_fr-tn-rev_fr-tq-rev_fr-tw-rev_fr-udb-rev_book_language_order.pdf"
        check_finished_document_with_verses_success(response, finished_document_path)
def test_fr_ulb_rev_fr_tn_rev_fr_tq_rev_fr_tw_rev_fr_f10_rev_book_language_order() -> None:
"""
Demonstrate two USFM resources, French, and use of a special
USFM resource: f10.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "fr",
"resource_type": "ulb",
"resource_code": "rev",
},
{
"lang_code": "fr",
"resource_type": "tn",
"resource_code": "rev",
},
{
"lang_code": "fr",
"resource_type": "tq",
"resource_code": "rev",
},
{
"lang_code": "fr",
"resource_type": "tw",
"resource_code": "rev",
},
{
"lang_code": "fr",
"resource_type": "f10",
"resource_code": "rev",
},
],
},
)
finished_document_path = "fr-ulb-rev_fr-tn-rev_fr-tq-rev_fr-tw-rev_fr-f10-rev_book_language_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_fr_ulb_rev_fr_tq_rev_fr_tw_rev_fr_f10_rev_book_language_order() -> None:
"""
Demonstrate two USFM resources, French, and use of a special
USFM resource: f10.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "fr",
"resource_type": "ulb",
"resource_code": "rev",
},
{
"lang_code": "fr",
"resource_type": "tq",
"resource_code": "rev",
},
{
"lang_code": "fr",
"resource_type": "tw",
"resource_code": "rev",
},
{
"lang_code": "fr",
"resource_type": "f10",
"resource_code": "rev",
},
],
},
)
finished_document_path = (
"fr-ulb-rev_fr-tq-rev_fr-tw-rev_fr-f10-rev_book_language_order.pdf"
)
check_finished_document_with_verses_success(response, finished_document_path)
def test_fr_ulb_rev_fr_tw_rev_fr_udb_rev_book_language_order() -> None:
"""Demonstrate listing unfound resources, in this case fr-udb-rev"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "fr",
"resource_type": "ulb",
"resource_code": "rev",
},
{
"lang_code": "fr",
"resource_type": "tw",
"resource_code": "rev",
},
{
"lang_code": "fr",
"resource_type": "f10",
"resource_code": "rev",
},
],
},
)
finished_document_path = (
"fr-ulb-rev_fr-tw-rev_fr-f10-rev_book_language_order.pdf"
)
check_finished_document_with_verses_success(response, finished_document_path)
def test_ndh_x_chindali_reg_mat_ndh_x_chindali_tn_mat_ndh_x_chindali_tq_mat_ndh_x_chindali_tw_mat_ndh_x_chindali_udb_mat_book_language_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "ndh-x-chindali",
"resource_type": "reg",
"resource_code": "mat",
},
{
"lang_code": "ndh-x-chindali",
"resource_type": "tn",
"resource_code": "mat",
},
{
"lang_code": "ndh-x-chindali",
"resource_type": "tq",
"resource_code": "mat",
},
{
"lang_code": "ndh-x-chindali",
"resource_type": "tw",
"resource_code": "mat",
},
{
"lang_code": "ndh-x-chindali",
"resource_type": "udb",
"resource_code": "mat",
},
],
},
)
finished_document_path = "ndh-x-chindali-reg-mat_ndh-x-chindali-tn-mat_ndh-x-chindali-tq-mat_ndh-x-chindali-tw-mat_ndh-x-chindali-udb-mat_book_language_order.pdf"
with pytest.raises(Exception):
check_finished_document_without_verses_success(
response, finished_document_path
)
def test_en_ulb_wa_col_en_tn_wa_col_en_tq_wa_col_en_tw_wa_col_es_419_ulb_col_es_419_tn_col_es_419_tq_col_es_419_tw_col_book_language_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tq-wa",
"resource_code": "col",
},
{
"lang_code": "en",
"resource_type": "tw-wa",
"resource_code": "col",
},
{
"lang_code": "es-419",
"resource_type": "ulb",
"resource_code": "col",
},
{
"lang_code": "es-419",
"resource_type": "tn",
"resource_code": "col",
},
{
"lang_code": "es-419",
"resource_type": "tq",
"resource_code": "col",
},
{
"lang_code": "es-419",
"resource_type": "tw",
"resource_code": "col",
},
],
},
)
finished_document_path = "en-ulb-wa-col_en-tn-wa-col_en-tq-wa-col_en-tw-wa-col_es-419-ulb-col_es-419-tn-col_es-419-tq-col_es-419-tw-col_book_language_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_es_ulb_col_es_tn_col_en_tq_col_es_tw_col_book_language_order() -> None:
"""
Ask for a combination of available and unavailable resources.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "es",
"resource_type": "ulb",
"resource_code": "col",
},
{
"lang_code": "es",
"resource_type": "tn",
"resource_code": "col",
},
{
"lang_code": "es",
"resource_type": "tq",
"resource_code": "col",
},
{
"lang_code": "es",
"resource_type": "tw",
"resource_code": "col",
},
],
},
)
finished_document_path = (
"es-ulb-col_es-tn-col_es-tq-col_es-tw-col_book_language_order.pdf"
)
check_finished_document_without_verses_success(response, finished_document_path)
def test_llx_ulb_col_llx_tn_col_en_tq_col_llx_tw_col_book_language_order() -> None:
"""
Ask for an unavailable resource and assert that the verses_html is
not generated.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "llx",
"resource_type": "ulb",
"resource_code": "col",
},
{
"lang_code": "llx",
"resource_type": "tn",
"resource_code": "col",
},
{
"lang_code": "llx",
"resource_type": "tq",
"resource_code": "col",
},
{
"lang_code": "llx",
"resource_type": "tw",
"resource_code": "col",
},
],
},
)
finished_document_path = (
"llx-ulb-col_llx-tn-col_llx-tq-col_llx-tw-col_book_language_order.pdf"
)
check_finished_document_without_verses_success(response, finished_document_path)
def test_llx_reg_col_llx_tn_col_en_tq_col_llx_tw_col_book_language_order() -> None:
"""
Ask for an unavailable resource and assert that the verses_html is
not generated.
"""
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "llx",
"resource_type": "reg",
"resource_code": "col",
},
{
"lang_code": "llx",
"resource_type": "tn",
"resource_code": "col",
},
{
"lang_code": "llx",
"resource_type": "tq",
"resource_code": "col",
},
{
"lang_code": "llx",
"resource_type": "tw",
"resource_code": "col",
},
],
},
)
finished_document_path = (
"llx-reg-col_llx-tn-col_llx-tq-col_llx-tw-col_book_language_order.pdf"
)
finished_document_path = os.path.join(
settings.output_dir(), finished_document_path
)
html_file = "{}.html".format(finished_document_path.split(".")[0])
assert os.path.exists(finished_document_path)
assert os.path.exists(html_file)
assert response.ok
with open(html_file, "r") as fin:
html = fin.read()
parser = bs4.BeautifulSoup(html, "html.parser")
body: bs4.elements.ResultSet = parser.find_all("body")
assert body
verses_html: bs4.elements.ResultSet = parser.find_all(
"span", attrs={"class": "v-num"}
)
# Resource requested doesn't exist or isn't available so
# we assert that the verses_html was not generated and
# thus not present in the document.
assert not verses_html
def test_es_419_ulb_col_es_419_tn_col_en_tq_col_es_419_tw_col_book_language_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "es-419",
"resource_type": "ulb",
"resource_code": "col",
},
{
"lang_code": "es-419",
"resource_type": "tn",
"resource_code": "col",
},
{
"lang_code": "es-419",
"resource_type": "tq",
"resource_code": "col",
},
{
"lang_code": "es-419",
"resource_type": "tw",
"resource_code": "col",
},
],
},
)
finished_document_path = "es-419-ulb-col_es-419-tn-col_es-419-tq-col_es-419-tw-col_book_language_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_es_419_ulb_rom_es_419_tn_rom_en_tq_rom_es_419_tw_rom_book_language_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "es-419",
"resource_type": "ulb",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "tn",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "tq",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "tw",
"resource_code": "rom",
},
],
},
)
finished_document_path = "es-419-ulb-rom_es-419-tn-rom_es-419-tq-rom_es-419-tw-rom_book_language_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_rom_en_tn_wa_rom_en_tq_wa_rom_en_tw_wa_rom_book_language_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "rom",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "rom",
},
{
"lang_code": "en",
"resource_type": "tq-wa",
"resource_code": "rom",
},
{
"lang_code": "en",
"resource_type": "tw-wa",
"resource_code": "rom",
},
],
},
)
finished_document_path = "en-ulb-wa-rom_en-tn-wa-rom_en-tq-wa-rom_en-tw-wa-rom_book_language_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
# BUG See output in ~/.ghq/bitbucket.org/foobar77/timesheets/worklog3.org [[id:6F839365-1C34-4F36-B056-A91B8E5E92B5][Logs]]
# @pytest.mark.skip
def test_en_ulb_wa_rom_en_tn_wa_rom_en_tq_wa_rom_en_tw_wa_rom_es_419_ulb_rom_es_419_tn_rom_en_tq_rom_es_419_tw_rom_book_language_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "rom",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "rom",
},
{
"lang_code": "en",
"resource_type": "tq-wa",
"resource_code": "rom",
},
{
"lang_code": "en",
"resource_type": "tw-wa",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "ulb",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "tn",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "tq",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "tw",
"resource_code": "rom",
},
],
},
)
finished_document_path = "en-ulb-wa-rom_en-tn-wa-rom_en-tq-wa-rom_en-tw-wa-rom_es-419-ulb-rom_es-419-tn-rom_es-419-tq-rom_es-419-tw-rom_book_language_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_en_ulb_wa_jon_en_tn_wa_jon_en_tq_wa_jon_en_tw_wa_jon_es_419_ulb_rom_es_419_tn_rom_en_tq_rom_es_419_tw_rom_book_language_order() -> None:
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "en",
"resource_type": "ulb-wa",
"resource_code": "jon",
},
{
"lang_code": "en",
"resource_type": "tn-wa",
"resource_code": "jon",
},
{
"lang_code": "en",
"resource_type": "tq-wa",
"resource_code": "jon",
},
{
"lang_code": "en",
"resource_type": "tw-wa",
"resource_code": "jon",
},
{
"lang_code": "es-419",
"resource_type": "ulb",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "tn",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "tq",
"resource_code": "rom",
},
{
"lang_code": "es-419",
"resource_type": "tw",
"resource_code": "rom",
},
],
},
)
finished_document_path = "en-ulb-wa-jon_en-tn-wa-jon_en-tq-wa-jon_en-tw-wa-jon_es-419-ulb-rom_es-419-tn-rom_es-419-tq-rom_es-419-tw-rom_book_language_order.pdf"
check_finished_document_with_verses_success(response, finished_document_path)
def test_invalid_document_request() -> None:
with pytest.raises(Exception):
with TestClient(app=app, base_url=settings.api_test_url()) as client:
response: requests.Response = client.post(
"/documents",
json={
"email_address": settings.TO_EMAIL_ADDRESS,
"assembly_strategy_kind": "book_language_order",
"resource_requests": [
{
"lang_code": "",
"resource_type": "xxx",
"resource_code": "blah",
},
],
},
)
finished_document_path = "invalid_file_that_doesnt_exist.pdf"
check_finished_document_with_verses_success(
response, finished_document_path
)
| 39.251777
| 170
| 0.438546
| 6,821
| 77,326
| 4.54816
| 0.027709
| 0.054669
| 0.077813
| 0.052671
| 0.975438
| 0.972085
| 0.968862
| 0.961996
| 0.94417
| 0.929858
| 0
| 0.00575
| 0.455746
| 77,326
| 1,969
| 171
| 39.271712
| 0.731401
| 0.018648
| 0
| 0.63326
| 0
| 0.012665
| 0.225742
| 0.062856
| 0
| 0
| 0
| 0
| 0.012665
| 1
| 0.028084
| false
| 0
| 0.004956
| 0
| 0.03304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7c8c6883fff5d98216807c1edf9e2f1f7e73ae23
| 4,870
|
py
|
Python
|
src/xhorizon/shell_junction/reg_corner_masks.py
|
xh-diagrams/xhorizon
|
20b3f2f0f621ca2a31c9f6a1d5fcd06692a700ce
|
[
"MIT"
] | 1
|
2020-04-01T16:14:00.000Z
|
2020-04-01T16:14:00.000Z
|
src/xhorizon/shell_junction/reg_corner_masks.py
|
xh-diagrams/xhorizon
|
20b3f2f0f621ca2a31c9f6a1d5fcd06692a700ce
|
[
"MIT"
] | 1
|
2020-04-26T14:41:31.000Z
|
2020-04-26T14:41:31.000Z
|
src/xhorizon/shell_junction/reg_corner_masks.py
|
xh-diagrams/xhorizon
|
20b3f2f0f621ca2a31c9f6a1d5fcd06692a700ce
|
[
"MIT"
] | 1
|
2021-04-15T09:23:29.000Z
|
2021-04-15T09:23:29.000Z
|
"""
This module provides functions for applying uvbounds masks to correct blocks in a given region.
These functions should directly edit and return the input region object.
"""
import numpy as np
def EFreg(reg, abcd=None, u0=None, v0=None):
"""
"""
## sort
if (len(reg.blocks)-1)==1:
if reg.metfunc.sgnf(0)==-1.0:
out = EFreg1a(reg, abcd=abcd, u0=u0, v0=v0)
if (len(reg.blocks)-1)==2:
if reg.metfunc.sgnf(0)==1.0:
out = EFreg2a(reg, abcd=abcd, u0=u0, v0=v0)
## return
return out
def EFreg2a(reg, abcd=None, u0=None, v0=None):
"""
For Hayward-like regions (aka N=2 and f(0)>0).
Assumes slice in outermost block.
reg = The region to mask.
abcd = Which corner junction label is this piece, with standard label scheme.
Should be a single letter string 'a' 'b' 'c' or 'd'.
u0, v0 = Location to slice.
Returns the same region that was input, after editing.
Region is masked by removing unwanted blocks and setting uvbounds on remaining blocks.
Block order is [inner, middle, outer] (this is arbitrary, just happens to be how EFreg2a is generated).
Setting any of the uvbounds to np.nan causes all inequalities to fail, which causes an error unless block is removed.
"""
## initialize
uvb = [dict(), dict(), dict()]
## set proper uvbounds for each block based on abcd type
## case a
if abcd=='a':
uvb = [dict(vmin=v0), dict(vmin=v0), dict(vmin=v0, umin=u0)]
## case b
if abcd=='b':
uvb = [dict(vmin=np.nan), dict(vmin=np.nan), dict(vmax=v0,umax=u0)]
## case c
if abcd=='c':
uvb = [dict(vmin=np.nan), dict(vmin=np.nan), dict(vmin=v0,umax=u0)]
## case d
if abcd=='d':
uvb = [dict(vmax=v0), dict(vmax=v0), dict(vmax=v0, umin=u0)]
## update blocks uvbounds
for i in range(len(reg.blocks)):
reg.blocks[i].uvbounds.update(uvb[i])
## keep blocks only if no nan in uvbounds values
keep = []
for b in reg.blocks:
if not np.nan in b.uvbounds.values():
keep += [b]
reg.blocks = keep
## return
return reg
def EFreg1a(reg, abcd=None, u0=None, v0=None):
"""
For Schwarzschild-like regions (aka N=1 and f(0)<0).
Assumes slice in outermost block.
reg = The region to mask.
abcd = Which corner junction label is this piece, with standard label scheme.
Should be a single letter string 'a' 'b' 'c' or 'd'.
u0, v0 = Location to slice.
Returns the same region that was input, after editing.
Region is masked by removing unwanted blocks and setting uvbounds on remaining blocks.
Block order is [inner, outer] (this is arbitrary, just happens to be how EFreg1a is generated).
Setting any of the uvbounds to np.nan causes all inequalities to fail, which causes an error unless block is removed.
"""
## initialize
uvb = [dict(), dict()]
## set proper uvbounds for each block based on abcd type
## case a
if abcd=='a':
uvb = [dict(vmin=v0), dict(vmin=v0, umin=u0)]
## case b
if abcd=='b':
uvb = [dict(vmin=np.nan), dict(vmax=v0,umax=u0)]
## case c
if abcd=='c':
uvb = [dict(vmin=np.nan), dict(vmin=v0,umax=u0)]
## case d
if abcd=='d':
uvb = [dict(vmax=v0), dict(vmax=v0, umin=u0)]
## update blocks uvbounds
for i in range(len(reg.blocks)):
reg.blocks[i].uvbounds.update(uvb[i])
## keep blocks only if no nan in uvbounds values
keep = []
for b in reg.blocks:
if not np.nan in b.uvbounds.values():
keep += [b]
reg.blocks = keep
## return
return reg
def MAXreg2a(reg, abcd=None, u0=None, v0=None):
"""
For Schwarzschild-like regions (aka N=2 and f(0)>0).
reg = The region to mask.
abcd = Which corner junction label is this piece, with standard label scheme.
Should be a single letter string 'a' 'b' 'c' or 'd'.
u0, v0 = Location to slice.
Returns the same region that was input, after editing.
Region is masked by removing unwanted blocks and setting uvbounds on remaining blocks.
Block order is [top, right, bottom, left] (this is arbitrary, just happens to be how MAXreg2a is generated).
Setting any of the uvbounds to np.nan causes all inequalities to fail, which causes an error unless block is removed.
"""
## initialize
uvb = [dict(), dict(), dict(), dict()]
## set proper uvbounds for each block based on abcd type
## case a
if abcd=='a':
uvb = [dict(vmin=v0), dict(vmin=v0,umin=u0), dict(vmin=np.nan), dict(vmin=np.nan)]
## case b
if abcd=='b':
uvb = [dict(vmin=np.nan), dict(vmax=v0,umax=u0), dict(umax=u0), dict(vmin=np.nan)]
## case c
if abcd=='c':
uvb = [dict(vmin=np.nan), dict(vmin=v0,umax=u0), dict(vmin=np.nan), dict(vmin=np.nan)]
## case d
if abcd=='d':
uvb = [dict(vmax=v0), dict(vmax=v0,umin=u0), dict(umin=u0), dict()]
## update blocks uvbounds
for i in range(len(reg.blocks)):
reg.blocks[i].uvbounds.update(uvb[i])
## keep blocks only if no nan in uvbounds values
keep = []
for b in reg.blocks:
if not np.nan in b.uvbounds.values():
keep += [b]
reg.blocks = keep
## return
return reg
| 30.248447
| 118
| 0.672895
| 840
| 4,870
| 3.90119
| 0.160714
| 0.056149
| 0.03967
| 0.051572
| 0.909673
| 0.900519
| 0.894721
| 0.876106
| 0.844675
| 0.814464
| 0
| 0.020917
| 0.185216
| 4,870
| 160
| 119
| 30.4375
| 0.80494
| 0.550719
| 0
| 0.571429
| 0
| 0
| 0.005561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063492
| false
| 0
| 0.015873
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7cbbae958f35ae19b481b8ed05d1b2e7648e8b54
| 4,501
|
py
|
Python
|
tests/test_middleware.py
|
miracle2k/hypercorn
|
b062659a476e7508e8c4ea5cb329a13b0e24074b
|
[
"MIT"
] | null | null | null |
tests/test_middleware.py
|
miracle2k/hypercorn
|
b062659a476e7508e8c4ea5cb329a13b0e24074b
|
[
"MIT"
] | null | null | null |
tests/test_middleware.py
|
miracle2k/hypercorn
|
b062659a476e7508e8c4ea5cb329a13b0e24074b
|
[
"MIT"
] | null | null | null |
from typing import Callable
import pytest
from hypercorn.middleware import DispatcherMiddleware, HTTPToHTTPSRedirectMiddleware
from .helpers import empty_framework
@pytest.mark.asyncio
async def test_http_to_https_redirect_middleware_http() -> None:
app = HTTPToHTTPSRedirectMiddleware(empty_framework, "localhost")
sent_events = []
async def send(message: dict) -> None:
nonlocal sent_events
sent_events.append(message)
scope = {"type": "http", "scheme": "http", "path": "/abc", "query_string": b"a=b"}
await app(scope, None, send)
assert sent_events == [
{
"type": "http.response.start",
"status": 307,
"headers": [(b"location", b"https://localhost/abc?a=b")],
},
{"type": "http.response.body"},
]
@pytest.mark.asyncio
async def test_http_to_https_redirect_middleware_websocket() -> None:
app = HTTPToHTTPSRedirectMiddleware(empty_framework, "localhost")
sent_events = []
async def send(message: dict) -> None:
nonlocal sent_events
sent_events.append(message)
scope = {
"type": "websocket",
"scheme": "ws",
"path": "/abc",
"query_string": b"a=b",
"extensions": {"websocket.http.response": {}},
}
await app(scope, None, send)
assert sent_events == [
{
"type": "websocket.http.response.start",
"status": 307,
"headers": [(b"location", b"wss://localhost/abc?a=b")],
},
{"type": "websocket.http.response.body"},
]
@pytest.mark.asyncio
async def test_http_to_https_redirect_middleware_websocket_http2() -> None:
app = HTTPToHTTPSRedirectMiddleware(empty_framework, "localhost")
sent_events = []
async def send(message: dict) -> None:
nonlocal sent_events
sent_events.append(message)
scope = {
"type": "websocket",
"http_version": "2",
"scheme": "ws",
"path": "/abc",
"query_string": b"a=b",
"extensions": {"websocket.http.response": {}},
}
await app(scope, None, send)
assert sent_events == [
{
"type": "websocket.http.response.start",
"status": 307,
"headers": [(b"location", b"https://localhost/abc?a=b")],
},
{"type": "websocket.http.response.body"},
]
@pytest.mark.asyncio
async def test_http_to_https_redirect_middleware_websocket_no_rejection() -> None:
app = HTTPToHTTPSRedirectMiddleware(empty_framework, "localhost")
sent_events = []
async def send(message: dict) -> None:
nonlocal sent_events
sent_events.append(message)
scope = {
"type": "websocket",
"http_version": "2",
"scheme": "ws",
"path": "/abc",
"query_string": b"a=b",
}
await app(scope, None, send)
assert sent_events == [{"type": "websocket.close"}]
@pytest.mark.asyncio
async def test_dispatcher_middleware() -> None:
class EchoFramework:
def __init__(self, name: str) -> None:
self.name = name
async def __call__(self, scope: dict, receive: Callable, send: Callable) -> None:
response = f"{self.name}-{scope['path']}"
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [(b"content-length", b"%d" % len(response))],
}
)
await send({"type": "http.response.body", "body": response.encode()})
app = DispatcherMiddleware({"/api/x": EchoFramework("apix"), "/api": EchoFramework("api")})
sent_events = []
async def send(message: dict) -> None:
nonlocal sent_events
sent_events.append(message)
scope = {"type": "http", "asgi": {"version": "3.0"}}
await app(dict(path="/api/x/b", **scope), None, send)
await app(dict(path="/api/b", **scope), None, send)
await app(dict(path="/", **scope), None, send)
assert sent_events == [
{"type": "http.response.start", "status": 200, "headers": [(b"content-length", b"7")]},
{"type": "http.response.body", "body": b"apix-/b"},
{"type": "http.response.start", "status": 200, "headers": [(b"content-length", b"6")]},
{"type": "http.response.body", "body": b"api-/b"},
{"type": "http.response.start", "status": 404, "headers": [(b"content-length", b"0")]},
{"type": "http.response.body"},
]
| 30.828767
| 95
| 0.576094
| 488
| 4,501
| 5.17418
| 0.172131
| 0.079208
| 0.063366
| 0.063762
| 0.811089
| 0.77703
| 0.734257
| 0.734257
| 0.710495
| 0.710495
| 0
| 0.008646
| 0.254832
| 4,501
| 145
| 96
| 31.041379
| 0.744186
| 0
| 0
| 0.594828
| 0
| 0
| 0.228838
| 0.046656
| 0
| 0
| 0
| 0
| 0.043103
| 1
| 0.008621
| false
| 0
| 0.034483
| 0
| 0.051724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7cd823cda65cdbb90961858e3a1d6bd003e6ef53
| 101
|
py
|
Python
|
tests/exog/random/random_exog_75_40.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/exog/random/random_exog_75_40.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/exog/random/random_exog_75_40.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import tests.exog.test_random_exogenous as testrandexog
testrandexog.test_random_exogenous( 75,40);
| 25.25
| 55
| 0.861386
| 14
| 101
| 5.928571
| 0.714286
| 0.240964
| 0.457831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042553
| 0.069307
| 101
| 4
| 56
| 25.25
| 0.840426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
86212b3a7a3032c0947ed6072ef915dfc197880e
| 467
|
py
|
Python
|
Jupyter/imports4PyMOLjupyter.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Jupyter/imports4PyMOLjupyter.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Jupyter/imports4PyMOLjupyter.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
# Description: Imports needed for most uses of pymol in Jupyter. Combination of importPyMOL and importPythonDisplay.
# Source: placeHolder
"""
cmd.do('from pymol import cmd')
cmd.do('from IPython.display import Image')
cmd.do('from IPython.core.display import HTML')
cmd.do('PATH = "/Users/blaine/"')
"""
cmd.do('from pymol import cmd')
cmd.do('from IPython.display import Image')
cmd.do('from IPython.core.display import HTML')
cmd.do('PATH = "/Users/blaine/"')
| 31.133333
| 117
| 0.732334
| 69
| 467
| 4.956522
| 0.405797
| 0.116959
| 0.157895
| 0.187135
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0
| 0
| 0.117773
| 467
| 14
| 118
| 33.357143
| 0.830097
| 0.631692
| 0
| 0
| 0
| 0
| 0.699387
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8645ec4f1149539697a4f57bff38e8f75ae06368
| 27
|
py
|
Python
|
devel/lib/python2.7/dist-packages/plutodrone/srv/__init__.py
|
EveVengerov/Gesture-Controlling-Drone
|
8fe38dbfdc496472e13e76bcdb55b471f51b42ea
|
[
"MIT"
] | 2
|
2021-09-22T19:06:19.000Z
|
2021-09-22T20:22:40.000Z
|
devel/lib/python2.7/dist-packages/plutodrone/srv/__init__.py
|
EveVengerov/Gesture-Controlling-Drone
|
8fe38dbfdc496472e13e76bcdb55b471f51b42ea
|
[
"MIT"
] | null | null | null |
devel/lib/python2.7/dist-packages/plutodrone/srv/__init__.py
|
EveVengerov/Gesture-Controlling-Drone
|
8fe38dbfdc496472e13e76bcdb55b471f51b42ea
|
[
"MIT"
] | null | null | null |
from ._PlutoPilot import *
| 13.5
| 26
| 0.777778
| 3
| 27
| 6.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
868557ee807d80b9e8d4eb597572bfb9efb9c2f1
| 345
|
py
|
Python
|
core/serializers/__init__.py
|
decosterkevin/foodtrack-back
|
c459b7f30854e6d114ffb0ff04b1ae7f36b73cd8
|
[
"MIT"
] | null | null | null |
core/serializers/__init__.py
|
decosterkevin/foodtrack-back
|
c459b7f30854e6d114ffb0ff04b1ae7f36b73cd8
|
[
"MIT"
] | 4
|
2021-04-08T21:59:06.000Z
|
2021-06-10T20:42:55.000Z
|
core/serializers/__init__.py
|
decosterkevin/foodtrack-back
|
c459b7f30854e6d114ffb0ff04b1ae7f36b73cd8
|
[
"MIT"
] | null | null | null |
from .address import AddressSerializer, ExploitationSerializer
from .product import ProductSerializer
from .cart import CartSerializer, CartItemSerializer
from .profile import ProductorProfileSerializer, UserProfileSerializer, SimpleProductorProfileSerializer # ProductorProfileFullSerializer,
from .product_profile import FullProductSerializer
| 57.5
| 138
| 0.892754
| 26
| 345
| 11.807692
| 0.615385
| 0.071661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075362
| 345
| 5
| 139
| 69
| 0.962382
| 0.089855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
868b2b62a8277f827af964fc21cbf6fe424b4257
| 39
|
py
|
Python
|
_main_train/_model_builder/__init__.py
|
oxquantum/CVAE
|
0352ddc51fbfd8d57b155e6de66b4c34e010beac
|
[
"MIT"
] | null | null | null |
_main_train/_model_builder/__init__.py
|
oxquantum/CVAE
|
0352ddc51fbfd8d57b155e6de66b4c34e010beac
|
[
"MIT"
] | null | null | null |
_main_train/_model_builder/__init__.py
|
oxquantum/CVAE
|
0352ddc51fbfd8d57b155e6de66b4c34e010beac
|
[
"MIT"
] | null | null | null |
from .model_builder import build_model
| 19.5
| 38
| 0.871795
| 6
| 39
| 5.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 39
| 1
| 39
| 39
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
86977aa569f51218759bfe9641c2b16086343b7b
| 173
|
py
|
Python
|
produto/meus_produtos/admin.py
|
guilhon/aplicacao-basica-python-django
|
e5fd9c5a227428047cae50a38cb9bd4b74f3860b
|
[
"MIT"
] | null | null | null |
produto/meus_produtos/admin.py
|
guilhon/aplicacao-basica-python-django
|
e5fd9c5a227428047cae50a38cb9bd4b74f3860b
|
[
"MIT"
] | null | null | null |
produto/meus_produtos/admin.py
|
guilhon/aplicacao-basica-python-django
|
e5fd9c5a227428047cae50a38cb9bd4b74f3860b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from meus_produtos.models import Produto
class ProdutoModelAdmin(admin.ModelAdmin):
pass
admin.site.register(Produto, ProdutoModelAdmin)
| 21.625
| 47
| 0.843931
| 21
| 173
| 6.904762
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092486
| 173
| 7
| 48
| 24.714286
| 0.923567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
869ebb0ce11286078960ce6087033256e269154b
| 204
|
py
|
Python
|
mhw/__init__.py
|
reireias/mhw
|
1e988f5e2cf019ff486af256ef323e49bb5af671
|
[
"MIT"
] | null | null | null |
mhw/__init__.py
|
reireias/mhw
|
1e988f5e2cf019ff486af256ef323e49bb5af671
|
[
"MIT"
] | null | null | null |
mhw/__init__.py
|
reireias/mhw
|
1e988f5e2cf019ff486af256ef323e49bb5af671
|
[
"MIT"
] | null | null | null |
"""
MHW utility tools package
"""
from .damage import Condition, calculate
from . import motionlist
from . import monster
from .util import generate_skill_patterns, generate_targets, skill_rank, to_label
| 25.5
| 81
| 0.803922
| 27
| 204
| 5.888889
| 0.703704
| 0.125786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127451
| 204
| 7
| 82
| 29.142857
| 0.893258
| 0.122549
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
86e35754c4f172d31af661e8762ebddddd72bc9c
| 5,919
|
py
|
Python
|
src/service/service.py
|
LeapSunrise/Z-Moves_bot
|
ac2f0a47f769166976896e80b356ba6968ed7f02
|
[
"MIT"
] | 3
|
2021-03-15T14:06:38.000Z
|
2021-05-28T17:37:34.000Z
|
src/service/service.py
|
LeapSunrise/z-moves-bot
|
ac2f0a47f769166976896e80b356ba6968ed7f02
|
[
"MIT"
] | null | null | null |
src/service/service.py
|
LeapSunrise/z-moves-bot
|
ac2f0a47f769166976896e80b356ba6968ed7f02
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# !/usr/bin/python3.8.5
import random
import string
from src.schedule_parser.schedule_parser import *
from src.service.buttons import *
separator = '_' * 35
def dynamic_menu_links_inline_keyboard_generator(chat_id):
    """Build the dynamic main_menu/links inline keyboard.

    The "add link" button is always present; "change"/"remove" buttons
    are added only when the user's group already has stored links.
    :param chat_id:
    :return: telebot.types.InlineKeyboardMarkup
    """
    group = db.get_user_info(chat_id)[2]
    markup = telebot.types.InlineKeyboardMarkup()
    markup.add(inline_add_link_button)
    if db.get_links(chat_id, group) is not None:
        markup.add(inline_change_link_button)
        markup.add(inline_remove_link_button)
    return markup
def dynamic_menu_hotlines_inline_keyboard_generator(chat_id):
    """Build the dynamic main_menu/hotlines inline keyboard.

    The "add hotline" button is always present; "change"/"remove"
    buttons are added only when the user's group already has hotlines.
    :param chat_id:
    :return: telebot.types.InlineKeyboardMarkup
    """
    group = db.get_user_info(chat_id)[2]
    markup = telebot.types.InlineKeyboardMarkup()
    markup.add(inline_add_hotline_button)
    if db.get_hotlines(chat_id, group) is not None:
        markup.add(inline_change_hotline_button)
        markup.add(inline_remove_hotline_button)
    return markup
def generate_inline_subjects_to_add_link(chat_id):
    """Build the subjects inline keyboard used when adding a link.

    Button text is the subject name; callback_data is "link_add_" plus
    the first 25 characters of the subject name.
    :param chat_id:
    :return: telebot.types.InlineKeyboardMarkup
    """
    markup = telebot.types.InlineKeyboardMarkup()
    for subject in tuple(Schedule.get_lessons(chat_id)):
        button = telebot.types.InlineKeyboardButton(
            text=subject, callback_data=f"link_add_{subject[:25]}")
        markup.add(button)
    markup.add(inline_links_first_back_button)
    return markup
def generate_inline_subjects_to_add_hotline(chat_id):
    """Build the subjects inline keyboard used when adding a hotline.

    Button text is the subject name; callback_data is "hotline_add_"
    plus the first 25 characters of the subject name.
    :param chat_id:
    :return: telebot.types.InlineKeyboardMarkup
    """
    markup = telebot.types.InlineKeyboardMarkup()
    for subject in tuple(Schedule.get_lessons(chat_id)):
        button = telebot.types.InlineKeyboardButton(
            text=subject, callback_data=f"hotline_add_{subject[:25]}")
        markup.add(button)
    markup.add(inline_first_back_button_hotlines)
    return markup
def generate_inline_linked_subjects_to_change(chat_id):
    """
    Generates subjects inline keyboard to change links.
    Button text creates as "(subject type) - (subject name)".
    Callback_data creates as "link_ch_(link addition date)".
    Returns '' (empty string) when the group has no links at all.
    :param chat_id:
    :return: InlineKeyboardMarkup, or '' when there is nothing to change
    """
    user_group = db.get_user_info(chat_id)[2]
    keyboard = telebot.types.InlineKeyboardMarkup()
    if db.get_links(chat_id, user_group) is not None:
        for item in db.get_links(chat_id, user_group):
            # item[2] = subject type, item[1] = subject name, item[6] = addition date.
            keyboard.add(telebot.types.InlineKeyboardButton(text=f"{item[2]} - {item[1]}",
                                                            callback_data=f"link_ch_{item[6]}"))
        keyboard.add(inline_links_first_back_button)
        return keyboard
    else:
        return ''
def generate_inline_hotlined_subjects_to_change(chat_id):
    """
    Generates subjects inline keyboard to change hotlines.
    Button text creates as "(hotline date) - (subject name)".
    Callback_data creates as "hotline_ch_(hotline addition date)".
    Returns '' (empty string) when the group has no hotlines at all.
    :param chat_id:
    :return: InlineKeyboardMarkup, or '' when there is nothing to change
    """
    user_group = db.get_user_info(chat_id)[2]
    keyboard = telebot.types.InlineKeyboardMarkup()
    if db.get_hotlines(chat_id, user_group) is not None:
        for item in db.get_hotlines(chat_id, user_group):
            # item[3] = hotline datetime, item[1] = subject name, item[5] = addition date.
            keyboard.add(telebot.types.InlineKeyboardButton(text=f"{item[3].strftime('%d.%m')} - {item[1]}",
                                                            callback_data=f"hotline_ch_{item[5]}"))
        keyboard.add(inline_first_back_button_hotlines)
        return keyboard
    else:
        return ''
def generate_inline_linked_subjects_to_remove(chat_id):
    """
    Generates subjects inline keyboard to remove links.
    Button text creates as "(subject type) - (subject name)".
    Callback_data creates as "link_rm_(link addition date)".
    Returns '' (empty string) when the group has no links at all.
    :param chat_id:
    :return: InlineKeyboardMarkup, or '' when there is nothing to remove
    """
    user_group = db.get_user_info(chat_id)[2]
    keyboard = telebot.types.InlineKeyboardMarkup()
    if db.get_links(chat_id, user_group) is not None:
        for item in db.get_links(chat_id, user_group):
            # item[2] = subject type, item[1] = subject name, item[6] = addition date.
            keyboard.add(telebot.types.InlineKeyboardButton(text=f"{item[2]} - {item[1]}",
                                                            callback_data=f"link_rm_{item[6]}"))
        keyboard.add(inline_links_first_back_button)
        return keyboard
    else:
        return ''
def generate_inline_hotlined_subjects_to_remove(chat_id):
    """
    Generates subjects inline keyboard to remove hotlines.
    Button text creates as "(hotline date) - (subject name)".
    Callback_data creates as "hotline_rm_(hotline addition date)".
    Returns '' (empty string) when the group has no hotlines at all.
    :param chat_id:
    :return: InlineKeyboardMarkup, or '' when there is nothing to remove
    """
    user_group = db.get_user_info(chat_id)[2]
    keyboard = telebot.types.InlineKeyboardMarkup()
    if db.get_hotlines(chat_id, user_group) is not None:
        for item in db.get_hotlines(chat_id, user_group):
            # item[3] = hotline datetime, item[1] = subject name, item[5] = addition date.
            keyboard.add(telebot.types.InlineKeyboardButton(text=f"{item[3].strftime('%d.%m')} - {item[1]}",
                                                            callback_data=f"hotline_rm_{item[5]}"))
        keyboard.add(inline_first_back_button_hotlines)
        return keyboard
    else:
        return ''
def rozklad_api_work_checker():
    """
    Simple rozklad API accessibility checker.

    Original version fell through and returned None (falsy) even when
    the API answered, so callers could never see success; it also used a
    bare ``except:`` that swallowed KeyboardInterrupt/SystemExit.
    :return: True when the API responded within 3 seconds, False otherwise.
    """
    try:
        requests.get('https://api.rozklad.org.ua/', timeout=3)
    except requests.RequestException:
        # Covers connection errors, timeouts and other HTTP-level failures.
        return False
    return True
def token_generator(length):
    """
    Generate a random alphanumeric token.

    Uses the ``secrets`` module (CSPRNG) instead of ``random`` because the
    token is security-relevant; also drops the original accidental
    double-weighting of digits in the alphabet.
    :param length: number of characters in the token
    :return: random string of ASCII letters and digits
    """
    import secrets  # local import: used nowhere else in this module
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))
| 31.994595
| 109
| 0.673087
| 753
| 5,919
| 5.02656
| 0.156707
| 0.053897
| 0.053897
| 0.03963
| 0.86288
| 0.845443
| 0.831704
| 0.828005
| 0.805812
| 0.72893
| 0
| 0.011544
| 0.224362
| 5,919
| 184
| 110
| 32.168478
| 0.812895
| 0.23433
| 0
| 0.62069
| 0
| 0
| 0.066604
| 0.017995
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114943
| false
| 0
| 0.045977
| 0
| 0.321839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
86f31af3cdd4d02394795da5a728222683ef5121
| 146
|
py
|
Python
|
Domain/Python/if_else.py
|
bhansa/Hack
|
dd312af4446fa86da5e4740f6efc3c1ba50e53de
|
[
"Apache-2.0"
] | null | null | null |
Domain/Python/if_else.py
|
bhansa/Hack
|
dd312af4446fa86da5e4740f6efc3c1ba50e53de
|
[
"Apache-2.0"
] | null | null | null |
Domain/Python/if_else.py
|
bhansa/Hack
|
dd312af4446fa86da5e4740f6efc3c1ba50e53de
|
[
"Apache-2.0"
] | null | null | null |
def weird_label(n):
    """Classify n per the classic "weird number" conditional exercise.

    Odd -> "Weird"; even and 2..5 -> "Not Weird"; even and 6..20 ->
    "Weird"; even and > 20 -> "Not Weird".
    The original Python 2 script used strict inequalities, so even
    n == 2, 6 or 20 matched no branch and printed nothing — fixed with
    inclusive bounds.
    """
    if n % 2 != 0:
        return "Weird"
    if 2 <= n <= 5:
        return "Not Weird"
    if 6 <= n <= 20:
        return "Weird"
    return "Not Weird"


if __name__ == "__main__":
    # Modernized from Python 2 (raw_input / print statement).
    print(weird_label(int(input())))
| 14.6
| 18
| 0.643836
| 33
| 146
| 2.818182
| 0.454545
| 0.290323
| 0.322581
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07563
| 0.184932
| 146
| 9
| 19
| 16.222222
| 0.705882
| 0
| 0
| 0.444444
| 0
| 0
| 0.191781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.444444
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
811a0cea2aad461710a4790da7c00b08558defd7
| 38
|
py
|
Python
|
image_classification/SwinTransformer/augmentation.py
|
chuliuT/PaddleViT
|
282e5013f0460fa9f9b010775ff4d2607e7370ef
|
[
"Apache-2.0"
] | null | null | null |
image_classification/SwinTransformer/augmentation.py
|
chuliuT/PaddleViT
|
282e5013f0460fa9f9b010775ff4d2607e7370ef
|
[
"Apache-2.0"
] | null | null | null |
image_classification/SwinTransformer/augmentation.py
|
chuliuT/PaddleViT
|
282e5013f0460fa9f9b010775ff4d2607e7370ef
|
[
"Apache-2.0"
] | null | null | null |
import paddle
import paddle.nn as nn
| 9.5
| 22
| 0.789474
| 7
| 38
| 4.285714
| 0.571429
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 38
| 3
| 23
| 12.666667
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d4df8b5b86a7dcfb6b73f51e985891eb3a98cdd3
| 75
|
py
|
Python
|
jacdac/power_supply/__init__.py
|
microsoft/jacdac-python
|
712ad5559e29065f5eccb5dbfe029c039132df5a
|
[
"MIT"
] | 1
|
2022-02-15T21:30:36.000Z
|
2022-02-15T21:30:36.000Z
|
jacdac/power_supply/__init__.py
|
microsoft/jacdac-python
|
712ad5559e29065f5eccb5dbfe029c039132df5a
|
[
"MIT"
] | null | null | null |
jacdac/power_supply/__init__.py
|
microsoft/jacdac-python
|
712ad5559e29065f5eccb5dbfe029c039132df5a
|
[
"MIT"
] | 1
|
2022-02-08T19:32:45.000Z
|
2022-02-08T19:32:45.000Z
|
# Autogenerated file.
from .client import PowerSupplyClient # type: ignore
| 25
| 52
| 0.8
| 8
| 75
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 75
| 2
| 53
| 37.5
| 0.923077
| 0.426667
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
be0b31613218ea2d46744aae47249304600611a6
| 7,667
|
py
|
Python
|
deploy/virenv/lib/python2.7/site-packages/haystack/reverse/cli.py
|
wangvictor2012/liuwei
|
0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1
|
[
"BSD-3-Clause"
] | null | null | null |
deploy/virenv/lib/python2.7/site-packages/haystack/reverse/cli.py
|
wangvictor2012/liuwei
|
0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1
|
[
"BSD-3-Clause"
] | null | null | null |
deploy/virenv/lib/python2.7/site-packages/haystack/reverse/cli.py
|
wangvictor2012/liuwei
|
0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Entry points related to reverse. """
import os
import sys
from haystack import argparse_utils
from haystack import cli
from haystack.reverse import api
# the description of the function
REVERSE_DESC = 'Reverse the data structure from the process memory'
REVERSE_SHOW_DESC = 'Show the record at a specific address'
REVERSE_PARENT_DESC = 'List the predecessors pointing to the record at this address'
REVERSE_HEX_DESC = 'Show the Hex values for the record at that address.'
def reverse_argparser(reverse_parser):
    """Attach the default reverse command (reverse_cmdline) to the parser."""
    reverse_parser.set_defaults(func=reverse_cmdline)
    return reverse_parser
def reverse_show_argparser(show_parser):
    """ Show function options argument parser.

    Adds the required hex 'address' argument and binds the show command.
    """
    # 'address' is parsed as a base-16 integer by argparse_utils.int16.
    show_parser.add_argument('address', type=argparse_utils.int16, help='Record memory address in hex')
    show_parser.set_defaults(func=reverse_show_cmdline)
    return show_parser
def reverse_parents_argparser(parents_parser):
    """Argument parser for the predecessors (parents) command."""
    # 'address' is parsed as a base-16 integer by argparse_utils.int16.
    parents_parser.add_argument('address', type=argparse_utils.int16, action='store', default=None,
                                help='Hex address of the child structure')
    parents_parser.set_defaults(func=show_predecessors_cmdline)
    return parents_parser
def reverse_hex_argparser(hex_parser):
    """Argument parser for the hex-dump command."""
    # 'address' is parsed as a base-16 integer by argparse_utils.int16.
    hex_parser.add_argument('address', type=argparse_utils.int16, action='store', default=None,
                            help='Specify the address of the record, or encompassed by the record')
    hex_parser.set_defaults(func=show_hex)
    return hex_parser
def show_hex(args):
    """ Show the Hex values for the record at that address.

    :param args: parsed CLI namespace; reads args.address.
    """
    memory_handler = cli.get_memory_handler(args)
    process_context = memory_handler.get_reverse_context()
    ctx = process_context.get_context_for_address(args.address)
    try:
        st = ctx.get_record_at_address(args.address)
        # Python 2 print statement: dump the repr of the record's raw bytes.
        print repr(st.bytes)
    except ValueError as e:
        # No record known at this address.
        print None
    return
def show_predecessors_cmdline(args):
    """
    Show the predecessors that point to a record at a particular address.
    :param args: parsed CLI namespace; reads args.address.
    :return:
    """
    memory_handler = cli.get_memory_handler(args)
    process_context = memory_handler.get_reverse_context()
    ctx = process_context.get_context_for_address(args.address)
    try:
        child_record = ctx.get_record_at_address(args.address)
    except ValueError as e:
        # No record at this address: nothing to look up.
        print None
        return
    records = api.get_record_predecessors(memory_handler, child_record)
    if len(records) == 0:
        print None
    else:
        # Print each predecessor as "#0x<address>\n<record dump>".
        for p_record in records:
            print '#0x%x\n%s\n' % (p_record.address, p_record.to_string())
    return
def reverse_show_cmdline(args):
    """ Show the record at a specific address.

    :param args: parsed CLI namespace; reads args.address.
    """
    memory_handler = cli.get_memory_handler(args)
    process_context = memory_handler.get_reverse_context()
    ctx = process_context.get_context_for_address(args.address)
    try:
        st = ctx.get_record_at_address(args.address)
        print st.to_string()
    except ValueError:
        # No record known at this address.
        print None
    return
def reverse_cmdline(args):
    """ Reverse all structures from the memory dump selected by args. """
    from haystack.reverse import api as rapi
    # get the memory handler adequate for the type requested
    memory_handler = cli.get_memory_handler(args)
    # do the search
    rapi.reverse_instances(memory_handler)
    return
def main_reverse():
    """Console entry point: reverse all records from a base dump folder."""
    argv = sys.argv[1:]
    desc = REVERSE_DESC + cli.DUMPTYPE_BASE_DESC
    rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
    rootparser.add_argument('dump_folder_name', type=argparse_utils.readable, help='Use this memory dump folder')
    reverse_argparser(rootparser)
    opts = rootparser.parse_args(argv)
    opts.dumptype = cli.DUMPTYPE_BASE
    # apply verbosity
    cli.set_logging_level(opts)
    # execute function
    opts.func(opts)
    return
def minidump_reverse():
    """Console entry point: reverse all records from a minidump file."""
    argv = sys.argv[1:]
    desc = REVERSE_DESC + cli.DUMPTYPE_MINIDUMP_DESC
    rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
    rootparser.add_argument('dump_filename', type=argparse_utils.readable, help='Use this memory dump file')
    reverse_argparser(rootparser)
    opts = rootparser.parse_args(argv)
    opts.dumptype = cli.DUMPTYPE_MINIDUMP
    # apply verbosity
    cli.set_logging_level(opts)
    # execute function
    opts.func(opts)
    return
def main_reverse_show():
    """Console entry point: show one record from a base dump folder."""
    argv = sys.argv[1:]
    desc = REVERSE_SHOW_DESC + cli.DUMPTYPE_BASE_DESC
    rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
    rootparser.add_argument('dump_folder_name', type=argparse_utils.readable, help='Use this memory dump folder')
    reverse_show_argparser(rootparser)
    opts = rootparser.parse_args(argv)
    opts.dumptype = cli.DUMPTYPE_BASE
    # apply verbosity
    cli.set_logging_level(opts)
    # execute function
    opts.func(opts)
    return
def minidump_reverse_show():
    """Console entry point: show one record from a minidump file."""
    argv = sys.argv[1:]
    desc = REVERSE_SHOW_DESC + cli.DUMPTYPE_MINIDUMP_DESC
    rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
    rootparser.add_argument('dump_filename', type=argparse_utils.readable, help='Use this memory dump file')
    reverse_show_argparser(rootparser)
    opts = rootparser.parse_args(argv)
    opts.dumptype = cli.DUMPTYPE_MINIDUMP
    # apply verbosity
    cli.set_logging_level(opts)
    # execute function
    opts.func(opts)
    return
def main_reverse_parents():
    """Console entry point: list a record's predecessors (base dump folder)."""
    argv = sys.argv[1:]
    desc = REVERSE_PARENT_DESC + cli.DUMPTYPE_BASE_DESC
    rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
    rootparser.add_argument('dump_folder_name', type=argparse_utils.readable, help='Use this memory dump folder')
    reverse_parents_argparser(rootparser)
    opts = rootparser.parse_args(argv)
    opts.dumptype = cli.DUMPTYPE_BASE
    # apply verbosity
    cli.set_logging_level(opts)
    # execute function
    opts.func(opts)
    return
def minidump_reverse_parents():
    """Console entry point: list a record's predecessors (minidump file)."""
    argv = sys.argv[1:]
    desc = REVERSE_PARENT_DESC + cli.DUMPTYPE_MINIDUMP_DESC
    rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
    rootparser.add_argument('dump_filename', type=argparse_utils.readable, help='Use this memory dump file')
    reverse_parents_argparser(rootparser)
    opts = rootparser.parse_args(argv)
    opts.dumptype = cli.DUMPTYPE_MINIDUMP
    # apply verbosity
    cli.set_logging_level(opts)
    # execute function
    opts.func(opts)
    return
def main_reverse_hex():
    """Console entry point: hex-dump one record (base dump folder)."""
    argv = sys.argv[1:]
    desc = REVERSE_HEX_DESC + cli.DUMPTYPE_BASE_DESC
    rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
    rootparser.add_argument('dump_folder_name', type=argparse_utils.readable, help='Use this memory dump folder')
    reverse_hex_argparser(rootparser)
    opts = rootparser.parse_args(argv)
    opts.dumptype = cli.DUMPTYPE_BASE
    # apply verbosity
    cli.set_logging_level(opts)
    # execute function
    opts.func(opts)
    return
def minidump_reverse_hex():
    """Console entry point: hex-dump one record (minidump file)."""
    argv = sys.argv[1:]
    desc = REVERSE_HEX_DESC + cli.DUMPTYPE_MINIDUMP_DESC
    rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
    rootparser.add_argument('dump_filename', type=argparse_utils.readable, help='Use this memory dump file')
    reverse_hex_argparser(rootparser)
    opts = rootparser.parse_args(argv)
    opts.dumptype = cli.DUMPTYPE_MINIDUMP
    # apply verbosity
    cli.set_logging_level(opts)
    # execute function
    opts.func(opts)
    return
| 34.227679
| 113
| 0.732881
| 1,027
| 7,667
| 5.234664
| 0.125609
| 0.020833
| 0.034784
| 0.017857
| 0.77567
| 0.745536
| 0.745536
| 0.710007
| 0.701451
| 0.701451
| 0
| 0.003958
| 0.17621
| 7,667
| 223
| 114
| 34.381166
| 0.847213
| 0.053085
| 0
| 0.632258
| 0
| 0
| 0.099552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.006452
| 0.03871
| null | null | 0.045161
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
be1ce108645ef5ab424f6c5a625d728fcf2fffe8
| 28
|
py
|
Python
|
plotly/graph_objs/pointcloud/marker/__init__.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/graph_objs/pointcloud/marker/__init__.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/graph_objs/pointcloud/marker/__init__.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
from ._border import Border
| 14
| 27
| 0.821429
| 4
| 28
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
07804282e55f19c38513d6468fa5eabd543a015d
| 195
|
py
|
Python
|
moto/acm/__init__.py
|
jonnangle/moto-1
|
40b4e299abb732aad7f56cc0f680c0a272a46594
|
[
"Apache-2.0"
] | 3
|
2020-08-04T20:29:41.000Z
|
2020-11-09T09:28:19.000Z
|
moto/acm/__init__.py
|
jonnangle/moto-1
|
40b4e299abb732aad7f56cc0f680c0a272a46594
|
[
"Apache-2.0"
] | 17
|
2020-08-28T12:53:56.000Z
|
2020-11-10T01:04:46.000Z
|
moto/acm/__init__.py
|
jonnangle/moto-1
|
40b4e299abb732aad7f56cc0f680c0a272a46594
|
[
"Apache-2.0"
] | 2
|
2021-11-24T08:05:43.000Z
|
2021-11-25T16:18:48.000Z
|
from __future__ import unicode_literals
from .models import acm_backends
from ..core.models import base_decorator
# Backend instance for the default region, kept for the legacy
# single-region API surface.
acm_backend = acm_backends["us-east-1"]
# Decorator built from all per-region backends; presumably patches AWS ACM
# calls to hit the in-memory backends (see core.models.base_decorator).
mock_acm = base_decorator(acm_backends)
| 27.857143
| 40
| 0.830769
| 29
| 195
| 5.172414
| 0.551724
| 0.22
| 0.213333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005682
| 0.097436
| 195
| 6
| 41
| 32.5
| 0.846591
| 0
| 0
| 0
| 0
| 0
| 0.046154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
07a20a822260bc272c73446a0812ab8a35cab98c
| 13,544
|
py
|
Python
|
newsapi/newsapi_client.py
|
jborchma/newsapi-python
|
f59f9d67bb218156becbe07740bb7d33a9a19c99
|
[
"MIT"
] | 1
|
2019-02-22T03:45:39.000Z
|
2019-02-22T03:45:39.000Z
|
newsapi/newsapi_client.py
|
jborchma/newsapi-python
|
f59f9d67bb218156becbe07740bb7d33a9a19c99
|
[
"MIT"
] | null | null | null |
newsapi/newsapi_client.py
|
jborchma/newsapi-python
|
f59f9d67bb218156becbe07740bb7d33a9a19c99
|
[
"MIT"
] | null | null | null |
import requests
from newsapi.newsapi_auth import NewsApiAuth
from newsapi import const
from newsapi.newsapi_exception import NewsAPIException
class NewsApiClient(object):
    """Thin client for the NewsAPI.org v2 REST endpoints.

    Every public method validates its parameters locally (TypeError for a
    wrong type, ValueError for an out-of-range value), issues an
    authenticated GET with a 30 s timeout, and returns the decoded JSON
    body.  A non-OK HTTP status raises NewsAPIException carrying the
    decoded error payload.

    Refactor notes: the original repeated the same validate-and-store
    blocks in every method (with a typo'd message and inconsistent date
    errors); the logic now lives in private helpers with normalized
    messages.  The public interface is unchanged.
    """

    def __init__(self, api_key):
        """
        :param api_key: NewsAPI key used to authenticate every request.
        """
        self.auth = NewsApiAuth(api_key=api_key)

    # ------------------------------------------------------------------
    # Validation helpers.  Each is a no-op when value is None; otherwise
    # it validates and stores the value under *key* in *payload*.
    # ------------------------------------------------------------------

    @staticmethod
    def _set_str(payload, key, value, message):
        """Store a free-form string parameter; *message* is the TypeError text."""
        if value is None:
            return
        if not isinstance(value, str):
            raise TypeError(message)
        payload[key] = value

    @staticmethod
    def _set_choice(payload, key, value, choices, param_name, value_name=None):
        """Store a string parameter restricted to the *choices* collection."""
        if value is None:
            return
        if not isinstance(value, str):
            raise TypeError('%s param should be of type str' % param_name)
        if value not in choices:
            # value_name lets sort_by keep the original 'invalid sort' text.
            raise ValueError('invalid %s' % (value_name or param_name))
        payload[key] = value

    @staticmethod
    def _set_date(payload, key, value, param_name):
        """Store a 'YYYY-MM-DD' (optionally with a time suffix) string."""
        if value is None:
            return
        if not isinstance(value, str):
            raise TypeError('%s param should be of type str' % param_name)
        # Same check as the original loop: at least 10 chars with dashes
        # at positions 4 and 7 (e.g. 2018-03-05 or 2018-03-05T03:46:15).
        if len(value) < 10 or value[4] != '-' or value[7] != '-':
            raise ValueError('%s param should be in the format of YYYY-MM-DD' % param_name)
        payload[key] = value

    @staticmethod
    def _set_page_size(payload, page_size):
        """Store page_size; must be an int in [0, 100]."""
        if page_size is None:
            return
        if not isinstance(page_size, int):
            raise TypeError('page_size param should be an int')
        if not 0 <= page_size <= 100:
            raise ValueError('page_size param should be an int between 1 and 100')
        payload['pageSize'] = page_size

    @staticmethod
    def _set_page(payload, page):
        """Store page; must be a positive int."""
        if page is None:
            return
        if not isinstance(page, int):
            raise TypeError('page param should be an int')
        if page <= 0:
            raise ValueError('page param should be an int greater than 0')
        payload['page'] = page

    def _get(self, url, payload):
        """Send the authenticated GET and decode the response or raise.

        :raises NewsAPIException: when the HTTP status is not OK.
        """
        r = requests.get(url, auth=self.auth, timeout=30, params=payload)
        if r.status_code != requests.codes.ok:
            raise NewsAPIException(r.json())
        return r.json()

    def get_top_headlines(self, q=None, sources=None, language=None, country=None, category=None, page_size=None,
                          page=None):
        """
        Returns live top and breaking headlines for a country, specific category in a country, single source, or multiple sources.

        Optional parameters:
            (str) q - keyword or phrase to search for.
            (str) sources - comma-separated source ids (e.g. 'bbc-news', 'the-verge').
                            Cannot be mixed with country/category.
            (str) language - 2-letter ISO-639-1 code; must be in const.languages.
            (str) country - 2-letter ISO 3166-1 code; must be in const.countries.
            (str) category - one of const.categories
                             ('business','entertainment','general','health','science','sports','technology').
            (int) page_size - results per page, 0-100 (API default is 20).
            (int) page - 1-based page number.
        :raises TypeError: when a parameter has the wrong type.
        :raises ValueError: when a parameter value is invalid or sources is
            combined with country/category.
        """
        payload = {}
        self._set_str(payload, 'q', q, 'keyword/phrase q param should be of type str')
        # API restriction: sources is mutually exclusive with country/category.
        if (sources is not None) and ((country is not None) or (category is not None)):
            raise ValueError('cannot mix country/category param with sources param.')
        self._set_str(payload, 'sources', sources, 'sources param should be of type str')
        self._set_choice(payload, 'language', language, const.languages, 'language')
        self._set_choice(payload, 'country', country, const.countries, 'country')
        self._set_choice(payload, 'category', category, const.categories, 'category')
        self._set_page_size(payload, page_size)
        self._set_page(payload, page)
        return self._get(const.TOP_HEADLINES_URL, payload)

    def get_everything(self, q=None, sources=None, domains=None, from_param=None, to=None, language=None,
                       sort_by=None, page=None, page_size=None):
        """
        Search through millions of articles from over 5,000 large and small news sources and blogs.

        Optional parameters:
            (str) q - keyword or phrase to search for.
            (str) sources - comma-separated source ids.
            (str) domains - comma-separated domains (eg bbc.co.uk, techcrunch.com)
                            to restrict the search to.
            (str) from_param - oldest allowed article date, 'YYYY-MM-DD' with
                               optional time suffix (e.g. 2018-03-05T03:46:15).
            (str) to - newest allowed article date, same format.
            (str) language - 2-letter ISO-639-1 code; must be in const.languages.
            (str) sort_by - one of const.sort_method
                            ('relevancy','popularity','publishedAt').
            (int) page_size - results per page, 0-100 (API default is 20).
            (int) page - 1-based page number.
        :raises TypeError: when a parameter has the wrong type.
        :raises ValueError: when a parameter value is invalid.
        """
        payload = {}
        self._set_str(payload, 'q', q, 'keyword/phrase q param should be of type str')
        self._set_str(payload, 'sources', sources, 'sources param should be of type str')
        self._set_str(payload, 'domains', domains, 'domains param should be of type str')
        self._set_date(payload, 'from', from_param, 'from_param')
        self._set_date(payload, 'to', to, 'to')
        self._set_choice(payload, 'language', language, const.languages, 'language')
        self._set_choice(payload, 'sortBy', sort_by, const.sort_method, 'sort_by', value_name='sort')
        self._set_page_size(payload, page_size)
        self._set_page(payload, page)
        return self._get(const.EVERYTHING_URL, payload)

    def get_sources(self, category=None, language=None, country=None):
        """
        Returns the subset of news publishers that top headlines are available from.

        Optional parameters:
            (str) category - one of const.categories.
            (str) language - 2-letter ISO-639-1 code; must be in const.languages.
            (str) country - 2-letter ISO 3166-1 code; must be in const.countries.
        :raises TypeError: when a parameter has the wrong type.
        :raises ValueError: when a parameter value is invalid.
        """
        payload = {}
        self._set_choice(payload, 'language', language, const.languages, 'language')
        self._set_choice(payload, 'country', country, const.countries, 'country')
        self._set_choice(payload, 'category', category, const.categories, 'category')
        return self._get(const.SOURCES_URL, payload)
| 42.591195
| 135
| 0.5158
| 1,593
| 13,544
| 4.350282
| 0.166981
| 0.041558
| 0.048773
| 0.030159
| 0.787734
| 0.75671
| 0.741703
| 0.723232
| 0.723232
| 0.717027
| 0
| 0.011385
| 0.370939
| 13,544
| 317
| 136
| 42.725552
| 0.801995
| 0.434584
| 0
| 0.735632
| 0
| 0
| 0.161976
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022989
| false
| 0
| 0.022989
| 0
| 0.068966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
07e6341724c87920a79484ebaa8329f4136d116a
| 13,884
|
py
|
Python
|
Tests/Test_rwlock.py
|
brucewxh/IntraArchiveDeduplicator
|
7b0c07cc9fffa75e1b7be285f42b0a8fad42dcfb
|
[
"BSD-3-Clause"
] | 86
|
2015-01-13T15:02:08.000Z
|
2021-12-24T02:13:03.000Z
|
Tests/Test_rwlock.py
|
brucewxh/IntraArchiveDeduplicator
|
7b0c07cc9fffa75e1b7be285f42b0a8fad42dcfb
|
[
"BSD-3-Clause"
] | 4
|
2016-11-18T20:08:50.000Z
|
2018-03-08T23:05:37.000Z
|
Tests/Test_rwlock.py
|
brucewxh/IntraArchiveDeduplicator
|
7b0c07cc9fffa75e1b7be285f42b0a8fad42dcfb
|
[
"BSD-3-Clause"
] | 12
|
2015-05-03T07:56:50.000Z
|
2021-03-11T12:38:56.000Z
|
##
## Unit testing code
## =================
##
import unittest
import threading
import time
import copy
import pyximport
pyximport.install()
import deduplicator.cyHamDb as hamDb
class Writer(threading.Thread):
    """Worker thread that appends one value to a shared buffer while
    holding the write lock.

    @param buffer_: common buffer shared by the readers and writers
    @type buffer_: list
    @type rw_lock: L{RWLock}
    @param init_sleep_time: delay before trying to acquire the lock
    @type init_sleep_time: C{float}
    @param sleep_time: how long to stay inside the critical section
    @type sleep_time: C{float}
    @param to_write: value appended to the buffer
    """

    def __init__(self, buffer_, rw_lock, init_sleep_time, sleep_time, to_write):
        threading.Thread.__init__(self)
        self._buf = buffer_
        self._lock = rw_lock
        self._initial_delay = init_sleep_time
        self._hold_time = sleep_time
        self._payload = to_write
        # Timestamps of entry to / exit from the critical section,
        # filled in by run() so the test can check lock ordering.
        self.entry_time = None
        self.exit_time = None

    def run(self):
        time.sleep(self._initial_delay)
        self._lock.get_write_lock()
        self.entry_time = time.time()
        time.sleep(self._hold_time)
        self._buf.append(self._payload)
        self.exit_time = time.time()
        self._lock.free_write_lock()
class Reader(threading.Thread):
    """Thread that snapshots a shared buffer while holding the read lock."""

    def __init__(self, buffer_, rw_lock, init_sleep_time, sleep_time):
        """Set up the reader.

        @param buffer_: common buffer shared by the readers and writers
        @type buffer_: list
        @type rw_lock: L{RWLock}
        @param init_sleep_time: sleep time before doing any action
        @type init_sleep_time: C{float}
        @param sleep_time: sleep time while in critical section
        @type sleep_time: C{float}
        """
        super().__init__()
        self.__buffer = buffer_
        self.__rw_lock = rw_lock
        self.__init_sleep_time = init_sleep_time
        self.__sleep_time = sleep_time
        # Deep copy of the buffer taken inside the critical section.
        self.buffer_read = None
        # Wall-clock timestamps of entry to / exit from the critical section.
        self.entry_time = None
        self.exit_time = None

    def run(self):
        """Sleep, then copy the buffer while holding the read lock."""
        time.sleep(self.__init_sleep_time)
        self.__rw_lock.get_read_lock()
        self.entry_time = time.time()
        time.sleep(self.__sleep_time)
        self.buffer_read = copy.deepcopy(self.__buffer)
        self.exit_time = time.time()
        self.__rw_lock.free_read_lock()
class WriterContext(threading.Thread):
    """Writer variant that acquires the lock via its writer_context() manager."""

    def __init__(self, buffer_, rw_lock, init_sleep_time, sleep_time, to_write):
        """Set up the writer.

        @param buffer_: common buffer_ shared by the readers and writers
        @type buffer_: list
        @type rw_lock: L{RWLock}
        @param init_sleep_time: sleep time before doing any action
        @type init_sleep_time: C{float}
        @param sleep_time: sleep time while in critical section
        @type sleep_time: C{float}
        @param to_write: data that will be appended to the buffer
        """
        super().__init__()
        self.__buffer = buffer_
        self.__rw_lock = rw_lock
        self.__init_sleep_time = init_sleep_time
        self.__sleep_time = sleep_time
        self.__to_write = to_write
        # Wall-clock timestamps of entry to / exit from the critical section.
        self.entry_time = None
        self.exit_time = None

    def run(self):
        """Sleep, then append to the buffer inside the writer context."""
        time.sleep(self.__init_sleep_time)
        with self.__rw_lock.writer_context():
            self.entry_time = time.time()
            time.sleep(self.__sleep_time)
            self.__buffer.append(self.__to_write)
            self.exit_time = time.time()
class ReaderContext(threading.Thread):
    """Reader variant that acquires the lock via its reader_context() manager."""

    def __init__(self, buffer_, rw_lock, init_sleep_time, sleep_time):
        """Set up the reader.

        @param buffer_: common buffer shared by the readers and writers
        @type buffer_: list
        @type rw_lock: L{RWLock}
        @param init_sleep_time: sleep time before doing any action
        @type init_sleep_time: C{float}
        @param sleep_time: sleep time while in critical section
        @type sleep_time: C{float}
        """
        super().__init__()
        self.__buffer = buffer_
        self.__rw_lock = rw_lock
        self.__init_sleep_time = init_sleep_time
        self.__sleep_time = sleep_time
        # Deep copy of the buffer taken inside the critical section.
        self.buffer_read = None
        # Wall-clock timestamps of entry to / exit from the critical section.
        self.entry_time = None
        self.exit_time = None

    def run(self):
        """Sleep, then copy the buffer inside the reader context."""
        time.sleep(self.__init_sleep_time)
        with self.__rw_lock.reader_context():
            self.entry_time = time.time()
            time.sleep(self.__sleep_time)
            self.buffer_read = copy.deepcopy(self.__buffer)
            self.exit_time = time.time()
class RWLockTestCase(unittest.TestCase):
    """Concurrency tests for the reader/writer lock on hamDb.BkHammingTree.

    Uses the Writer/Reader (and context-manager) helper threads above and
    compares their critical-section entry/exit timestamps to verify mutual
    exclusion and writer priority.

    NOTE: the deprecated ``assert_`` alias was removed in Python 3.12; this
    class uses ``assertTrue`` instead.
    """

    # Over-releasing a pthread rwlock aborts the process (SIGILL in
    # libpthread), so the wrapper must raise RuntimeError instead.
    def test_overrelease_read(self):
        test_lock = hamDb.BkHammingTree()
        test_lock.get_read_lock()
        test_lock.free_read_lock()
        self.assertRaises(RuntimeError, test_lock.free_read_lock)

    def test_overrelease_write(self):
        test_lock = hamDb.BkHammingTree()
        test_lock.get_write_lock()
        test_lock.free_write_lock()
        self.assertRaises(RuntimeError, test_lock.free_write_lock)

    def test_reentrant_read(self):
        # Read locks may be acquired recursively by the same thread.
        test_lock = hamDb.BkHammingTree()
        test_lock.get_read_lock()
        test_lock.get_read_lock()
        test_lock.free_read_lock()
        test_lock.free_read_lock()

    def test_non_reentrant_write(self):
        # Write locks are exclusive even against the owning thread.
        test_lock = hamDb.BkHammingTree()
        test_lock.get_write_lock()
        self.assertRaises(RuntimeError, test_lock.get_write_lock, blocking=False)
        test_lock.free_write_lock()

    def test_readers_nonexclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(Reader(buffer_, rw_lock, 0, 0))
        threads.append(Writer(buffer_, rw_lock, 0.2, 0.4, 1))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0.3))
        threads.append(Reader(buffer_, rw_lock, 0.5, 0))
        self.__start_and_join_threads(threads)
        ## The third reader should enter after the second one but it should
        ## exit before the second one exits
        ## (i.e. the readers should be in the critical section
        ## at the same time)
        self.assertEqual([], threads[0].buffer_read)
        self.assertEqual([1], threads[2].buffer_read)
        self.assertEqual([1], threads[3].buffer_read)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].entry_time <= threads[3].entry_time)
        self.assertTrue(threads[3].exit_time < threads[2].exit_time)

    def test_writers_exclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(Writer(buffer_, rw_lock, 0, 0.4, 1))
        threads.append(Writer(buffer_, rw_lock, 0.1, 0, 2))
        threads.append(Reader(buffer_, rw_lock, 0.2, 0))
        self.__start_and_join_threads(threads)
        ## The second writer should wait for the first one to exit
        self.assertEqual([1, 2], threads[2].buffer_read)
        self.assertTrue(threads[0].exit_time <= threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].exit_time)

    def test_writer_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(Writer(buffer_, rw_lock, 0, 0, 1))
        threads.append(Reader(buffer_, rw_lock, 0.1, 0.4))
        threads.append(Writer(buffer_, rw_lock, 0.2, 0, 2))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0))
        self.__start_and_join_threads(threads)
        ## The second writer should go before the second and the third reader
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2], threads[3].buffer_read)
        self.assertEqual([1, 2], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)

    def test_many_writers_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(Writer(buffer_, rw_lock, 0, 0, 1))
        threads.append(Reader(buffer_, rw_lock, 0.1, 0.6))
        threads.append(Writer(buffer_, rw_lock, 0.2, 0.1, 2))
        threads.append(Reader(buffer_, rw_lock, 0.3, 0))
        threads.append(Reader(buffer_, rw_lock, 0.4, 0))
        threads.append(Writer(buffer_, rw_lock, 0.5, 0.1, 3))
        self.__start_and_join_threads(threads)
        ## The two last writers should go first -- after the first reader and
        ## before the second and the third reader
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2, 3], threads[3].buffer_read)
        self.assertEqual([1, 2, 3], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[5].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[4].entry_time)

    def test_context_readers_nonexclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(ReaderContext(buffer_, rw_lock, 0, 0))
        threads.append(WriterContext(buffer_, rw_lock, 0.2, 0.4, 1))
        threads.append(ReaderContext(buffer_, rw_lock, 0.3, 0.3))
        threads.append(ReaderContext(buffer_, rw_lock, 0.5, 0))
        self.__start_and_join_threads(threads)
        ## The third reader should enter after the second one but it should
        ## exit before the second one exits
        ## (i.e. the readers should be in the critical section
        ## at the same time)
        self.assertEqual([], threads[0].buffer_read)
        self.assertEqual([1], threads[2].buffer_read)
        self.assertEqual([1], threads[3].buffer_read)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].entry_time <= threads[3].entry_time)
        self.assertTrue(threads[3].exit_time < threads[2].exit_time)

    def test_context_writers_exclusive_access(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(WriterContext(buffer_, rw_lock, 0, 0.4, 1))
        threads.append(WriterContext(buffer_, rw_lock, 0.1, 0, 2))
        threads.append(ReaderContext(buffer_, rw_lock, 0.2, 0))
        self.__start_and_join_threads(threads)
        ## The second writer should wait for the first one to exit
        self.assertEqual([1, 2], threads[2].buffer_read)
        self.assertTrue(threads[0].exit_time <= threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].exit_time)

    def test_context_writer_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(WriterContext(buffer_, rw_lock, 0, 0, 1))
        threads.append(ReaderContext(buffer_, rw_lock, 0.1, 0.4))
        threads.append(WriterContext(buffer_, rw_lock, 0.2, 0, 2))
        threads.append(ReaderContext(buffer_, rw_lock, 0.3, 0))
        threads.append(ReaderContext(buffer_, rw_lock, 0.3, 0))
        self.__start_and_join_threads(threads)
        ## The second writer should go before the second and the third reader
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2], threads[3].buffer_read)
        self.assertEqual([1, 2], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)

    def test_context_many_writers_priority(self):
        (buffer_, rw_lock, threads) = self.__init_variables()
        threads.append(WriterContext(buffer_, rw_lock, 0, 0, 1))
        threads.append(ReaderContext(buffer_, rw_lock, 0.1, 0.6))
        threads.append(WriterContext(buffer_, rw_lock, 0.2, 0.1, 2))
        threads.append(ReaderContext(buffer_, rw_lock, 0.3, 0))
        threads.append(ReaderContext(buffer_, rw_lock, 0.4, 0))
        threads.append(WriterContext(buffer_, rw_lock, 0.5, 0.1, 3))
        self.__start_and_join_threads(threads)
        ## The two last writers should go first -- after the first reader and
        ## before the second and the third reader
        self.assertEqual([1], threads[1].buffer_read)
        self.assertEqual([1, 2, 3], threads[3].buffer_read)
        self.assertEqual([1, 2, 3], threads[4].buffer_read)
        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
        self.assertTrue(threads[1].exit_time <= threads[5].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[3].entry_time)
        self.assertTrue(threads[5].exit_time <= threads[4].entry_time)

    @staticmethod
    def __init_variables():
        """Return a fresh (shared buffer, lock, thread list) triple."""
        buffer_ = []
        rw_lock = hamDb.BkHammingTree()
        threads = []
        return (buffer_, rw_lock, threads)

    @staticmethod
    def __start_and_join_threads(threads):
        """Start all threads, then wait for every one to finish."""
        for t in threads:
            t.start()
        for t in threads:
            t.join()
| 35.060606
| 96
| 0.724287
| 2,066
| 13,884
| 4.547919
| 0.064376
| 0.043423
| 0.063857
| 0.049808
| 0.943912
| 0.926671
| 0.893572
| 0.857493
| 0.836633
| 0.780226
| 0
| 0.020383
| 0.130726
| 13,884
| 395
| 97
| 35.149367
| 0.758141
| 0.218381
| 0
| 0.699552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.246637
| 1
| 0.098655
| false
| 0
| 0.03139
| 0
| 0.156951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
07f0e110fe7a2a70c7f2992e8d5b001d124947eb
| 5,338
|
py
|
Python
|
gprm/utils/wrapping_tools.py
|
siwill22/GPlatesClassStruggle
|
713a87ff4f054d3a493ec09e5f310aa3036d3bc5
|
[
"MIT"
] | 7
|
2020-05-04T03:05:09.000Z
|
2022-01-28T13:52:53.000Z
|
gprm/utils/wrapping_tools.py
|
siwill22/GPlatesClassStruggle
|
713a87ff4f054d3a493ec09e5f310aa3036d3bc5
|
[
"MIT"
] | null | null | null |
gprm/utils/wrapping_tools.py
|
siwill22/GPlatesClassStruggle
|
713a87ff4f054d3a493ec09e5f310aa3036d3bc5
|
[
"MIT"
] | 3
|
2021-05-23T01:53:52.000Z
|
2021-09-14T12:21:53.000Z
|
#
# Functions for wrapping geometries to dateline before returning request geojson
#
import pygplates
def wrap_polylines(polylines, lon0=0, tesselate_degrees=1):
    """Convert polylines to a GeoJSON-like FeatureCollection dict.

    :param polylines: iterable of objects exposing ``get_geometry()`` /
        ``get_geometries()`` (pygplates polyline features)
    :param lon0: central meridian for dateline wrapping; ``None`` disables
        wrapping and the raw geometries are used
    :param tesselate_degrees: tesselation threshold passed to the wrapper
    :return: dict with ``"type"`` and ``"features"`` keys; coordinates are
        (lon, lat) tuples
    """
    data = {"type": "FeatureCollection"}
    data["features"] = []
    # The wrapper only depends on lon0, so build it once instead of per polyline.
    wrapper = pygplates.DateLineWrapper(lon0) if lon0 is not None else None
    for polyline in polylines:
        if wrapper is not None:
            geometries = wrapper.wrap(polyline.get_geometry(), tesselate_degrees)
        else:
            geometries = polyline.get_geometries()
        for geometry in geometries:
            point_list = []
            for point in geometry.get_points():
                # Single to_lat_lon() call; GeoJSON wants (lon, lat) order.
                lat, lon = point.to_lat_lon()
                point_list.append((lon, lat))
            feature = {
                "type": "Feature",
                "geometry": {
                    "type": "MultiLineString",
                    "coordinates": [point_list],
                },
            }
            data["features"].append(feature)
    return data
def wrap_polygons(polygons, lon0=0, tesselate_degrees=1):
    """Convert polygons to a GeoJSON-like FeatureCollection dict.

    :param polygons: iterable of pygplates polygon features
    :param lon0: central meridian for dateline wrapping; ``None`` disables
        wrapping
    :param tesselate_degrees: tesselation threshold passed to the wrapper
    :return: dict with ``"type"`` and ``"features"`` keys; coordinates are
        (lon, lat) tuples
    """
    data = {"type": "FeatureCollection"}
    data["features"] = []
    # The wrapper only depends on lon0, so build it once instead of per polygon.
    wrapper = pygplates.DateLineWrapper(lon0) if lon0 is not None else None
    for polygon in polygons:
        if wrapper is not None:
            geometries = wrapper.wrap(polygon.get_geometry(), tesselate_degrees)
            for geometry in geometries:
                point_list = []
                for point in geometry.get_exterior_points():
                    # Single to_lat_lon() call; GeoJSON wants (lon, lat) order.
                    lat, lon = point.to_lat_lon()
                    point_list.append((lon, lat))
                feature = {
                    "type": "Feature",
                    "geometry": {"type": "Polygon", "coordinates": [point_list]},
                }
                data["features"].append(feature)
        else:
            for geometry in polygon.get_geometries():
                point_list = []
                for point in geometry.get_points():
                    lat, lon = point.to_lat_lon()
                    point_list.append((lon, lat))
                # NOTE(review): orientation is only normalised on this
                # unwrapped branch; wrapped geometries are emitted as-is —
                # confirm DateLineWrapper already fixes orientation.
                if geometry.get_orientation() == pygplates.PolygonOnSphere.Orientation.counter_clockwise:
                    point_list.reverse()
                feature = {
                    "type": "Feature",
                    "geometry": {"type": "Polygon", "coordinates": [point_list]},
                }
                data["features"].append(feature)
    return data
def wrap_reconstructed_polygons(reconstructed_polygons, lon0=0, tesselate_degrees=1):
    """Convert reconstructed polygons to a GeoJSON-like FeatureCollection dict.

    :param reconstructed_polygons: iterable of pygplates reconstructed
        polygon features
    :param lon0: central meridian for dateline wrapping; ``None`` disables
        wrapping
    :param tesselate_degrees: tesselation threshold passed to the wrapper
    :return: dict with ``"type"`` and ``"features"`` keys; coordinates are
        (lon, lat) tuples
    """
    data = {"type": "FeatureCollection"}
    data["features"] = []
    # Loop-invariant objects hoisted out of the per-polygon loop.
    wrapper = pygplates.DateLineWrapper(lon0) if lon0 is not None else None
    ccw = pygplates.PolygonOnSphere.Orientation.counter_clockwise
    for reconstructed_polygon in reconstructed_polygons:
        # CCW rings are reversed so all output rings share one orientation.
        rev = (
            reconstructed_polygon.get_reconstructed_geometry().get_orientation()
            == ccw
        )
        if wrapper is not None:
            geometries = wrapper.wrap(
                reconstructed_polygon.get_reconstructed_geometry(),
                tesselate_degrees,
            )
            for geometry in geometries:
                point_list = []
                for point in geometry.get_exterior_points():
                    # Single to_lat_lon() call; GeoJSON wants (lon, lat) order.
                    lat, lon = point.to_lat_lon()
                    point_list.append((lon, lat))
                if rev:
                    point_list.reverse()
                feature = {
                    "type": "Feature",
                    "geometry": {"type": "Polygon", "coordinates": [point_list]},
                }
                data["features"].append(feature)
        else:
            geometry = reconstructed_polygon.get_reconstructed_geometry()
            point_list = []
            for point in geometry.get_points():
                lat, lon = point.to_lat_lon()
                point_list.append((lon, lat))
            if rev:
                point_list.reverse()
            feature = {
                "type": "Feature",
                "geometry": {"type": "Polygon", "coordinates": [point_list]},
            }
            data["features"].append(feature)
    return data
def wrap_plate_boundaries(shared_boundary_sections, lon0=0, tesselate_degrees=1):
    """Convert shared plate-boundary sub-segments to a GeoJSON-like dict.

    Each output feature also carries the source feature type and the
    sub-segment's arc length.

    :param shared_boundary_sections: iterable of pygplates shared boundary
        sections
    :param lon0: central meridian for dateline wrapping; ``None`` disables
        wrapping
    :param tesselate_degrees: tesselation threshold passed to the wrapper
    :return: dict with ``"type"`` and ``"features"`` keys; coordinates are
        (lon, lat) tuples
    """
    data = {"type": "FeatureCollection"}
    data["features"] = []
    # The wrapper only depends on lon0, so build it once.
    wrapper = pygplates.DateLineWrapper(lon0) if lon0 is not None else None
    for shared_boundary_section in shared_boundary_sections:
        for shared_sub_segment in shared_boundary_section.get_shared_sub_segments():
            if wrapper is not None:
                geometries = wrapper.wrap(
                    shared_sub_segment.get_geometry(), tesselate_degrees
                )
            else:
                geometries = shared_sub_segment.get_geometries()
            # These are identical for every wrapped piece of the sub-segment.
            feature_type = str(
                shared_sub_segment.get_feature().get_feature_type()
            )
            arc_length = float(
                shared_sub_segment.get_geometry().get_arc_length()
            )
            for geometry in geometries:
                point_list = []
                for point in geometry.get_points():
                    # Single to_lat_lon() call; GeoJSON wants (lon, lat) order.
                    lat, lon = point.to_lat_lon()
                    point_list.append((lon, lat))
                feature = {
                    "type": "Feature",
                    "geometry": {
                        "type": "MultiLineString",
                        "coordinates": [point_list],
                    },
                    "feature_type": feature_type,
                    "Length": arc_length,
                }
                data["features"].append(feature)
    return data
| 40.135338
| 139
| 0.586737
| 527
| 5,338
| 5.721063
| 0.132827
| 0.062687
| 0.039801
| 0.051741
| 0.81592
| 0.763184
| 0.724378
| 0.724378
| 0.675954
| 0.675954
| 0
| 0.00849
| 0.29393
| 5,338
| 132
| 140
| 40.439394
| 0.791457
| 0.014612
| 0
| 0.771429
| 0
| 0
| 0.10274
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038095
| false
| 0
| 0.009524
| 0
| 0.085714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed19ccb72a8f0fd72b29f329be72f2b260ce60e6
| 46
|
py
|
Python
|
ple/games/__init__.py
|
GokulNC/Helicopter-Game-Reinforcement-Learning
|
c42eea71294bfc0dd2507d33c319e3b6d6e898e0
|
[
"MIT"
] | null | null | null |
ple/games/__init__.py
|
GokulNC/Helicopter-Game-Reinforcement-Learning
|
c42eea71294bfc0dd2507d33c319e3b6d6e898e0
|
[
"MIT"
] | null | null | null |
ple/games/__init__.py
|
GokulNC/Helicopter-Game-Reinforcement-Learning
|
c42eea71294bfc0dd2507d33c319e3b6d6e898e0
|
[
"MIT"
] | 2
|
2019-10-04T05:39:09.000Z
|
2019-12-14T12:08:53.000Z
|
from ple.games.pixelcopter import Pixelcopter
| 23
| 45
| 0.869565
| 6
| 46
| 6.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ed4d4efe066b0f4fdca9a1931dc1356f60e3815d
| 17,698
|
py
|
Python
|
tests/components/recorder/test_filters_with_entityfilter.py
|
mib1185/core
|
b17d4ac65cde9a27ff6032d70b148792e5eba8df
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/recorder/test_filters_with_entityfilter.py
|
mib1185/core
|
b17d4ac65cde9a27ff6032d70b148792e5eba8df
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/recorder/test_filters_with_entityfilter.py
|
mib1185/core
|
b17d4ac65cde9a27ff6032d70b148792e5eba8df
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""The tests for the recorder filter matching the EntityFilter component."""
import json
from sqlalchemy import select
from sqlalchemy.engine.row import Row
from homeassistant.components.recorder import get_instance
from homeassistant.components.recorder.db_schema import EventData, States
from homeassistant.components.recorder.filters import (
Filters,
extract_include_exclude_filter_conf,
sqlalchemy_filter_from_include_exclude_conf,
)
from homeassistant.components.recorder.util import session_scope
from homeassistant.const import ATTR_ENTITY_ID, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entityfilter import (
CONF_DOMAINS,
CONF_ENTITIES,
CONF_ENTITY_GLOBS,
CONF_EXCLUDE,
CONF_INCLUDE,
convert_include_exclude_filter,
)
from .common import async_wait_recording_done
async def _async_get_states_and_events_with_filter(
    hass: HomeAssistant, sqlalchemy_filter: Filters, entity_ids: set[str]
) -> tuple[set[str], set[str]]:
    """Seed the recorder with the given entities, then return the entity_ids
    that pass the filter, once from the states table and once from events."""
    # Seed the database: set each entity ON and fire an event referencing it.
    for entity_id in entity_ids:
        hass.states.async_set(entity_id, STATE_ON)
        hass.bus.async_fire("any", {ATTR_ENTITY_ID: entity_id})
    # Block until the recorder has committed everything above.
    await async_wait_recording_done(hass)

    def _get_states_with_session():
        # Runs on the recorder's executor via async_add_executor_job below.
        with session_scope(hass=hass) as session:
            return session.execute(
                select(States.entity_id).filter(
                    sqlalchemy_filter.states_entity_filter()
                )
            ).all()

    filtered_states_entity_ids = {
        row[0]
        for row in await get_instance(hass).async_add_executor_job(
            _get_states_with_session
        )
    }

    def _get_events_with_session():
        # Same pattern for events; filters on the shared JSON event payload.
        with session_scope(hass=hass) as session:
            return session.execute(
                select(EventData.shared_data).filter(
                    sqlalchemy_filter.events_entity_filter()
                )
            ).all()

    filtered_events_entity_ids = set()
    for row in await get_instance(hass).async_add_executor_job(
        _get_events_with_session
    ):
        # shared_data is a JSON blob; skip events without an entity_id key.
        event_data = json.loads(row[0])
        if ATTR_ENTITY_ID not in event_data:
            continue
        filtered_events_entity_ids.add(json.loads(row[0])[ATTR_ENTITY_ID])
    return filtered_states_entity_ids, filtered_events_entity_ids
async def test_included_and_excluded_simple_case_no_domains(hass, recorder_mock):
    """Filters built from glob/entity config (no domains) partition correctly."""
    accepted = {"sensor.kitchen4", "switch.kitchen"}
    rejected = {
        "light.any",
        "switch.other",
        "cover.any",
        "sensor.weather5",
        "light.kitchen",
    }
    conf = {
        CONF_INCLUDE: {
            CONF_ENTITY_GLOBS: ["sensor.kitchen*"],
            CONF_ENTITIES: ["switch.kitchen"],
        },
        CONF_EXCLUDE: {
            CONF_ENTITY_GLOBS: ["sensor.weather*"],
            CONF_ENTITIES: ["light.kitchen"],
        },
    }
    filter_conf = extract_include_exclude_filter_conf(conf)
    entity_filter = convert_include_exclude_filter(filter_conf)
    sql_filter = sqlalchemy_filter_from_include_exclude_conf(filter_conf)
    assert sql_filter is not None
    # The callable filter must agree with the accept/reject partition.
    for expected, ids in ((True, accepted), (False, rejected)):
        for entity_id in ids:
            assert entity_filter(entity_id) is expected
    # Only ids matched by an entity/glob entry count as explicit.
    for entity_id in ("light.any", "switch.other"):
        assert not entity_filter.explicitly_included(entity_id)
        assert not entity_filter.explicitly_excluded(entity_id)
    for entity_id in ("sensor.kitchen4", "switch.kitchen"):
        assert entity_filter.explicitly_included(entity_id)
    for entity_id in ("sensor.weather5", "light.kitchen"):
        assert entity_filter.explicitly_excluded(entity_id)
    # The SQL filter must produce the same partition from the database.
    states_ids, events_ids = await _async_get_states_and_events_with_filter(
        hass, sql_filter, accepted | rejected
    )
    assert states_ids == accepted
    assert not states_ids & rejected
    assert events_ids == accepted
    assert not events_ids & rejected
async def test_included_and_excluded_simple_case_no_globs(hass, recorder_mock):
    """Filters built from domain/entity config (no globs) partition correctly."""
    accepted = {"switch.bla", "sensor.blu", "sensor.keep"}
    rejected = {"sensor.bli"}
    conf = {
        CONF_INCLUDE: {
            CONF_DOMAINS: ["sensor", "homeassistant"],
            CONF_ENTITIES: ["switch.bla"],
        },
        CONF_EXCLUDE: {
            CONF_DOMAINS: ["switch"],
            CONF_ENTITIES: ["sensor.bli"],
        },
    }
    filter_conf = extract_include_exclude_filter_conf(conf)
    entity_filter = convert_include_exclude_filter(filter_conf)
    sql_filter = sqlalchemy_filter_from_include_exclude_conf(filter_conf)
    assert sql_filter is not None
    # The callable filter must agree with the accept/reject partition.
    for expected, ids in ((True, accepted), (False, rejected)):
        for entity_id in ids:
            assert entity_filter(entity_id) is expected
    # The SQL filter must produce the same partition from the database.
    states_ids, events_ids = await _async_get_states_and_events_with_filter(
        hass, sql_filter, accepted | rejected
    )
    assert states_ids == accepted
    assert not states_ids & rejected
    assert events_ids == accepted
    assert not events_ids & rejected
async def test_included_and_excluded_simple_case_without_underscores(
    hass, recorder_mock
):
    """Domain + glob + entity filters (no underscores) partition correctly."""
    accepted = {"light.any", "sensor.kitchen4", "switch.kitchen"}
    rejected = {"switch.other", "cover.any", "sensor.weather5", "light.kitchen"}
    conf = {
        CONF_INCLUDE: {
            CONF_DOMAINS: ["light"],
            CONF_ENTITY_GLOBS: ["sensor.kitchen*"],
            CONF_ENTITIES: ["switch.kitchen"],
        },
        CONF_EXCLUDE: {
            CONF_DOMAINS: ["cover"],
            CONF_ENTITY_GLOBS: ["sensor.weather*"],
            CONF_ENTITIES: ["light.kitchen"],
        },
    }
    filter_conf = extract_include_exclude_filter_conf(conf)
    entity_filter = convert_include_exclude_filter(filter_conf)
    sql_filter = sqlalchemy_filter_from_include_exclude_conf(filter_conf)
    assert sql_filter is not None
    # The callable filter must agree with the accept/reject partition.
    for expected, ids in ((True, accepted), (False, rejected)):
        for entity_id in ids:
            assert entity_filter(entity_id) is expected
    # Domain matches are implicit; only entity/glob matches are explicit.
    for entity_id in ("light.any", "switch.other"):
        assert not entity_filter.explicitly_included(entity_id)
        assert not entity_filter.explicitly_excluded(entity_id)
    for entity_id in ("sensor.kitchen4", "switch.kitchen"):
        assert entity_filter.explicitly_included(entity_id)
    for entity_id in ("sensor.weather5", "light.kitchen"):
        assert entity_filter.explicitly_excluded(entity_id)
    # The SQL filter must produce the same partition from the database.
    states_ids, events_ids = await _async_get_states_and_events_with_filter(
        hass, sql_filter, accepted | rejected
    )
    assert states_ids == accepted
    assert not states_ids & rejected
    assert events_ids == accepted
    assert not events_ids & rejected
async def test_included_and_excluded_simple_case_with_underscores(hass, recorder_mock):
    """Domain + glob + entity filters (with underscores) partition correctly."""
    accepted = {"light.any", "sensor.kitchen_4", "switch.kitchen"}
    rejected = {"switch.other", "cover.any", "sensor.weather_5", "light.kitchen"}
    conf = {
        CONF_INCLUDE: {
            CONF_DOMAINS: ["light"],
            CONF_ENTITY_GLOBS: ["sensor.kitchen_*"],
            CONF_ENTITIES: ["switch.kitchen"],
        },
        CONF_EXCLUDE: {
            CONF_DOMAINS: ["cover"],
            CONF_ENTITY_GLOBS: ["sensor.weather_*"],
            CONF_ENTITIES: ["light.kitchen"],
        },
    }
    filter_conf = extract_include_exclude_filter_conf(conf)
    entity_filter = convert_include_exclude_filter(filter_conf)
    sql_filter = sqlalchemy_filter_from_include_exclude_conf(filter_conf)
    assert sql_filter is not None
    # The callable filter must agree with the accept/reject partition.
    for expected, ids in ((True, accepted), (False, rejected)):
        for entity_id in ids:
            assert entity_filter(entity_id) is expected
    # Domain matches are implicit; only entity/glob matches are explicit.
    for entity_id in ("light.any", "switch.other"):
        assert not entity_filter.explicitly_included(entity_id)
        assert not entity_filter.explicitly_excluded(entity_id)
    for entity_id in ("sensor.kitchen_4", "switch.kitchen"):
        assert entity_filter.explicitly_included(entity_id)
    for entity_id in ("sensor.weather_5", "light.kitchen"):
        assert entity_filter.explicitly_excluded(entity_id)
    # The SQL filter must produce the same partition from the database.
    states_ids, events_ids = await _async_get_states_and_events_with_filter(
        hass, sql_filter, accepted | rejected
    )
    assert states_ids == accepted
    assert not states_ids & rejected
    assert events_ids == accepted
    assert not events_ids & rejected
async def test_included_and_excluded_complex_case(hass, recorder_mock):
    """A realistic include/exclude config partitions entities correctly."""
    accepted = {"light.any", "sensor.kitchen_4", "switch.kitchen"}
    rejected = {
        "camera.one",
        "notify.any",
        "automation.update_readme",
        "automation.update_utilities_cost",
        "binary_sensor.iss",
    }
    conf = {
        CONF_INCLUDE: {
            CONF_ENTITIES: ["group.trackers"],
        },
        CONF_EXCLUDE: {
            CONF_ENTITIES: [
                "automation.update_readme",
                "automation.update_utilities_cost",
                "binary_sensor.iss",
            ],
            CONF_DOMAINS: [
                "camera",
                "group",
                "media_player",
                "notify",
                "scene",
                "sun",
                "zone",
            ],
        },
    }
    filter_conf = extract_include_exclude_filter_conf(conf)
    entity_filter = convert_include_exclude_filter(filter_conf)
    sql_filter = sqlalchemy_filter_from_include_exclude_conf(filter_conf)
    assert sql_filter is not None
    # The callable filter must agree with the accept/reject partition.
    for expected, ids in ((True, accepted), (False, rejected)):
        for entity_id in ids:
            assert entity_filter(entity_id) is expected
    # The SQL filter must produce the same partition from the database.
    states_ids, events_ids = await _async_get_states_and_events_with_filter(
        hass, sql_filter, accepted | rejected
    )
    assert states_ids == accepted
    assert not states_ids & rejected
    assert events_ids == accepted
    assert not events_ids & rejected
async def test_included_entities_and_excluded_domain(hass, recorder_mock):
    """An included entity survives exclusion of its own domain."""
    accepted = {
        "media_player.test",
        "media_player.test3",
        "thermostat.test",
        "zone.home",
        "script.can_cancel_this_one",
    }
    rejected = {
        "thermostat.test2",
    }
    conf = {
        CONF_INCLUDE: {
            CONF_ENTITIES: ["media_player.test", "thermostat.test"],
        },
        CONF_EXCLUDE: {
            CONF_DOMAINS: ["thermostat"],
        },
    }
    filter_conf = extract_include_exclude_filter_conf(conf)
    entity_filter = convert_include_exclude_filter(filter_conf)
    sql_filter = sqlalchemy_filter_from_include_exclude_conf(filter_conf)
    assert sql_filter is not None
    # The callable filter must agree with the accept/reject partition.
    for expected, ids in ((True, accepted), (False, rejected)):
        for entity_id in ids:
            assert entity_filter(entity_id) is expected
    # The SQL filter must produce the same partition from the database.
    states_ids, events_ids = await _async_get_states_and_events_with_filter(
        hass, sql_filter, accepted | rejected
    )
    assert states_ids == accepted
    assert not states_ids & rejected
    assert events_ids == accepted
    assert not events_ids & rejected
async def test_same_domain_included_excluded(hass, recorder_mock):
    """When one domain is both included and excluded, include wins."""
    accepted = {
        "media_player.test",
        "media_player.test3",
    }
    rejected = {
        "thermostat.test2",
        "thermostat.test",
        "zone.home",
        "script.can_cancel_this_one",
    }
    conf = {
        CONF_INCLUDE: {
            CONF_DOMAINS: ["media_player"],
        },
        CONF_EXCLUDE: {
            CONF_DOMAINS: ["media_player"],
        },
    }
    filter_conf = extract_include_exclude_filter_conf(conf)
    entity_filter = convert_include_exclude_filter(filter_conf)
    sql_filter = sqlalchemy_filter_from_include_exclude_conf(filter_conf)
    assert sql_filter is not None
    # The callable filter must agree with the accept/reject partition.
    for expected, ids in ((True, accepted), (False, rejected)):
        for entity_id in ids:
            assert entity_filter(entity_id) is expected
    # The SQL filter must produce the same partition from the database.
    states_ids, events_ids = await _async_get_states_and_events_with_filter(
        hass, sql_filter, accepted | rejected
    )
    assert states_ids == accepted
    assert not states_ids & rejected
    assert events_ids == accepted
    assert not events_ids & rejected
async def test_same_entity_included_excluded(hass, recorder_mock):
    """Test filters with the same entity included and excluded."""
    filter_accept = {"media_player.test"}
    filter_reject = {
        "media_player.test3",
        "thermostat.test2",
        "thermostat.test",
        "zone.home",
        "script.can_cancel_this_one",
    }
    # The same single entity is both included and excluded; the expected sets
    # above show the include side wins for that entity only.
    conf = {
        CONF_INCLUDE: {CONF_ENTITIES: ["media_player.test"]},
        CONF_EXCLUDE: {CONF_ENTITIES: ["media_player.test"]},
    }

    filter_conf = extract_include_exclude_filter_conf(conf)
    entity_filter = convert_include_exclude_filter(filter_conf)
    sqlalchemy_filter = sqlalchemy_filter_from_include_exclude_conf(filter_conf)
    assert sqlalchemy_filter is not None

    # The python-side filter must classify every entity exactly as expected.
    for accepted_id in filter_accept:
        assert entity_filter(accepted_id) is True
    for rejected_id in filter_reject:
        assert entity_filter(rejected_id) is False

    # The SQL-side filter must produce the same partition for states and events.
    states_ids, events_ids = await _async_get_states_and_events_with_filter(
        hass, sqlalchemy_filter, filter_accept | filter_reject
    )
    assert states_ids == filter_accept
    assert not states_ids & filter_reject
    assert events_ids == filter_accept
    assert not events_ids & filter_reject
async def test_same_entity_included_excluded_include_domain_wins(hass, recorder_mock):
    """Test filters with domain and entities and the include domain wins."""
    filter_accept = {
        "media_player.test2",
        "media_player.test3",
        "thermostat.test",
    }
    filter_reject = {
        "thermostat.test2",
        "zone.home",
        "script.can_cancel_this_one",
    }
    # Mixed config: a domain and a specific entity on each side.
    conf = {
        CONF_INCLUDE: {
            CONF_DOMAINS: ["media_player"],
            CONF_ENTITIES: ["thermostat.test"],
        },
        CONF_EXCLUDE: {
            CONF_DOMAINS: ["thermostat"],
            CONF_ENTITIES: ["media_player.test"],
        },
    }

    filter_conf = extract_include_exclude_filter_conf(conf)
    entity_filter = convert_include_exclude_filter(filter_conf)
    sqlalchemy_filter = sqlalchemy_filter_from_include_exclude_conf(filter_conf)
    assert sqlalchemy_filter is not None

    # The python-side filter must classify every entity exactly as expected.
    for accepted_id in filter_accept:
        assert entity_filter(accepted_id) is True
    for rejected_id in filter_reject:
        assert entity_filter(rejected_id) is False

    # The SQL-side filter must produce the same partition for states and events.
    states_ids, events_ids = await _async_get_states_and_events_with_filter(
        hass, sqlalchemy_filter, filter_accept | filter_reject
    )
    assert states_ids == filter_accept
    assert not states_ids & filter_reject
    assert events_ids == filter_accept
    assert not events_ids & filter_reject
| 34.232108
| 87
| 0.70556
| 2,054
| 17,698
| 5.66407
| 0.070594
| 0.047189
| 0.051573
| 0.059309
| 0.845625
| 0.831872
| 0.817775
| 0.806515
| 0.780815
| 0.768867
| 0
| 0.001806
| 0.217821
| 17,698
| 516
| 88
| 34.29845
| 0.838619
| 0.003955
| 0
| 0.644231
| 0
| 0
| 0.09787
| 0.012712
| 0
| 0
| 0
| 0
| 0.209135
| 1
| 0.004808
| false
| 0
| 0.026442
| 0
| 0.038462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed5bdd9e0b7bf3ba36ce4a3d2569c659e98ee5a8
| 6,043
|
py
|
Python
|
tensorflow/c01/t4.py
|
tomsnail/opencv_tf_py
|
cf9aa7fa250546564cff56aa33b5a39991b0d8f1
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/c01/t4.py
|
tomsnail/opencv_tf_py
|
cf9aa7fa250546564cff56aa33b5a39991b0d8f1
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/c01/t4.py
|
tomsnail/opencv_tf_py
|
cf9aa7fa250546564cff56aa33b5a39991b0d8f1
|
[
"Apache-2.0"
] | 1
|
2020-05-22T09:19:56.000Z
|
2020-05-22T09:19:56.000Z
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Switch the cost function to cross-entropy.
def mnist_op1():
    """Train a single-layer softmax MNIST classifier with cross-entropy loss.

    Prints the test-set accuracy after each of 100 epochs.
    """
    # Load the dataset (labels one-hot encoded).
    mnist = input_data.read_data_sets("./../../datas/mnist/", one_hot=True)
    # Size of each mini-batch.
    batch_size = 100
    # Number of batches per epoch.
    n_batch = mnist.train.num_examples // batch_size
    # Placeholders for flattened 28x28 images and their one-hot labels.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    # Single linear layer producing raw class scores (logits).
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    logits = tf.matmul(x, W) + b
    prediction = tf.nn.softmax(logits)
    # Quadratic cost, kept for comparison:
    # loss = tf.reduce_mean(tf.square(y - prediction))
    # Cross-entropy cost. BUG FIX: softmax_cross_entropy_with_logits expects
    # *unscaled* logits; the original passed the already-softmaxed
    # `prediction`, applying softmax twice and distorting the gradients.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    # Gradient-descent training step.
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    # Variable initializer.
    init = tf.global_variables_initializer()
    # Accuracy: fraction of samples whose argmax prediction matches the label.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the index of the maximum along axis 1
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Start training.
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(100):
            for batch in range(n_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
            acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
            print(epoch, acc)
# Add more layers/nodes and use dropout to curb overfitting.
def mnist_op2():
    """Train a 4-layer tanh MLP on MNIST with dropout and save a checkpoint.

    Prints the test-set accuracy after each of 51 epochs, then saves the
    trained weights to ./../../datas/model/t4/.
    """
    # Load the dataset (labels one-hot encoded).
    mnist = input_data.read_data_sets("./../../datas/mnist/", one_hot=True)
    batch_size = 100  # samples per mini-batch
    n_batch = mnist.train.num_examples // batch_size  # batches per epoch
    print(n_batch)
    # Placeholders for flattened images and one-hot labels.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    # Dropout keep-probability (fed per run: 0.7 for training, 1.0 for eval).
    keep_prob = tf.placeholder(tf.float32)
    # 784 -> 2000 -> 2000 -> 1000 -> 10 tanh network with dropout between layers.
    W1 = tf.Variable(tf.truncated_normal([784, 2000], stddev=0.1))
    b1 = tf.Variable(tf.zeros([2000]) + 0.1)
    L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
    L1_drop = tf.nn.dropout(L1, keep_prob)
    W2 = tf.Variable(tf.truncated_normal([2000, 2000], stddev=0.1))
    b2 = tf.Variable(tf.zeros([2000]) + 0.1)
    L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
    L2_drop = tf.nn.dropout(L2, keep_prob)
    W3 = tf.Variable(tf.truncated_normal([2000, 1000], stddev=0.1))
    b3 = tf.Variable(tf.zeros([1000]) + 0.1)
    L3 = tf.nn.tanh(tf.matmul(L2_drop, W3) + b3)
    L3_drop = tf.nn.dropout(L3, keep_prob)
    W4 = tf.Variable(tf.truncated_normal([1000, 10], stddev=0.1))
    b4 = tf.Variable(tf.zeros([10]) + 0.1)
    logits = tf.matmul(L3_drop, W4) + b4
    prediction = tf.nn.softmax(logits)
    # Quadratic cost, kept for comparison:
    # loss = tf.reduce_mean(tf.square(y - prediction))
    # BUG FIX: pass the raw logits (not the softmax output) to the
    # cross-entropy op; the original applied softmax twice.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
    # Gradient-descent training step.
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    # Variable initializer.
    init = tf.global_variables_initializer()
    # Accuracy over argmax predictions.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the index of the maximum along axis 1
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    saver = tf.train.Saver()  # for checkpointing the trained weights
    # Start training.
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(51):
            for batch in range(n_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})
            test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
            # train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels, keep_prob: 1.0})
            print(epoch, test_acc)
        saver.save(sess, './../../datas/model/t4/mnist_model.ckpt')  # checkpoint location
# Switch to the Adam optimizer (with a per-epoch learning-rate decay).
def mnist_op3():
    """Train a 3-layer tanh MLP on MNIST with Adam and exponential LR decay.

    Prints the test-set accuracy after each of 51 epochs.
    """
    # Load the dataset (labels one-hot encoded).
    mnist = input_data.read_data_sets("./../../datas/mnist/", one_hot=True)
    batch_size = 100  # samples per mini-batch
    n_batch = mnist.train.num_examples // batch_size  # batches per epoch
    print(n_batch)
    # Placeholders for flattened images and one-hot labels.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    # Dropout keep-probability placeholder.
    keep_prob = tf.placeholder(tf.float32)
    # Learning rate kept in a variable so it can be decayed between epochs.
    lr = tf.Variable(0.001, dtype=tf.float32)
    # 784 -> 500 -> 300 -> 10 tanh network with dropout between layers.
    W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
    b1 = tf.Variable(tf.zeros([500]) + 0.1)
    L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
    L1_drop = tf.nn.dropout(L1, keep_prob)
    W2 = tf.Variable(tf.truncated_normal([500, 300], stddev=0.1))
    b2 = tf.Variable(tf.zeros([300]) + 0.1)
    L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
    L2_drop = tf.nn.dropout(L2, keep_prob)
    W4 = tf.Variable(tf.truncated_normal([300, 10], stddev=0.1))
    b4 = tf.Variable(tf.zeros([10]) + 0.1)
    logits = tf.matmul(L2_drop, W4) + b4
    prediction = tf.nn.softmax(logits)
    # Quadratic cost, kept for comparison:
    # loss = tf.reduce_mean(tf.square(y - prediction))
    # BUG FIX: pass the raw logits (not the softmax output) to the
    # cross-entropy op; the original applied softmax twice.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
    # train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    train_step = tf.train.AdamOptimizer(lr).minimize(loss)
    # Variable initializer.
    init = tf.global_variables_initializer()
    # Accuracy over argmax predictions.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the index of the maximum along axis 1
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Start training.
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(51):
            # Exponential decay: lr = 0.001 * 0.95^epoch.
            sess.run(tf.assign(lr, 0.001 * (0.95 ** epoch)))
            for batch in range(n_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
            test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
            print(epoch, test_acc)
if __name__ == '__main__':
    # Run the Adam-optimizer variant when executed as a script.
    mnist_op3()
| 32.143617
| 118
| 0.656131
| 909
| 6,043
| 4.20022
| 0.167217
| 0.044526
| 0.050288
| 0.040073
| 0.842064
| 0.832897
| 0.814563
| 0.812467
| 0.758512
| 0.733106
| 0
| 0.050586
| 0.195267
| 6,043
| 188
| 119
| 32.143617
| 0.734526
| 0.108224
| 0
| 0.6
| 0
| 0
| 0.024513
| 0.007298
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0.028571
| 0.047619
| 0
| 0.07619
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ed5d0c5572937dcc728bfc989d3fc2bbbfcb441b
| 29
|
py
|
Python
|
examples/performance/__init__.py
|
reguly/devito
|
543b7be41ddbf1faa90224cca3824767756c9390
|
[
"MIT"
] | 204
|
2020-01-09T11:27:58.000Z
|
2022-03-20T22:53:37.000Z
|
examples/performance/__init__.py
|
reguly/devito
|
543b7be41ddbf1faa90224cca3824767756c9390
|
[
"MIT"
] | 949
|
2016-04-25T11:41:34.000Z
|
2019-12-27T10:43:40.000Z
|
tests/integration/bot/__init__.py
|
RaenonX/Jelly-Bot-API
|
c7da1e91783dce3a2b71b955b3a22b68db9056cf
|
[
"MIT"
] | 131
|
2020-01-08T17:43:13.000Z
|
2022-03-27T11:36:47.000Z
|
from .utils import * # noqa
| 14.5
| 28
| 0.655172
| 4
| 29
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 29
| 1
| 29
| 29
| 0.863636
| 0.137931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ed608d9077bda5ff4cba553de4aa147c887d57a6
| 208
|
py
|
Python
|
Day5/adventofcode5.py
|
oomoepoo/Adventofcode2k19
|
8cb4837b021bbffb7a8800b6d810eb1144ce8867
|
[
"Unlicense"
] | null | null | null |
Day5/adventofcode5.py
|
oomoepoo/Adventofcode2k19
|
8cb4837b021bbffb7a8800b6d810eb1144ce8867
|
[
"Unlicense"
] | null | null | null |
Day5/adventofcode5.py
|
oomoepoo/Adventofcode2k19
|
8cb4837b021bbffb7a8800b6d810eb1144ce8867
|
[
"Unlicense"
] | null | null | null |
def operate(opcode, arg1, arg2):
    """Apply an opcode: 1 adds, 2 multiplies, 3 and 4 pass arg1 through.

    Any other opcode falls through and yields None.
    """
    if opcode == 1:
        return arg1 + arg2
    if opcode == 2:
        return arg1 * arg2
    if opcode in (3, 4):
        return arg1
| 23.111111
| 32
| 0.548077
| 27
| 208
| 4.222222
| 0.444444
| 0.350877
| 0.245614
| 0.315789
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 0.322115
| 208
| 9
| 33
| 23.111111
| 0.723404
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.555556
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
9c60a1182852ba1c524f7185a2786c9a8943315f
| 4,843
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_scatter_op.py
|
liym27/Paddle
|
50582071dce846a973a054c40fe194069657960a
|
[
"Apache-2.0"
] | 1
|
2019-10-10T03:47:33.000Z
|
2019-10-10T03:47:33.000Z
|
python/paddle/fluid/tests/unittests/test_scatter_op.py
|
liym27/Paddle
|
50582071dce846a973a054c40fe194069657960a
|
[
"Apache-2.0"
] | 1
|
2019-07-30T05:22:32.000Z
|
2019-07-30T05:22:32.000Z
|
python/paddle/fluid/tests/unittests/test_scatter_op.py
|
liym27/Paddle
|
50582071dce846a973a054c40fe194069657960a
|
[
"Apache-2.0"
] | 1
|
2020-02-21T07:40:27.000Z
|
2020-02-21T07:40:27.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
class TestScatterOp(OpTest):
    """Scatter op default case: rows of X at Ids are replaced by Updates."""

    def setUp(self):
        self.op_type = "scatter"
        base = np.ones((3, 3)).astype("float32")
        ids = np.array([1, 2]).astype("int32")
        new_rows = np.random.random((2, 3)).astype("float32")
        # Reference result computed with numpy fancy-index assignment.
        expected = np.copy(base)
        expected[ids] = new_rows
        self.inputs = {'X': base, 'Ids': ids, 'Updates': new_rows}
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['Updates'], 'Out', in_place=True)
class TestScatterOp0(OpTest):
    """Scatter op with an explicit overwrite=True attribute."""

    def setUp(self):
        self.op_type = "scatter"
        base = np.ones((3, 3)).astype("float32")
        ids = np.array([1, 2]).astype("int32")
        new_rows = np.random.random((2, 3)).astype("float32")
        # Reference result computed with numpy fancy-index assignment.
        expected = np.copy(base)
        expected[ids] = new_rows
        self.inputs = {'X': base, 'Ids': ids, 'Updates': new_rows}
        self.attrs = {'overwrite': True}
        self.outputs = {'Out': expected}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['Updates'], 'Out', in_place=True)
class TestScatterOp1(OpTest):
    """Scatter with overwrite=False: duplicate indices accumulate updates."""

    def setUp(self):
        self.op_type = "scatter"
        ref_np = np.ones((3, 3)).astype("float32")
        zeros_np = np.zeros([2, 3]).astype('float32')
        # Both update rows target row 1, so they must be summed, not overwritten.
        index_np = np.array([1, 1]).astype("int32")
        updates_np = np.random.random((2, 3)).astype("float32")
        # Reference result: zero the targeted rows first, then add each update
        # row in turn (numpy fancy-index assignment alone would keep only the
        # last duplicate, so the loop is required).
        output_np = np.copy(ref_np)
        output_np[index_np] = zeros_np
        for i in range(0, len(index_np)):
            output_np[index_np[i]] += updates_np[i]
        self.attrs = {'overwrite': False}
        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['Updates'], 'Out', in_place=True)
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestScatterOp2(OpTest):
    """CUDA variant of the overwrite scatter case, run on a CUDAPlace."""

    def setUp(self):
        self.op_type = "scatter"
        ref_np = np.ones((3, 3)).astype("float32")
        index_np = np.array([1, 2]).astype("int32")
        updates_np = np.random.random((2, 3)).astype("float32")
        # Reference result: rows 1 and 2 of the ones-matrix replaced by updates.
        output_np = np.copy(ref_np)
        output_np[index_np] = updates_np
        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        # Check against the GPU kernel with atol=1e-3.
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-3)

    def test_check_grad(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(place, ['Updates'], 'Out', in_place=True)
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestScatterOp3(OpTest):
    """CUDA variant of the accumulating (overwrite=False) scatter case."""

    def setUp(self):
        self.op_type = "scatter"
        ref_np = np.ones((3, 3)).astype("float32")
        zeros_np = np.zeros([2, 3]).astype('float32')
        # Both update rows target row 1, so they must be summed, not overwritten.
        index_np = np.array([1, 1]).astype("int32")
        updates_np = np.random.random((2, 3)).astype("float32")
        # Reference result: zero the targeted rows first, then add each update
        # row in turn to model the accumulate semantics.
        output_np = np.copy(ref_np)
        output_np[index_np] = zeros_np
        for i in range(0, len(index_np)):
            output_np[index_np[i]] += updates_np[i]
        self.attrs = {'overwrite': False}
        self.inputs = {'X': ref_np, 'Ids': index_np, 'Updates': updates_np}
        self.outputs = {'Out': output_np}

    def test_check_output(self):
        # Check against the GPU kernel with atol=1e-3.
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-3)

    def test_check_grad(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            self.check_grad_with_place(place, ['Updates'], 'Out', in_place=True)
if __name__ == "__main__":
unittest.main()
| 35.610294
| 80
| 0.629155
| 687
| 4,843
| 4.216885
| 0.187773
| 0.030376
| 0.057991
| 0.036244
| 0.762858
| 0.762858
| 0.762858
| 0.762858
| 0.762858
| 0.762858
| 0
| 0.02422
| 0.232707
| 4,843
| 135
| 81
| 35.874074
| 0.755382
| 0.12038
| 0
| 0.867347
| 0
| 0
| 0.08459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153061
| false
| 0
| 0.05102
| 0
| 0.255102
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9c610da191eb72ad34c7b9c1b2e825f23f17257d
| 37
|
py
|
Python
|
pyro/planning/__init__.py
|
SherbyRobotics/PyRobotics
|
86eb1189258f6f41642a149c813dd2fd6853bcc1
|
[
"MIT"
] | 14
|
2019-05-03T15:22:38.000Z
|
2022-03-14T15:31:54.000Z
|
pyro/planning/__init__.py
|
SherbyRobotics/PyRobotics
|
86eb1189258f6f41642a149c813dd2fd6853bcc1
|
[
"MIT"
] | 9
|
2019-08-01T14:22:13.000Z
|
2021-06-12T01:44:50.000Z
|
pyro/planning/__init__.py
|
SherbyRobotics/PyRobotics
|
86eb1189258f6f41642a149c813dd2fd6853bcc1
|
[
"MIT"
] | 9
|
2019-05-21T12:38:36.000Z
|
2022-03-29T16:28:45.000Z
|
from .plan import OpenLoopController
| 37
| 37
| 0.864865
| 4
| 37
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9c7dd97a072e77413324988ee7f723748f96b90e
| 157
|
py
|
Python
|
tests/test_core.py
|
VincentRouvreau/dockerhub_automated_build
|
6d85b208c5e7f1f43a29f42df7febf5faa3b9530
|
[
"MIT"
] | null | null | null |
tests/test_core.py
|
VincentRouvreau/dockerhub_automated_build
|
6d85b208c5e7f1f43a29f42df7febf5faa3b9530
|
[
"MIT"
] | null | null | null |
tests/test_core.py
|
VincentRouvreau/dockerhub_automated_build
|
6d85b208c5e7f1f43a29f42df7febf5faa3b9530
|
[
"MIT"
] | null | null | null |
import pytest
from sample import determinant
def test_matrice_determinant():
    """The determinant of a known 3x3 matrix matches the analytic value."""
    matrix = [[6, 1, 1], [4, -2, 5], [2, 8, 7]]
    expected = -306.0
    assert determinant(matrix) == pytest.approx(expected)
| 31.4
| 79
| 0.687898
| 25
| 157
| 4.24
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094891
| 0.127389
| 157
| 5
| 79
| 31.4
| 0.678832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
92c13d74b9efaafc78241e7b3abd20ba1aa80044
| 138
|
py
|
Python
|
lefi/exts/commands/core/__init__.py
|
Moros0741/Lefi
|
707dfcee45c386c4e9a70c776ac8ed1d0417bc14
|
[
"MIT"
] | null | null | null |
lefi/exts/commands/core/__init__.py
|
Moros0741/Lefi
|
707dfcee45c386c4e9a70c776ac8ed1d0417bc14
|
[
"MIT"
] | null | null | null |
lefi/exts/commands/core/__init__.py
|
Moros0741/Lefi
|
707dfcee45c386c4e9a70c776ac8ed1d0417bc14
|
[
"MIT"
] | null | null | null |
from .command import *
from .context import *
from .cooldowns import *
from .handler import *
from .parser import *
from .plugin import *
| 19.714286
| 24
| 0.73913
| 18
| 138
| 5.666667
| 0.444444
| 0.490196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 138
| 6
| 25
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
92d308697281c033358d86647594e3ed10099bb4
| 12,874
|
py
|
Python
|
src/envs/utils.py
|
tlan95/captionRL-env
|
72fa485b3e486f85fc58063a8f68bffcfcce4bd9
|
[
"MIT"
] | null | null | null |
src/envs/utils.py
|
tlan95/captionRL-env
|
72fa485b3e486f85fc58063a8f68bffcfcce4bd9
|
[
"MIT"
] | null | null | null |
src/envs/utils.py
|
tlan95/captionRL-env
|
72fa485b3e486f85fc58063a8f68bffcfcce4bd9
|
[
"MIT"
] | 1
|
2021-12-07T16:40:51.000Z
|
2021-12-07T16:40:51.000Z
|
from object2urdf import ObjectUrdfBuilder
import numpy as np
from matplotlib import pyplot as plt
import pybullet as p
import os
from src.envs.envList import *
from src.envs.reward_function import *
from src.envs.env_params import get_env_params
from src.envs.descriptions import generate_all_descriptions
def generate_urdfs(object_folder="./src/envs/ShapeNet/VEHICLE/"):
# Build entire libraries of URDFs
builder = ObjectUrdfBuilder(object_folder)
builder.build_library(force_overwrite=True, decompose_concave=True, force_decompose=False, center = 'top')
# # Generate urdf files for ShapeNet objects
# import os
# path = os.path.dirname(__file__)
# generate_urdfs(path + "/ShapeNet/VEHICLE/" )
# # Reconstruct the initial and final image of one episode. Finally we want the rgb_matrix for every state image of each episode.
# pixels = 600
# viewMatrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[-0.15, 0.14, 0.15], distance=1.3, yaw=-30, pitch=-30, roll=0, upAxisIndex=2)
# projectionMatrix = p.computeProjectionMatrixFOV(fov=50, aspect=1, nearVal=0.01, farVal=10)
# with np.load('./src/envs/collected_data/UR5/Tianwei/obs_act_etc/84/data.npz', allow_pickle=True) as data:
# obj_stuff_data = data['obj_stuff']
# obs_init = data['obs'][0]
# obs_final = data['obs'][-1]
# env_stuff_data_init = []
# for element in obj_stuff_data:
# env_stuff_data_init.append(element)
# env_stuff_data_init.append(obs_init)
# env_stuff_data_final = []
# for element in obj_stuff_data:
# env_stuff_data_final.append(element)
# env_stuff_data_final.append(obs_final)
# joint_poses_init = data['joint_poses'][0]
# joint_poses_final = data['joint_poses'][-1]
# print(env_stuff_data_init)
# print(env_stuff_data_final)
# # print(joint_poses_init)
# # print(joint_poses_final)
# env = UR5PlayAbsRPY1Obj()
# # save all descriptions from initial state to final state
# params = get_env_params()
# train_des, test_des = sample_descriptions_from_state(obs_init, obs_final, obj_stuff_data, params)
# print("train descriptions: ", train_des)
# print("test descriptions: ", test_des)
# # save initial image of an episode
# env.reset(o=env_stuff_data_init[2], description=None, info_reset=env_stuff_data_init[:2], joint_poses = joint_poses_init)
# img_arr_init = p.getCameraImage(pixels, pixels, viewMatrix, projectionMatrix, flags=p.ER_NO_SEGMENTATION_MASK, shadow=0,
# renderer=p.ER_BULLET_HARDWARE_OPENGL)[2][:, :, :3] # just the rgb
# plot1 = plt.figure(1)
# plt.imshow(img_arr_init)
# # save final image of an episode
# env.reset(o=env_stuff_data_final[2], description=None, info_reset=env_stuff_data_final[:2], joint_poses = joint_poses_final)
# img_arr_final = p.getCameraImage(pixels, pixels, viewMatrix, projectionMatrix, flags=p.ER_NO_SEGMENTATION_MASK, shadow=0,
# renderer=p.ER_BULLET_HARDWARE_OPENGL)[2][:, :, :3] # just the rgb
# plot2 = plt.figure(2)
# plt.imshow(img_arr_final)
# plt.show()
# # See the goal of each episode, and check if all rewards are True
# goals = {}
# r = {}
# nb_true_reward = 0
# for i in range(101):
# with np.load('./src/envs/collected_data/UR5/Tianwei6/obs_act_etc/' + str(i) + '/data.npz', allow_pickle=True) as data:
# g = data['goal_str']
# goals[i] = g
# obj_stuff_data = data['obj_stuff']
# obs_init = data['obs'][0]
# obs_final = data['obs'][-1]
# params = get_env_params()
# reward = False
# for gl in g:
# reward = get_reward_from_state(obs_init, obs_final, obj_stuff_data, gl, params)
# r[i] = reward
# if reward == True:
# nb_true_reward = nb_true_reward + 1
# print(goals)
# print(r)
# print("Nb true reward: ", nb_true_reward)
# # Reconstruct the initial and final image for each episode. We would like to save the initial and final image in JPG for each recorded episode (train and test separately).
# pixels = 600
# viewMatrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[-0.15, 0.14, 0.15], distance=1.3, yaw=-30, pitch=-30, roll=0, upAxisIndex=2)
# projectionMatrix = p.computeProjectionMatrixFOV(fov=50, aspect=1, nearVal=0.01, farVal=10)
# for i in range(101):
# if os.path.exists('./src/envs/collected_data/UR5/Tianwei6/obs_act_etc/' + str(i) + '/'):
# with np.load('./src/envs/collected_data/UR5/Tianwei6/obs_act_etc/' + str(i) + '/data.npz', allow_pickle=True) as data:
# goal = data['goal_str']
# obj_stuff_data = data['obj_stuff']
# obs_init = data['obs'][0]
# obs_final = data['obs'][-1]
# env_stuff_data_init = []
# for element in obj_stuff_data:
# env_stuff_data_init.append(element)
# env_stuff_data_init.append(obs_init)
# env_stuff_data_final = []
# for element in obj_stuff_data:
# env_stuff_data_final.append(element)
# env_stuff_data_final.append(obs_final)
# joint_poses_init = data['joint_poses'][0]
# joint_poses_final = data['joint_poses'][-1]
# env = UR5PlayAbsRPY1Obj()
# params = get_env_params()
# train_descriptions, test_descriptions, all_descriptions = generate_all_descriptions(params)
# train_des, test_des = sample_descriptions_from_state(obs_init, obs_final, obj_stuff_data, params)
# if not os.path.exists('./src/envs/dataset_images/'):
# os.makedirs('./src/envs/dataset_images/')
# if not os.path.exists('./src/envs/dataset_images/train/'):
# os.makedirs('./src/envs/dataset_images/train/')
# if not os.path.exists('./src/envs/dataset_images/test/'):
# os.makedirs('./src/envs/dataset_images/test/')
# if train_des:
# count_train = len(list(os.listdir('./src/envs/dataset_images/train/')))
# os.makedirs('./src/envs/dataset_images/train/' + str(count_train) + '/')
# # save initial image of an episode
# env.reset(o=env_stuff_data_init[2], description=None, info_reset=env_stuff_data_init[:2], joint_poses = joint_poses_init)
# img_arr_init = p.getCameraImage(pixels, pixels, viewMatrix, projectionMatrix, flags=p.ER_NO_SEGMENTATION_MASK, shadow=0,
# renderer=p.ER_BULLET_HARDWARE_OPENGL)[2][:, :, :3] # just the rgb
# plt.imsave('./src/envs/dataset_images/train/' + str(count_train) + '/initial.png', img_arr_init)
# # save final image of an episode
# env.reset(o=env_stuff_data_final[2], description=None, info_reset=env_stuff_data_final[:2], joint_poses = joint_poses_final)
# img_arr_final = p.getCameraImage(pixels, pixels, viewMatrix, projectionMatrix, flags=p.ER_NO_SEGMENTATION_MASK, shadow=0,
# renderer=p.ER_BULLET_HARDWARE_OPENGL)[2][:, :, :3] # just the rgb
# plt.imsave('./src/envs/dataset_images/train/' + str(count_train) + '/final.png', img_arr_final)
# if test_des:
# count_test = len(list(os.listdir('./src/envs/dataset_images/test/')))
# os.makedirs('./src/envs/dataset_images/test/' + str(count_test) + '/')
# # save initial image of an episode
# env.reset(o=env_stuff_data_init[2], description=None, info_reset=env_stuff_data_init[:2], joint_poses = joint_poses_init)
# img_arr_init = p.getCameraImage(pixels, pixels, viewMatrix, projectionMatrix, flags=p.ER_NO_SEGMENTATION_MASK, shadow=0,
# renderer=p.ER_BULLET_HARDWARE_OPENGL)[2][:, :, :3] # just the rgb
# plt.imsave('./src/envs/dataset_images/test/' + str(count_test) + '/initial.png', img_arr_init)
# # save final image of an episode
# env.reset(o=env_stuff_data_final[2], description=None, info_reset=env_stuff_data_final[:2], joint_poses = joint_poses_final)
# img_arr_final = p.getCameraImage(pixels, pixels, viewMatrix, projectionMatrix, flags=p.ER_NO_SEGMENTATION_MASK, shadow=0,
# renderer=p.ER_BULLET_HARDWARE_OPENGL)[2][:, :, :3] # just the rgb
# plt.imsave('./src/envs/dataset_images/test/' + str(count_test) + '/final.png', img_arr_final)
# Create the train and the test dataset.
# Each recorded episode is re-rendered (initial and final frame) and stored,
# together with its observations, joint poses, goal and sampled descriptions,
# under ./src/envs/dataset/{train,test}/<index>/data.npz.
pixels = 400
viewMatrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[-0.15, 0.14, 0.15], distance=1.3, yaw=-30, pitch=-30, roll=0, upAxisIndex=2)
projectionMatrix = p.computeProjectionMatrixFOV(fov=50, aspect=1, nearVal=0.01, farVal=10)


def _save_episode_split(split_dir, env, obj_stuff_data, obs_init, obs_final,
                        joint_poses_init, joint_poses_final, goal, descriptions):
    """Render an episode's initial/final frames and save one sample in *split_dir*.

    Refactors the previously duplicated train/test branches into one helper.
    The sample index is the current number of entries already in *split_dir*.
    """
    count = len(list(os.listdir(split_dir)))
    os.makedirs(split_dir + str(count) + '/')
    # Episode state lists: the obj_stuff elements followed by the observation,
    # matching the layout env.reset() expects (o=element 2, info_reset=first two).
    env_state_init = [element for element in obj_stuff_data] + [obs_init]
    env_state_final = [element for element in obj_stuff_data] + [obs_final]
    # save initial image of the episode
    env.reset(o=env_state_init[2], description=None, info_reset=env_state_init[:2], joint_poses=joint_poses_init)
    img_arr_init = p.getCameraImage(pixels, pixels, viewMatrix, projectionMatrix,
                                    flags=p.ER_NO_SEGMENTATION_MASK, shadow=0,
                                    renderer=p.ER_BULLET_HARDWARE_OPENGL)[2][:, :, :3]  # just the rgb
    # save final image of the episode
    env.reset(o=env_state_final[2], description=None, info_reset=env_state_final[:2], joint_poses=joint_poses_final)
    img_arr_final = p.getCameraImage(pixels, pixels, viewMatrix, projectionMatrix,
                                     flags=p.ER_NO_SEGMENTATION_MASK, shadow=0,
                                     renderer=p.ER_BULLET_HARDWARE_OPENGL)[2][:, :, :3]  # just the rgb
    np.savez(split_dir + str(count) + '/data',
             obs_init=obs_init, obs_final=obs_final, obj_stuff_data=obj_stuff_data,
             joint_poses_init=joint_poses_init, joint_poses_final=joint_poses_final,
             img_arr_init=img_arr_init, img_arr_final=img_arr_final,
             goal=goal, descriptions=descriptions)


for i in range(101):
    if os.path.exists('./src/envs/collected_data/UR5/Tianwei6/obs_act_etc/' + str(i) + '/'):
        with np.load('./src/envs/collected_data/UR5/Tianwei6/obs_act_etc/' + str(i) + '/data.npz', allow_pickle=True) as data:
            goal = data['goal_str']
            obj_stuff_data = data['obj_stuff']
            # First and last recorded observation / joint pose of the episode.
            obs_init = data['obs'][0]
            obs_final = data['obs'][-1]
            joint_poses_init = data['joint_poses'][0]
            joint_poses_final = data['joint_poses'][-1]
            env = UR5PlayAbsRPY1Obj()
            params = get_env_params()
            train_descriptions, test_descriptions, all_descriptions = generate_all_descriptions(params)
            # Descriptions sampled from this episode decide which split(s) it joins.
            train_des, test_des = sample_descriptions_from_state(obs_init, obs_final, obj_stuff_data, params)
            # Make sure the output directory tree exists.
            if not os.path.exists('./src/envs/dataset/'):
                os.makedirs('./src/envs/dataset/')
            if not os.path.exists('./src/envs/dataset/train/'):
                os.makedirs('./src/envs/dataset/train/')
            if not os.path.exists('./src/envs/dataset/test/'):
                os.makedirs('./src/envs/dataset/test/')
            if train_des:
                _save_episode_split('./src/envs/dataset/train/', env, obj_stuff_data,
                                    obs_init, obs_final, joint_poses_init,
                                    joint_poses_final, goal, train_des)
            if test_des:
                _save_episode_split('./src/envs/dataset/test/', env, obj_stuff_data,
                                    obs_init, obs_final, joint_poses_init,
                                    joint_poses_final, goal, test_des)
| 53.419087
| 312
| 0.662809
| 1,768
| 12,874
| 4.543552
| 0.102941
| 0.064982
| 0.059754
| 0.039836
| 0.846633
| 0.832815
| 0.81464
| 0.806921
| 0.800697
| 0.78912
| 0
| 0.018028
| 0.211512
| 12,874
| 240
| 313
| 53.641667
| 0.773323
| 0.604319
| 0
| 0.225806
| 0
| 0
| 0.097753
| 0.075896
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.145161
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
92dfe74ec6d7e33ed734c171ec3144ba9a5d84a5
| 95
|
py
|
Python
|
nnet/layer/__init__.py
|
zhaoyan1117/NeuralNet
|
a0343dd469e981bf9b4f18db0209ca9bfaf58c4f
|
[
"BSD-2-Clause"
] | null | null | null |
nnet/layer/__init__.py
|
zhaoyan1117/NeuralNet
|
a0343dd469e981bf9b4f18db0209ca9bfaf58c4f
|
[
"BSD-2-Clause"
] | null | null | null |
nnet/layer/__init__.py
|
zhaoyan1117/NeuralNet
|
a0343dd469e981bf9b4f18db0209ca9bfaf58c4f
|
[
"BSD-2-Clause"
] | null | null | null |
from ._fully_connected_layer import FullyConnectedLayer
from ._output_layer import OutputLayer
| 31.666667
| 55
| 0.894737
| 11
| 95
| 7.272727
| 0.727273
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084211
| 95
| 2
| 56
| 47.5
| 0.91954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
136daa61dca4b491331f6d486c35f27985fc62fe
| 34,305
|
py
|
Python
|
segregation/aspatial/multigroup_aspatial_indexes.py
|
weikang9009/segregation
|
403cc63772545f688308692d446c289ed2e7f99a
|
[
"BSD-3-Clause"
] | null | null | null |
segregation/aspatial/multigroup_aspatial_indexes.py
|
weikang9009/segregation
|
403cc63772545f688308692d446c289ed2e7f99a
|
[
"BSD-3-Clause"
] | null | null | null |
segregation/aspatial/multigroup_aspatial_indexes.py
|
weikang9009/segregation
|
403cc63772545f688308692d446c289ed2e7f99a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Multigroup Aspatial based Segregation Metrics
"""
__author__ = "Renan X. Cortes <renanc@ucr.edu>, Sergio J. Rey <sergio.rey@ucr.edu> and Elijah Knaap <elijah.knaap@ucr.edu>"
import numpy as np
from sklearn.metrics.pairwise import manhattan_distances
from segregation.util.util import _dep_message, DeprecationHelper, _nan_handle
# Including old and new api in __all__ so users can use both
__all__ = [
    'Multi_Dissim',
    'MultiDissim',
    'Multi_Gini_Seg',
    'MultiGiniSeg',
    'Multi_Normalized_Exposure',
    'MultiNormalizedExposure',
    'Multi_Information_Theory',
    'MultiInformationTheory',
    'Multi_Relative_Diversity',
    'MultiRelativeDiversity',
    'Multi_Squared_Coefficient_of_Variation',
    'MultiSquaredCoefficientVariation',
    'Multi_Diversity',
    'MultiDiversity',
    'Simpsons_Concentration',
    'SimpsonsConcentration',
    'Simpsons_Interaction',
    'SimpsonsInteraction',
    'Multi_Divergence',
    'MultiDivergence'
]

# The Deprecation calls of the classes are located in the end of this script #

# suppress numpy divide by zero warnings because it occurs a lot during the
# calculation of many indices (e.g. log/division terms where a group share is
# zero); affected cells are handled downstream with np.nansum.
np.seterr(divide='ignore', invalid='ignore')
def _multi_dissim(data, groups):
    """
    Calculation of Multigroup Dissimilarity index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Multigroup Dissimilarity Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Sakoda, James M. "A generalized index of dissimilarity." Demography 18.2 (1981): 245-250.

    Reference: :cite:`sakoda1981generalized`.
    """
    core_data = data[groups]
    # NOTE(review): the original assigned the cleaned frame to ``data`` but then
    # built ``df`` from the raw ``core_data``, silently discarding the NaN
    # handling. Use the cleaned frame for the computation (presumably
    # _nan_handle drops/fills missing observations -- confirm against
    # segregation.util.util).
    data = _nan_handle(core_data)
    df = np.array(data)

    n = df.shape[0]  # number of spatial units
    K = df.shape[1]  # number of groups

    T = df.sum()                    # total population
    ti = df.sum(axis=1)             # population of each unit
    pik = df / ti[:, None]          # group shares within each unit
    Pk = df.sum(axis=0) / df.sum()  # overall group shares
    Is = (Pk * (1 - Pk)).sum()      # Simpson's interaction (normalizer)

    # Population-weighted absolute deviations of unit shares from overall shares.
    multi_D = 1 / (2 * T * Is) * np.multiply(
        np.abs(pik - Pk),
        np.repeat(ti, K, axis=0).reshape(n, K)).sum()

    return multi_D, core_data, groups
class MultiDissim:
    """Multigroup Dissimilarity index.

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Attributes
    ----------
    statistic : float
                Multigroup Dissimilarity Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    Using 2000 Census Tract Data for Sacramento MSA, CA, with the White, Black,
    Asian and Hispanic populations as the groups of interest:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiDissim
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiDissim(input_df, groups_list)
    >>> index.statistic
    0.41340872573177806

    Notes
    -----
    Based on Sakoda, James M. "A generalized index of dissimilarity."
    Demography 18.2 (1981): 245-250.

    Reference: :cite:`sakoda1981generalized`.
    """

    def __init__(self, data, groups):
        # Delegate the estimation to the module-level helper and expose its
        # outputs as attributes.
        statistic, core_data, fitted_groups = _multi_dissim(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = fitted_groups
        self._function = _multi_dissim
def _multi_gini_seg(data, groups):
    """
    Calculation of Multigroup Gini Segregation index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Multigroup Gini Segregation Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67.

    Reference: :cite:`reardon2002measures`.
    """
    core_data = data[groups]
    # NOTE(review): use the NaN-handled frame; the original computed ``data =
    # _nan_handle(core_data)`` but then ignored it and used raw ``core_data``.
    data = _nan_handle(core_data)
    df = np.array(data)

    K = df.shape[1]

    T = df.sum()
    ti = df.sum(axis=1)
    pik = df / ti[:, None]
    Pk = df.sum(axis=0) / df.sum()
    Is = (Pk * (1 - Pk)).sum()

    # For each group, sum of population-weighted pairwise absolute differences
    # of unit shares (a Gini mean-difference term).
    elements_sum = np.empty(K)
    for k in range(K):
        elements_sum[k] = np.multiply(
            np.outer(ti, ti),
            manhattan_distances(pik[:, k].reshape(-1, 1))).sum()

    multi_Gini_Seg = elements_sum.sum() / (2 * (T**2) * Is)

    return multi_Gini_Seg, core_data, groups
class MultiGiniSeg:
    """Multigroup Gini Segregation index.

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Attributes
    ----------
    statistic : float
                Multigroup Gini Segregation Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    Using 2000 Census Tract Data for Sacramento MSA, CA, with the White, Black,
    Asian and Hispanic populations as the groups of interest:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiGiniSeg
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiGiniSeg(input_df, groups_list)
    >>> index.statistic
    0.5456349992598081

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup
    segregation." Sociological methodology 32.1 (2002): 33-67.

    Reference: :cite:`reardon2002measures`.
    """

    def __init__(self, data, groups):
        # Delegate to the module-level helper; store its outputs.
        statistic, core_data, fitted_groups = _multi_gini_seg(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = fitted_groups
        self._function = _multi_gini_seg
def _multi_normalized_exposure(data, groups):
    """
    Calculation of Multigroup Normalized Exposure index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Multigroup Normalized Exposure Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67.

    Reference: :cite:`reardon2002measures`.
    """
    core_data = data[groups]
    # NOTE(review): use the NaN-handled frame; the original discarded it and
    # built the array from the raw columns.
    data = _nan_handle(core_data)
    df = np.array(data)

    T = df.sum()
    ti = df.sum(axis=1)
    pik = df / ti[:, None]
    Pk = df.sum(axis=0) / df.sum()

    # Population-weighted squared deviations of unit shares, normalized by 1-Pk.
    MNE = ((ti[:, None] * (pik - Pk)**2) / (1 - Pk)).sum() / T

    return MNE, core_data, groups
class MultiNormalizedExposure:
    """Multigroup Normalized Exposure index.

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Attributes
    ----------
    statistic : float
                Multigroup Normalized Exposure Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    Using 2000 Census Tract Data for Sacramento MSA, CA, with the White, Black,
    Asian and Hispanic populations as the groups of interest:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiNormalizedExposure
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiNormalizedExposure(input_df, groups_list)
    >>> index.statistic
    0.18821879029994157

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup
    segregation." Sociological methodology 32.1 (2002): 33-67.

    Reference: :cite:`reardon2002measures`.
    """

    def __init__(self, data, groups):
        # Delegate to the module-level helper; store its outputs.
        statistic, core_data, fitted_groups = _multi_normalized_exposure(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = fitted_groups
        self._function = _multi_normalized_exposure
def _multi_information_theory(data, groups):
    """
    Calculation of Multigroup Information Theory index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Multigroup Information Theory Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67.

    Reference: :cite:`reardon2002measures`.
    """
    core_data = data[groups]
    # NOTE(review): use the NaN-handled frame; the original discarded it and
    # built the array from the raw columns.
    data = _nan_handle(core_data)
    df = np.array(data)

    T = df.sum()
    ti = df.sum(axis=1)
    pik = df / ti[:, None]
    Pk = df.sum(axis=0) / df.sum()

    # The natural logarithm is used, but this could be used with any base
    # following Footnote 3 of pg. 37 of Reardon, Sean F., and Glenn Firebaugh.
    # "Measures of multigroup segregation." Sociological methodology 32.1
    # (2002): 33-67.
    E = (Pk * np.log(1 / Pk)).sum()  # overall entropy
    # nansum skips the 0 * log(0) cells, which numpy evaluates to nan
    # (division warnings are globally suppressed via np.seterr above).
    MIT = np.nansum(ti[:, None] * pik * np.log(pik / Pk)) / (T * E)

    return MIT, core_data, groups
class MultiInformationTheory:
    """Multigroup Information Theory index.

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Attributes
    ----------
    statistic : float
                Multigroup Information Theory Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    Using 2000 Census Tract Data for Sacramento MSA, CA, with the White, Black,
    Asian and Hispanic populations as the groups of interest:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiInformationTheory
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiInformationTheory(input_df, groups_list)
    >>> index.statistic
    0.1710160297858887

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup
    segregation." Sociological methodology 32.1 (2002): 33-67.

    Reference: :cite:`reardon2002measures`.
    """

    def __init__(self, data, groups):
        # Delegate to the module-level helper; store its outputs.
        statistic, core_data, fitted_groups = _multi_information_theory(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = fitted_groups
        self._function = _multi_information_theory
def _multi_relative_diversity(data, groups):
    """
    Calculation of Multigroup Relative Diversity index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Multigroup Relative Diversity Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Reardon, Sean F. "Measures of racial diversity and segregation in multigroup and hierarchically structured populations." annual meeting of the Eastern Sociological Society, Philadelphia, PA. 1998.

    High diversity means less segregation.

    Reference: :cite:`reardon1998measures`.
    """
    core_data = data[groups]
    # NOTE(review): use the NaN-handled frame; the original discarded it and
    # built the array from the raw columns.
    data = _nan_handle(core_data)
    df = np.array(data)

    T = df.sum()
    ti = df.sum(axis=1)
    pik = df / ti[:, None]
    Pk = df.sum(axis=0) / df.sum()
    Is = (Pk * (1 - Pk)).sum()

    # Population-weighted squared deviations, normalized by total interaction.
    MRD = (ti[:, None] * (pik - Pk)**2).sum() / (T * Is)

    return MRD, core_data, groups
class MultiRelativeDiversity:
    """Multigroup Relative Diversity index.

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Attributes
    ----------
    statistic : float
                Multigroup Relative Diversity Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    Using 2000 Census Tract Data for Sacramento MSA, CA, with the White, Black,
    Asian and Hispanic populations as the groups of interest:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiRelativeDiversity
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiRelativeDiversity(input_df, groups_list)
    >>> index.statistic
    0.15820019878220337

    Notes
    -----
    Based on Reardon, Sean F. "Measures of racial diversity and segregation in
    multigroup and hierarchically structured populations." annual meeting of
    the Eastern Sociological Society, Philadelphia, PA. 1998.

    High diversity means less segregation.

    Reference: :cite:`reardon1998measures`.
    """

    def __init__(self, data, groups):
        # Delegate to the module-level helper; store its outputs.
        statistic, core_data, fitted_groups = _multi_relative_diversity(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = fitted_groups
        self._function = _multi_relative_diversity
def _multi_squared_coefficient_of_variation(data, groups):
    """
    Calculation of Multigroup Squared Coefficient of Variation index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Multigroup Squared Coefficient of Variation Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67.

    Reference: :cite:`reardon2002measures`.
    """
    core_data = data[groups]
    # NOTE(review): use the NaN-handled frame; the original discarded it and
    # built the array from the raw columns.
    data = _nan_handle(core_data)
    df = np.array(data)

    K = df.shape[1]

    T = df.sum()
    ti = df.sum(axis=1)
    pik = df / ti[:, None]
    Pk = df.sum(axis=0) / df.sum()

    # Squared deviations scaled by group size and averaged over K-1 groups.
    C = ((ti[:, None] * (pik - Pk)**2) / (T * (K - 1) * Pk)).sum()

    return C, core_data, groups
class MultiSquaredCoefficientVariation:
    """Multigroup Squared Coefficient of Variation index.

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Attributes
    ----------
    statistic : float
                Multigroup Squared Coefficient of Variation Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    Using 2000 Census Tract Data for Sacramento MSA, CA, with the White, Black,
    Asian and Hispanic populations as the groups of interest:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiSquaredCoefficientVariation
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiSquaredCoefficientVariation(input_df, groups_list)
    >>> index.statistic
    0.11875484641127525

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup
    segregation." Sociological methodology 32.1 (2002): 33-67.

    Reference: :cite:`reardon2002measures`.
    """

    def __init__(self, data, groups):
        # Delegate to the module-level helper; store its outputs.
        statistic, core_data, fitted_groups = _multi_squared_coefficient_of_variation(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = fitted_groups
        self._function = _multi_squared_coefficient_of_variation
def _multi_diversity(data, groups, normalized=False):
    """
    Calculation of Multigroup Diversity index

    Parameters
    ----------
    data       : a pandas DataFrame
    groups     : list of strings.
                 The variables names in data of the groups of interest of the analysis.
    normalized : bool. Default is False.
                 Whether the resulting index will be divided by its maximum
                 (natural log of the number of groups).

    Returns
    -------
    statistic : float
                Multigroup Diversity Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67 and Theil, Henry. "Statistical decomposition analysis; with applications in the social and administrative sciences". No. 04; HA33, T4.. 1972.

    This is also known as Theil's Entropy Index (Equation 2 of page 37 of Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67)

    High diversity means less segregation.

    Reference: :cite:`reardon2002measures`.
    """
    # Fixes vs. original: ``normalized`` was documented under Returns instead
    # of Parameters, and the NaN-handled frame was computed but discarded.
    core_data = data[groups]
    data = _nan_handle(core_data)
    df = np.array(data)

    Pk = df.sum(axis=0) / df.sum()
    E = -(Pk * np.log(Pk)).sum()  # Shannon entropy of the overall composition

    if normalized:
        K = df.shape[1]
        E = E / np.log(K)  # maximum attainable entropy is ln(K)

    return E, core_data, groups
class MultiDiversity:
    """
    Calculation of Multigroup Diversity index

    Parameters
    ----------
    data       : a pandas DataFrame
    groups     : list of strings.
                 The variables names in data of the groups of interest of the analysis.
    normalized : bool. Default is False.
                 Whether the resulting index will be divided by its maximum
                 (natural log of the number of groups).

    Attributes
    ----------
    statistic : float
                Multigroup Diversity Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    In this example, we are going to use 2000 Census Tract Data for Sacramento MSA, CA. The groups of interest are White, Black, Asian and Hispanic population.

    Firstly, we need to perform some import the modules and the respective function.

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiDiversity

    Then, we read the data and create an auxiliary list with only the necessary columns for fitting the index.

    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']

    The value is estimated below.

    >>> index = MultiDiversity(input_df, groups_list)
    >>> index.statistic
    0.9733112243997906

    You can also fit the normalized version of the multigroup diversity index.

    >>> normalized_index = MultiDiversity(input_df, groups_list, normalized = True)
    >>> normalized_index.statistic
    0.7020956383415715

    Notes
    -----
    Based on Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67 and Theil, Henry. "Statistical decomposition analysis; with applications in the social and administrative sciences". No. 04; HA33, T4.. 1972.

    This is also known as Theil's Entropy Index (Equation 2 of page 37 of Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67)

    High diversity means less segregation.

    Reference: :cite:`reardon2002measures`.
    """

    def __init__(self, data, groups, normalized=False):
        # Delegate the estimation to the module-level helper.
        aux = _multi_diversity(data, groups, normalized)
        self.statistic = aux[0]
        self.core_data = aux[1]
        self._groups = aux[2]
        self._function = _multi_diversity
def _simpsons_concentration(data, groups):
    """
    Calculation of Simpson's Concentration index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Simpson's Concentration Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Simpson, Edward H. "Measurement of diversity." nature 163.4148 (1949): 688.

    Simpson's concentration index (Lambda) can be simply interpreted as the probability that two individuals chosen at random and independently from the population will be found to belong to the same group.

    Higher values means higher segregation.

    Simpson's Concentration + Simpson's Interaction = 1

    Reference: :cite:`simpson1949measurement`.
    """
    core_data = data[groups]
    # NOTE(review): use the NaN-handled frame; the original discarded it and
    # built the array from the raw columns.
    data = _nan_handle(core_data)
    df = np.array(data)

    Pk = df.sum(axis=0) / df.sum()  # overall group shares
    Lambda = (Pk * Pk).sum()        # sum of squared shares

    return Lambda, core_data, groups
class SimpsonsConcentration:
    """Simpson's Concentration index.

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Attributes
    ----------
    statistic : float
                Simpson's Concentration Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    Using 2000 Census Tract Data for Sacramento MSA, CA, with the White, Black,
    Asian and Hispanic populations as the groups of interest:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import SimpsonsConcentration
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = SimpsonsConcentration(input_df, groups_list)
    >>> index.statistic
    0.49182413151957904

    Notes
    -----
    Based on Simpson, Edward H. "Measurement of diversity." nature 163.4148
    (1949): 688.

    Simpson's concentration index (Lambda) can be simply interpreted as the
    probability that two individuals chosen at random and independently from
    the population will be found to belong to the same group.

    Higher values means higher segregation.

    Simpson's Concentration + Simpson's Interaction = 1

    Reference: :cite:`simpson1949measurement`.
    """

    def __init__(self, data, groups):
        # Delegate to the module-level helper; store its outputs.
        statistic, core_data, fitted_groups = _simpsons_concentration(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = fitted_groups
        self._function = _simpsons_concentration
def _simpsons_interaction(data, groups):
    """
    Calculation of Simpson's Interaction index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Simpson's Interaction Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Equation 1 of page 37 of Reardon, Sean F., and Glenn Firebaugh. "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67.

    Simpson's interaction index (I) can be simply interpreted as the probability that two individuals chosen at random and independently from the population will be found to not belong to the same group.

    Higher values means lesser segregation.

    Simpson's Concentration + Simpson's Interaction = 1

    Reference: :cite:`reardon2002measures`.
    """
    core_data = data[groups]
    # NOTE(review): use the NaN-handled frame; the original discarded it and
    # built the array from the raw columns.
    data = _nan_handle(core_data)
    df = np.array(data)

    Pk = df.sum(axis=0) / df.sum()
    # Renamed from the ambiguous single-letter ``I`` for readability.
    interaction = (Pk * (1 - Pk)).sum()

    return interaction, core_data, groups
class SimpsonsInteraction:
    """Simpson's Interaction index.

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Attributes
    ----------
    statistic : float
                Simpson's Interaction Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    Using 2000 Census Tract Data for Sacramento MSA, CA, with the White, Black,
    Asian and Hispanic populations as the groups of interest:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import SimpsonsInteraction
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = SimpsonsInteraction(input_df, groups_list)
    >>> index.statistic
    0.508175868480421

    Notes
    -----
    Based on Equation 1 of page 37 of Reardon, Sean F., and Glenn Firebaugh.
    "Measures of multigroup segregation." Sociological methodology 32.1
    (2002): 33-67.

    Simpson's interaction index (I) can be simply interpreted as the
    probability that two individuals chosen at random and independently from
    the population will be found to not belong to the same group.

    Higher values means lesser segregation.

    Simpson's Concentration + Simpson's Interaction = 1

    Reference: :cite:`reardon2002measures`.
    """

    def __init__(self, data, groups):
        # Delegate to the module-level helper; store its outputs.
        statistic, core_data, fitted_groups = _simpsons_interaction(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = fitted_groups
        self._function = _simpsons_interaction
def _multi_divergence(data, groups):
    """
    Calculation of Multigroup Divergence index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Multigroup Divergence Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Roberto, Elizabeth. "The Divergence Index: A Decomposable Measure of Segregation and Inequality." arXiv preprint arXiv:1508.01167 (2015).

    Reference: :cite:`roberto2015divergence`.
    """
    core_data = data[groups]
    # NOTE(review): use the NaN-handled frame; the original discarded it and
    # built the array from the raw columns.
    data = _nan_handle(core_data)
    df = np.array(data)

    T = df.sum()
    ti = df.sum(axis=1)
    pik = df / ti[:, None]
    Pk = df.sum(axis=0) / df.sum()

    # KL divergence of each unit's composition from the overall composition;
    # nansum skips the 0 * log(0) cells (evaluated as nan by numpy).
    Di = np.nansum(pik * np.log(pik / Pk), axis=1)
    # Population-weighted average of the per-unit divergences.
    Divergence_Index = ((ti / T) * Di).sum()

    return Divergence_Index, core_data, groups
class MultiDivergence:
    """Multigroup Divergence index.

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Attributes
    ----------
    statistic : float
                Multigroup Divergence Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Examples
    --------
    Using 2000 Census Tract Data for Sacramento MSA, CA, with the White, Black,
    Asian and Hispanic populations as the groups of interest:

    >>> import libpysal
    >>> import geopandas as gpd
    >>> from segregation.multigroup_aspatial import MultiDivergence
    >>> input_df = gpd.read_file(libpysal.examples.get_path("sacramentot2.shp"))
    >>> groups_list = ['WHITE_', 'BLACK_', 'ASIAN_','HISP_']
    >>> index = MultiDivergence(input_df, groups_list)
    >>> index.statistic
    0.16645182134289443

    Notes
    -----
    Based on Roberto, Elizabeth. "The Divergence Index: A Decomposable Measure
    of Segregation and Inequality." arXiv preprint arXiv:1508.01167 (2015).

    Reference: :cite:`roberto2015divergence`.
    """

    def __init__(self, data, groups):
        # Delegate to the module-level helper; store its outputs.
        statistic, core_data, fitted_groups = _multi_divergence(data, groups)
        self.statistic = statistic
        self.core_data = core_data
        self._groups = fitted_groups
        self._function = _multi_divergence
# Deprecation Calls
# Each legacy snake_case name is kept as a DeprecationHelper wrapper around the
# corresponding new CamelCase class, so old imports keep working while showing
# the message built by _dep_message.
msg = _dep_message("Multi_Dissim", "MultiDissim")
Multi_Dissim = DeprecationHelper(MultiDissim, message=msg)

msg = _dep_message("Multi_Gini_Seg", "MultiGiniSeg")
Multi_Gini_Seg = DeprecationHelper(MultiGiniSeg, message=msg)

msg = _dep_message("Multi_Normalized_Exposure", "MultiNormalizedExposure")
Multi_Normalized_Exposure = DeprecationHelper(MultiNormalizedExposure, message=msg)

msg = _dep_message("Multi_Information_Theory", "MultiInformationTheory")
Multi_Information_Theory = DeprecationHelper(MultiInformationTheory, message=msg)

msg = _dep_message("Multi_Relative_Diversity", "MultiRelativeDiversity")
Multi_Relative_Diversity = DeprecationHelper(MultiRelativeDiversity, message=msg)

msg = _dep_message("Multi_Squared_Coefficient_of_Variation", "MultiSquaredCoefficientVariation")
Multi_Squared_Coefficient_of_Variation = DeprecationHelper(MultiSquaredCoefficientVariation, message=msg)

msg = _dep_message("Multi_Diversity", "MultiDiversity")
Multi_Diversity = DeprecationHelper(MultiDiversity, message=msg)

msg = _dep_message("Simpsons_Concentration", "SimpsonsConcentration")
Simpsons_Concentration = DeprecationHelper(SimpsonsConcentration, message=msg)

msg = _dep_message("Simpsons_Interaction", "SimpsonsInteraction")
Simpsons_Interaction = DeprecationHelper(SimpsonsInteraction, message=msg)

msg = _dep_message("Multi_Divergence", "MultiDivergence")
Multi_Divergence = DeprecationHelper(MultiDivergence, message=msg)
| 28.659148
| 275
| 0.650051
| 4,100
| 34,305
| 5.330244
| 0.08439
| 0.025625
| 0.043928
| 0.036607
| 0.8479
| 0.815777
| 0.803285
| 0.772994
| 0.772033
| 0.770934
| 0
| 0.026892
| 0.26072
| 34,305
| 1,197
| 276
| 28.659148
| 0.834825
| 0.65935
| 0
| 0.471366
| 0
| 0.004405
| 0.10304
| 0.059008
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088106
| false
| 0
| 0.013216
| 0
| 0.189427
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
13808463dc10d7ab57efb4f0ce7a33996b4be809
| 28
|
py
|
Python
|
minesweeper/__init__.py
|
macTracyHuang/cs50wProject2
|
7321cef4b95b00b5efa7d22d865fb5a2e90edaf1
|
[
"MIT"
] | 1
|
2020-06-21T07:00:13.000Z
|
2020-06-21T07:00:13.000Z
|
minesweeper/__init__.py
|
macTracyHuang/cs50wProject2
|
7321cef4b95b00b5efa7d22d865fb5a2e90edaf1
|
[
"MIT"
] | null | null | null |
minesweeper/__init__.py
|
macTracyHuang/cs50wProject2
|
7321cef4b95b00b5efa7d22d865fb5a2e90edaf1
|
[
"MIT"
] | null | null | null |
from .minesweeper import bp
| 14
| 27
| 0.821429
| 4
| 28
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
138286d82f03f0e9f67b2e0b7a6473da84ccdb04
| 41
|
py
|
Python
|
LoRaRF/__init__.py
|
chandrawi/LoRaRF-Python
|
6ca07b16b838788da061bb430d73192c4b53e3ee
|
[
"MIT"
] | 6
|
2021-04-12T20:15:23.000Z
|
2022-02-01T15:18:18.000Z
|
LoRaRF/__init__.py
|
chandrawi/LoRaRF-Python
|
6ca07b16b838788da061bb430d73192c4b53e3ee
|
[
"MIT"
] | 1
|
2021-04-09T10:30:49.000Z
|
2022-01-20T04:22:42.000Z
|
LoRaRF/__init__.py
|
chandrawi/LoRaRF-Python
|
6ca07b16b838788da061bb430d73192c4b53e3ee
|
[
"MIT"
] | 4
|
2021-07-16T08:29:36.000Z
|
2022-03-28T10:13:17.000Z
|
# __init__.py
from .SX126x import SX126x
| 13.666667
| 26
| 0.780488
| 6
| 41
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 0.146341
| 41
| 2
| 27
| 20.5
| 0.628571
| 0.268293
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
1396526624099d0e11a5ba0c8a3350fbfb81eae0
| 4,950
|
py
|
Python
|
tensorflow_federated/python/core/framework/__init__.py
|
truthiswill/federated
|
d25eeac036dfc2a485120a195fd904223cfc823a
|
[
"Apache-2.0"
] | 1
|
2022-02-08T01:11:14.000Z
|
2022-02-08T01:11:14.000Z
|
tensorflow_federated/python/core/framework/__init__.py
|
truthiswill/federated
|
d25eeac036dfc2a485120a195fd904223cfc823a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/core/framework/__init__.py
|
truthiswill/federated
|
d25eeac036dfc2a485120a195fd904223cfc823a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for extending the TensorFlow Federated core library."""
from tensorflow_federated.python.core.impl.compiler.building_blocks import ComputationBuildingBlock
from tensorflow_federated.python.core.impl.compiler.intrinsic_reductions import replace_intrinsics_with_bodies
from tensorflow_federated.python.core.impl.computation.computation_impl import ConcreteComputation
from tensorflow_federated.python.core.impl.computation.computation_serialization import deserialize_computation
from tensorflow_federated.python.core.impl.computation.computation_serialization import serialize_computation
from tensorflow_federated.python.core.impl.context_stack.context_base import Context
from tensorflow_federated.python.core.impl.context_stack.context_stack_base import ContextStack
from tensorflow_federated.python.core.impl.context_stack.get_context_stack import get_context_stack
from tensorflow_federated.python.core.impl.context_stack.set_default_context import set_default_context
from tensorflow_federated.python.core.impl.execution_contexts.sync_execution_context import ExecutionContext
from tensorflow_federated.python.core.impl.executors.cardinality_carrying_base import CardinalityCarrying
from tensorflow_federated.python.core.impl.executors.data_backend_base import DataBackend
from tensorflow_federated.python.core.impl.executors.data_descriptor import DataDescriptor
from tensorflow_federated.python.core.impl.executors.data_executor import DataExecutor
from tensorflow_federated.python.core.impl.executors.eager_tf_executor import EagerTFExecutor
from tensorflow_federated.python.core.impl.executors.executor_base import Executor
from tensorflow_federated.python.core.impl.executors.executor_factory import ExecutorFactory
from tensorflow_federated.python.core.impl.executors.executor_serialization import deserialize_value
from tensorflow_federated.python.core.impl.executors.executor_serialization import serialize_value
from tensorflow_federated.python.core.impl.executors.executor_service import ExecutorService
from tensorflow_federated.python.core.impl.executors.executor_stacks import local_executor_factory
from tensorflow_federated.python.core.impl.executors.executor_stacks import remote_executor_factory
from tensorflow_federated.python.core.impl.executors.executor_stacks import ResourceManagingExecutorFactory
from tensorflow_federated.python.core.impl.executors.executor_stacks import SizeInfo
from tensorflow_federated.python.core.impl.executors.executor_stacks import sizing_executor_factory
from tensorflow_federated.python.core.impl.executors.executor_stacks import SizingExecutorFactory
from tensorflow_federated.python.core.impl.executors.executor_stacks import thread_debugging_executor_factory
from tensorflow_federated.python.core.impl.executors.executor_value_base import ExecutorValue
from tensorflow_federated.python.core.impl.executors.federated_composing_strategy import FederatedComposingStrategy
from tensorflow_federated.python.core.impl.executors.federated_resolving_strategy import FederatedResolvingStrategy
from tensorflow_federated.python.core.impl.executors.federating_executor import FederatingExecutor
from tensorflow_federated.python.core.impl.executors.federating_executor import FederatingStrategy
from tensorflow_federated.python.core.impl.executors.ingestable_base import Ingestable
from tensorflow_federated.python.core.impl.executors.reference_resolving_executor import ReferenceResolvingExecutor
from tensorflow_federated.python.core.impl.executors.remote_executor import RemoteExecutor
from tensorflow_federated.python.core.impl.executors.thread_delegating_executor import ThreadDelegatingExecutor
from tensorflow_federated.python.core.impl.executors.transforming_executor import TransformingExecutor
from tensorflow_federated.python.core.impl.types.type_analysis import contains as type_contains
from tensorflow_federated.python.core.impl.types.type_conversions import type_from_tensors
from tensorflow_federated.python.core.impl.types.type_conversions import type_to_tf_tensor_specs
from tensorflow_federated.python.core.impl.types.type_serialization import deserialize_type
from tensorflow_federated.python.core.impl.types.type_serialization import serialize_type
from tensorflow_federated.python.core.impl.wrappers.computation_wrapper_instances import building_block_to_computation
| 83.898305
| 118
| 0.889697
| 625
| 4,950
| 6.8288
| 0.2464
| 0.200328
| 0.231724
| 0.292174
| 0.599578
| 0.599578
| 0.599578
| 0.475398
| 0.341612
| 0.300375
| 0
| 0.001715
| 0.057576
| 4,950
| 58
| 119
| 85.344828
| 0.913183
| 0.128485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
13aed3041ad5321f820556c496aadd9d5ae27d74
| 185
|
py
|
Python
|
jarvis_cli/interactive/__init__.py
|
clb6/jarvis-cli
|
44dfe0a94243e444eaddc72496efd677be9272e7
|
[
"Apache-2.0"
] | null | null | null |
jarvis_cli/interactive/__init__.py
|
clb6/jarvis-cli
|
44dfe0a94243e444eaddc72496efd677be9272e7
|
[
"Apache-2.0"
] | 3
|
2016-09-08T03:20:33.000Z
|
2016-12-08T05:19:57.000Z
|
jarvis_cli/interactive/__init__.py
|
clb6/jarvis-cli
|
44dfe0a94243e444eaddc72496efd677be9272e7
|
[
"Apache-2.0"
] | null | null | null |
from .for_events import prompt_event_occurred, prompt_event_category, \
prompt_event_weight, edit_event_description, prompt_event_artifacts
from .for_init import prompt_init_config
| 46.25
| 71
| 0.87027
| 26
| 185
| 5.653846
| 0.538462
| 0.29932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091892
| 185
| 3
| 72
| 61.666667
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b944f6fa0cbe518c4c16eb1177af1e2181b5539f
| 75
|
py
|
Python
|
fb2parser/fbparser.py
|
schokoro/Kataev
|
bcbde8ded4bbf472e7d644b38d87f625fdc53264
|
[
"Unlicense"
] | null | null | null |
fb2parser/fbparser.py
|
schokoro/Kataev
|
bcbde8ded4bbf472e7d644b38d87f625fdc53264
|
[
"Unlicense"
] | null | null | null |
fb2parser/fbparser.py
|
schokoro/Kataev
|
bcbde8ded4bbf472e7d644b38d87f625fdc53264
|
[
"Unlicense"
] | null | null | null |
from bs4 import BeautifulSoup
class Fbparser():
"""
"""
pass
| 9.375
| 29
| 0.573333
| 7
| 75
| 6.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0.306667
| 75
| 8
| 30
| 9.375
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
b94c86fb60a8d4ba816638cd4f58905d040df276
| 25
|
py
|
Python
|
course_api/common/test.py
|
alperen21/Sabanci-University-Course-RESTful-Api
|
3a8f772b65276d04cc4dd22d2df74253be0ea08d
|
[
"MIT"
] | null | null | null |
course_api/common/test.py
|
alperen21/Sabanci-University-Course-RESTful-Api
|
3a8f772b65276d04cc4dd22d2df74253be0ea08d
|
[
"MIT"
] | null | null | null |
course_api/common/test.py
|
alperen21/Sabanci-University-Course-RESTful-Api
|
3a8f772b65276d04cc4dd22d2df74253be0ea08d
|
[
"MIT"
] | null | null | null |
from .. import course_api
| 25
| 25
| 0.8
| 4
| 25
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 25
| 1
| 25
| 25
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b9dffbe909d0d0c0888590012ab742406babfe6d
| 15,780
|
py
|
Python
|
auto_derby/single_mode/context_test.py
|
DoctrineAlanK/auto-derby
|
781e860b06b9686e56feab115d2212251cd99d10
|
[
"MIT"
] | 235
|
2021-05-24T12:09:18.000Z
|
2022-03-31T03:44:08.000Z
|
auto_derby/single_mode/context_test.py
|
DoctrineAlanK/auto-derby
|
781e860b06b9686e56feab115d2212251cd99d10
|
[
"MIT"
] | 193
|
2021-05-27T16:49:14.000Z
|
2022-03-31T16:38:08.000Z
|
auto_derby/single_mode/context_test.py
|
DoctrineAlanK/auto-derby
|
781e860b06b9686e56feab115d2212251cd99d10
|
[
"MIT"
] | 89
|
2021-05-30T17:07:24.000Z
|
2022-03-27T15:41:04.000Z
|
from typing import Text
import pytest
from .. import _test
from .context import Context
@pytest.mark.parametrize(
"name",
tuple(
i.stem for i in ((_test.DATA_PATH / "single_mode").glob("command_scene_*.png"))
),
)
def test_recognize_command_scene(name: Text):
img, _ = _test.use_screenshot(f"single_mode/{name}.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(
ctx,
name=name,
)
def test_update_by_command_scene_1():
img, _ = _test.use_screenshot("single_mode/command_scene_1.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (1, 12, 2), ctx.date
assert round(ctx.vitality, 2) == 0.92, ctx.vitality
assert ctx.speed == 281, ctx.speed
assert ctx.stamina == 217, ctx.stamina
assert ctx.power == 210, ctx.power
assert ctx.guts == 187, ctx.guts
assert ctx.wisdom == 266, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_command_scene_2():
img, _ = _test.use_screenshot("single_mode/command_scene_2.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (2, 1, 1), ctx.date
assert round(ctx.vitality, 2) == 0.92, ctx.vitality
assert ctx.speed == 281, ctx.speed
assert ctx.stamina == 217, ctx.stamina
assert ctx.power == 210, ctx.power
assert ctx.guts == 198, ctx.guts
assert ctx.wisdom == 266, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_command_scene_3():
img, _ = _test.use_screenshot("single_mode/command_scene_3.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (3, 1, 1), ctx.date
assert round(ctx.vitality, 2) == 1, ctx.vitality
assert ctx.speed == 589, ctx.speed
assert ctx.stamina == 375, ctx.stamina
assert ctx.power == 461, ctx.power
assert ctx.guts == 263, ctx.guts
assert ctx.wisdom == 386, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_command_scene_4():
img, _ = _test.use_screenshot("single_mode/command_scene_4.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (2, 4, 1), ctx.date
assert round(ctx.vitality, 2) == 0.95, ctx.vitality
assert ctx.speed == 357, ctx.speed
assert ctx.stamina == 279, ctx.stamina
assert ctx.power == 275, ctx.power
assert ctx.guts == 216, ctx.guts
assert ctx.wisdom == 250, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_command_scene_5():
img, _ = _test.use_screenshot("single_mode/command_scene_5.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (3, 3, 1), ctx.date
assert round(ctx.vitality, 2) == 0.74, ctx.vitality
assert ctx.speed == 568, ctx.speed
assert ctx.stamina == 368, ctx.stamina
assert ctx.power == 341, ctx.power
assert ctx.guts == 307, ctx.guts
assert ctx.wisdom == 329, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_command_scene_6():
img, _ = _test.use_screenshot("single_mode/command_scene_6.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (2, 10, 1), ctx.date
assert round(ctx.vitality, 2) == 0.46, ctx.vitality
assert ctx.speed == 510, ctx.speed
assert ctx.stamina == 317, ctx.stamina
assert ctx.power == 351, ctx.power
assert ctx.guts == 298, ctx.guts
assert ctx.wisdom == 314, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_command_scene_7():
img, _ = _test.use_screenshot("single_mode/command_scene_7.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (2, 12, 2), ctx.date
assert round(ctx.vitality, 2) == 0.33, ctx.vitality
assert ctx.speed == 615, ctx.speed
assert ctx.stamina == 316, ctx.stamina
assert ctx.power == 459, ctx.power
assert ctx.guts == 251, ctx.guts
assert ctx.wisdom == 382, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_command_scene_issue7():
img, _ = _test.use_screenshot("single_mode/command_scene_issue7.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (1, 0, 0)
assert round(ctx.vitality, 2) == 1
assert ctx.speed == 158
assert ctx.stamina == 190
assert ctx.power == 67
assert ctx.guts == 95
assert ctx.wisdom == 90
assert ctx.mood == ctx.MOOD_NORMAL
def test_update_by_command_scene_issue12():
img, _ = _test.use_screenshot("single_mode/command_scene_issue12.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (1, 12, 2), ctx.date
assert round(ctx.vitality, 2) == 0.80, ctx.vitality
assert ctx.speed == 266, ctx.speed
assert ctx.stamina == 228, ctx.stamina
assert ctx.power == 196, ctx.power
assert ctx.guts == 200, ctx.guts
assert ctx.wisdom == 176, ctx.wisdom
assert ctx.mood == ctx.MOOD_BAD, ctx.mood
def test_update_by_command_scene_issue12_2():
img, _ = _test.use_screenshot("single_mode/command_scene_issue12_2.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (1, 10, 1), ctx.date
assert round(ctx.vitality, 2) == 0.79, ctx.vitality
assert ctx.speed == 241, ctx.speed
assert ctx.stamina == 237, ctx.stamina
assert ctx.power == 144, ctx.power
assert ctx.guts == 187, ctx.guts
assert ctx.wisdom == 184, ctx.wisdom
assert ctx.mood == ctx.MOOD_GOOD, ctx.mood
def test_update_by_command_scene_issue17():
img, _ = _test.use_screenshot("single_mode/command_scene_issue17.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (1, 0, 0), ctx.date
assert round(ctx.vitality, 2) == 0.53, ctx.vitality
assert ctx.speed == 195, ctx.speed
assert ctx.stamina == 150, ctx.stamina
assert ctx.power == 119, ctx.power
assert ctx.guts == 115, ctx.guts
assert ctx.wisdom == 91, ctx.wisdom
assert ctx.mood == ctx.MOOD_GOOD, ctx.mood
def test_update_by_command_scene_issue17_2():
img, _ = _test.use_screenshot("single_mode/command_scene_issue17_2.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (1, 11, 2), ctx.date
assert round(ctx.vitality, 2) == 1, ctx.vitality
assert ctx.speed == 262, ctx.speed
assert ctx.stamina == 266, ctx.stamina
assert ctx.power == 142, ctx.power
assert ctx.guts == 156, ctx.guts
assert ctx.wisdom == 233, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_command_scene_issue41():
img, _ = _test.use_screenshot("single_mode/command_scene_issue41.png")
ctx = Context.new()
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (3, 11, 1), ctx.date
assert round(ctx.vitality, 2) == 0.84, ctx.vitality
assert ctx.speed == 1200, ctx.speed
assert ctx.stamina == 753, ctx.stamina
assert ctx.power == 616, ctx.power
assert ctx.guts == 364, ctx.guts
assert ctx.wisdom == 326, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_command_scene_issue113():
ctx = Context.new()
img, _ = _test.use_screenshot("single_mode/command_scene_issue113.png")
ctx.update_by_command_scene(img)
_test.snapshot_match(ctx)
assert ctx.date == (4, 0, 0), ctx.date
assert round(ctx.vitality, 2) == 0.72, ctx.vitality
assert ctx.speed == 1144, ctx.speed
assert ctx.stamina == 482, ctx.stamina
assert ctx.power == 459, ctx.power
assert ctx.guts == 343, ctx.guts
assert ctx.wisdom == 437, ctx.wisdom
assert ctx.mood == ctx.MOOD_VERY_GOOD, ctx.mood
def test_update_by_class_detail():
img, _ = _test.use_screenshot("single_mode/class_detail.png")
ctx = Context.new()
ctx.update_by_class_detail(img)
_test.snapshot_match(ctx)
assert ctx.fan_count == 1, ctx.fan_count
assert ctx.is_after_winning == False, ctx.is_after_winning
def test_update_by_class_detail_2():
img, _ = _test.use_screenshot("single_mode/class_detail_2.png")
ctx = Context.new()
ctx.update_by_class_detail(img)
_test.snapshot_match(ctx)
assert ctx.fan_count == 1225, ctx.fan_count
assert ctx.is_after_winning == True, ctx.is_after_winning
def test_update_by_class_detail_3():
img, _ = _test.use_screenshot("single_mode/class_detail_3.png")
ctx = Context.new()
ctx.update_by_class_detail(img)
_test.snapshot_match(ctx)
assert ctx.fan_count == 11950, ctx.fan_count
assert ctx.is_after_winning == True, ctx.is_after_winning
def test_update_by_class_detail_4():
img, _ = _test.use_screenshot("single_mode/class_detail_4.png")
ctx = Context.new()
ctx.update_by_class_detail(img)
_test.snapshot_match(ctx)
assert ctx.fan_count == 148805, ctx.fan_count
assert ctx.is_after_winning == True, ctx.is_after_winning
def test_update_by_class_detail_5():
img, _ = _test.use_screenshot("single_mode/class_detail_5.png")
ctx = Context.new()
ctx.update_by_class_detail(img)
_test.snapshot_match(ctx)
assert ctx.fan_count == 127591, ctx.fan_count
assert ctx.is_after_winning == True, ctx.is_after_winning
def test_update_by_class_detail_6():
img, _ = _test.use_screenshot("single_mode/class_detail_6.png")
ctx = Context.new()
ctx.update_by_class_detail(img)
_test.snapshot_match(ctx)
assert ctx.fan_count == 121794, ctx.fan_count
assert ctx.is_after_winning == True, ctx.is_after_winning
def test_update_by_class_detail_issue35():
img, _ = _test.use_screenshot("single_mode/class_detail_issue35.png")
ctx = Context.new()
ctx.update_by_class_detail(img)
_test.snapshot_match(ctx)
assert ctx.fan_count == 1129, ctx.fan_count
assert ctx.is_after_winning == True, ctx.is_after_winning
def test_update_by_class_detail_issue35_2():
img, _ = _test.use_screenshot("single_mode/class_detail_issue35_2.png")
ctx = Context.new()
ctx.update_by_class_detail(img)
_test.snapshot_match(ctx)
assert ctx.fan_count == 4119, ctx.fan_count
assert ctx.is_after_winning == True, ctx.is_after_winning
def test_update_by_class_detail_issue86():
img, _ = _test.use_screenshot("single_mode/class_detail_issue86.png")
ctx = Context.new()
ctx.update_by_class_detail(img)
_test.snapshot_match(ctx)
assert ctx.fan_count == 88556, ctx.fan_count
assert ctx.is_after_winning == True, ctx.is_after_winning
def test_update_by_character_detail():
img, _ = _test.use_screenshot("single_mode/character_detail.png")
ctx = Context.new()
ctx.update_by_character_detail(img)
assert ctx.turf == ctx.STATUS_A, ctx.turf
assert ctx.dart == ctx.STATUS_G, ctx.dart
assert ctx.sprint == ctx.STATUS_C, ctx.sprint
assert ctx.mile == ctx.STATUS_B, ctx.mile
assert ctx.intermediate == ctx.STATUS_A, ctx.intermediate
assert ctx.long == ctx.STATUS_C, ctx.long
assert ctx.lead == ctx.STATUS_D, ctx.lead
assert ctx.head == ctx.STATUS_A, ctx.head
assert ctx.middle == ctx.STATUS_A, ctx.middle
assert ctx.last == ctx.STATUS_G, ctx.last
def test_update_by_character_detail_2():
img, _ = _test.use_screenshot("single_mode/character_detail_2.png")
ctx = Context.new()
ctx.update_by_character_detail(img)
assert ctx.turf == ctx.STATUS_A, ctx.turf
assert ctx.dart == ctx.STATUS_E, ctx.dart
assert ctx.sprint == ctx.STATUS_D, ctx.sprint
assert ctx.mile == ctx.STATUS_D, ctx.mile
assert ctx.intermediate == ctx.STATUS_A, ctx.intermediate
assert ctx.long == ctx.STATUS_A, ctx.long
assert ctx.lead == ctx.STATUS_A, ctx.lead
assert ctx.head == ctx.STATUS_A, ctx.head
assert ctx.middle == ctx.STATUS_B, ctx.middle
assert ctx.last == ctx.STATUS_B, ctx.last
def test_update_by_character_detail_3():
img, _ = _test.use_screenshot("single_mode/character_detail_3.png")
ctx = Context.new()
ctx.update_by_character_detail(img)
assert ctx.turf == ctx.STATUS_S, ctx.turf
assert ctx.dart == ctx.STATUS_G, ctx.dart
assert ctx.sprint == ctx.STATUS_C, ctx.sprint
assert ctx.mile == ctx.STATUS_B, ctx.mile
assert ctx.intermediate == ctx.STATUS_A, ctx.intermediate
assert ctx.long == ctx.STATUS_C, ctx.long
assert ctx.lead == ctx.STATUS_D, ctx.lead
assert ctx.head == ctx.STATUS_A, ctx.head
assert ctx.middle == ctx.STATUS_A, ctx.middle
assert ctx.last == ctx.STATUS_G, ctx.last
def test_update_by_character_detail_4():
img, _ = _test.use_screenshot("single_mode/character_detail_4.png")
ctx = Context.new()
ctx.update_by_character_detail(img)
assert ctx.turf == ctx.STATUS_A, ctx.turf
assert ctx.dart == ctx.STATUS_G, ctx.dart
assert ctx.sprint == ctx.STATUS_C, ctx.sprint
assert ctx.mile == ctx.STATUS_B, ctx.mile
assert ctx.intermediate == ctx.STATUS_S, ctx.intermediate
assert ctx.long == ctx.STATUS_C, ctx.long
assert ctx.lead == ctx.STATUS_D, ctx.lead
assert ctx.head == ctx.STATUS_A, ctx.head
assert ctx.middle == ctx.STATUS_A, ctx.middle
assert ctx.last == ctx.STATUS_G, ctx.last
def test_update_by_character_detail_5():
img, _ = _test.use_screenshot("single_mode/character_detail_5.png")
ctx = Context.new()
ctx.update_by_character_detail(img)
assert ctx.turf == ctx.STATUS_A, ctx.turf
assert ctx.dart == ctx.STATUS_G, ctx.dart
assert ctx.sprint == ctx.STATUS_C, ctx.sprint
assert ctx.mile == ctx.STATUS_B, ctx.mile
assert ctx.intermediate == ctx.STATUS_A, ctx.intermediate
assert ctx.long == ctx.STATUS_A, ctx.long
assert ctx.lead == ctx.STATUS_A, ctx.lead
assert ctx.head == ctx.STATUS_D, ctx.head
assert ctx.middle == ctx.STATUS_F, ctx.middle
assert ctx.last == ctx.STATUS_G, ctx.last
assert ctx.conditions == set((ctx.CONDITION_HEADACHE,)), ctx.conditions
def test_update_by_character_detail_6():
img, _ = _test.use_screenshot("single_mode/character_detail_6.png")
ctx = Context.new()
ctx.update_by_character_detail(img)
assert ctx.turf == ctx.STATUS_A, ctx.turf
assert ctx.dart == ctx.STATUS_E, ctx.dart
assert ctx.sprint == ctx.STATUS_G, ctx.sprint
assert ctx.mile == ctx.STATUS_E, ctx.mile
assert ctx.intermediate == ctx.STATUS_A, ctx.intermediate
assert ctx.long == ctx.STATUS_A, ctx.long
assert ctx.lead == ctx.STATUS_C, ctx.lead
assert ctx.head == ctx.STATUS_A, ctx.head
assert ctx.middle == ctx.STATUS_A, ctx.middle
assert ctx.last == ctx.STATUS_G, ctx.last
assert ctx.conditions == set((ctx.CONDITION_OVERWEIGHT,)), ctx.conditions
def test_update_by_character_detail_issue39():
img, _ = _test.use_screenshot("single_mode/character_detail_issue39.png")
ctx = Context.new()
ctx.update_by_character_detail(img)
assert ctx.turf == ctx.STATUS_A, ctx.turf
assert ctx.dart == ctx.STATUS_F, ctx.dart
assert ctx.sprint == ctx.STATUS_F, ctx.sprint
assert ctx.mile == ctx.STATUS_C, ctx.mile
assert ctx.intermediate == ctx.STATUS_A, ctx.intermediate
assert ctx.long == ctx.STATUS_A, ctx.long
assert ctx.lead == ctx.STATUS_G, ctx.lead
assert ctx.head == ctx.STATUS_A, ctx.head
assert ctx.middle == ctx.STATUS_A, ctx.middle
assert ctx.last == ctx.STATUS_F, ctx.last
| 34.911504
| 87
| 0.701648
| 2,438
| 15,780
| 4.280558
| 0.073831
| 0.162131
| 0.029705
| 0.05941
| 0.934266
| 0.827616
| 0.821388
| 0.795324
| 0.690878
| 0.638846
| 0
| 0.033313
| 0.1782
| 15,780
| 451
| 88
| 34.988914
| 0.771437
| 0
| 0
| 0.487603
| 0
| 0
| 0.067554
| 0.065399
| 0
| 0
| 0
| 0
| 0.556474
| 1
| 0.085399
| false
| 0
| 0.011019
| 0
| 0.096419
| 0.019284
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6a13379b387645ae03b75b179052692e60589867
| 48
|
py
|
Python
|
lcd/models/__init__.py
|
pfe-everis/lcd
|
25f3fe7dc7e0c8ba02fb380dbcbe7752747b3fb5
|
[
"BSD-3-Clause"
] | 76
|
2019-11-22T04:28:37.000Z
|
2022-03-31T12:48:04.000Z
|
lcd/models/__init__.py
|
pfe-everis/lcd
|
25f3fe7dc7e0c8ba02fb380dbcbe7752747b3fb5
|
[
"BSD-3-Clause"
] | 10
|
2019-12-23T02:28:24.000Z
|
2022-03-18T08:08:16.000Z
|
lcd/models/__init__.py
|
pfe-everis/lcd
|
25f3fe7dc7e0c8ba02fb380dbcbe7752747b3fb5
|
[
"BSD-3-Clause"
] | 7
|
2019-11-23T08:21:52.000Z
|
2021-12-29T14:40:57.000Z
|
from .pointnet import *
from .patchnet import *
| 16
| 23
| 0.75
| 6
| 48
| 6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 2
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a2c91d20d38e700ce6f82bff85808c2880b7dc6
| 242
|
py
|
Python
|
typings/bpy/ops/material.py
|
Argmaster/PyR3
|
6786bcb6a101fe4bd4cc50fe43767b8178504b15
|
[
"MIT"
] | 2
|
2021-12-12T18:51:52.000Z
|
2022-02-23T09:49:16.000Z
|
src/blender/blender_autocomplete-master/2.92/bpy/ops/material.py
|
JonasWard/ClayAdventures
|
a716445ac690e4792e70658319aa1d5299f9c9e9
|
[
"MIT"
] | 2
|
2021-11-08T12:09:02.000Z
|
2021-12-12T23:01:12.000Z
|
src/blender/blender_autocomplete-master/2.92/bpy/ops/material.py
|
JonasWard/ClayAdventures
|
a716445ac690e4792e70658319aa1d5299f9c9e9
|
[
"MIT"
] | null | null | null |
import sys
import typing
def copy():
''' Copy the material settings and nodes
'''
pass
def new():
''' Add a new material
'''
pass
def paste():
''' Paste the material settings and nodes
'''
pass
| 8.962963
| 45
| 0.545455
| 29
| 242
| 4.551724
| 0.517241
| 0.166667
| 0.287879
| 0.333333
| 0.469697
| 0.469697
| 0
| 0
| 0
| 0
| 0
| 0
| 0.342975
| 242
| 26
| 46
| 9.307692
| 0.830189
| 0.433884
| 0
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| true
| 0.375
| 0.25
| 0
| 0.625
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
6a46deffc0f9f4a54eac36b0579978ff6c8127be
| 125
|
py
|
Python
|
exabel_data_sdk/stubs/exabel/api/math/all_pb2.py
|
burk/python-sdk
|
83fb81d09e0d6a407c8907a75bebb895decc7edc
|
[
"MIT"
] | null | null | null |
exabel_data_sdk/stubs/exabel/api/math/all_pb2.py
|
burk/python-sdk
|
83fb81d09e0d6a407c8907a75bebb895decc7edc
|
[
"MIT"
] | null | null | null |
exabel_data_sdk/stubs/exabel/api/math/all_pb2.py
|
burk/python-sdk
|
83fb81d09e0d6a407c8907a75bebb895decc7edc
|
[
"MIT"
] | null | null | null |
# Generated by generate_protobuf.sh.
# Contains all messages in *_pb2.py in a single module.
from .aggregation_pb2 import *
| 25
| 55
| 0.776
| 19
| 125
| 4.947368
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018868
| 0.152
| 125
| 4
| 56
| 31.25
| 0.867925
| 0.704
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6a49243c5a525052cc812aa3acb1a2c0de9c849a
| 130
|
py
|
Python
|
run_test.py
|
neso613/english_asr_pip_wheel
|
d7db59f97d0317028f610097fda0e35c059ee610
|
[
"Apache-2.0"
] | null | null | null |
run_test.py
|
neso613/english_asr_pip_wheel
|
d7db59f97d0317028f610097fda0e35c059ee610
|
[
"Apache-2.0"
] | null | null | null |
run_test.py
|
neso613/english_asr_pip_wheel
|
d7db59f97d0317028f610097fda0e35c059ee610
|
[
"Apache-2.0"
] | null | null | null |
from english_asr.conformer import get_text_from_speech
text = get_text_from_speech('1.flac')
print('English ASR output : ',text)
| 26
| 54
| 0.8
| 21
| 130
| 4.619048
| 0.571429
| 0.206186
| 0.226804
| 0.350515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008547
| 0.1
| 130
| 4
| 55
| 32.5
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0.209302
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
dbebfdae9b078a7fcea42683415aff2321c7c88f
| 27,315
|
py
|
Python
|
opentamp/util_classes/ik_controller.py
|
Algorithmic-Alignment-Lab/openTAMP-legacy
|
3b7c3be164cc968ad77a928286d6460cd70a670e
|
[
"MIT"
] | 2
|
2022-03-09T19:48:20.000Z
|
2022-03-26T17:31:07.000Z
|
opentamp/util_classes/ik_controller.py
|
Algorithmic-Alignment-Lab/OpenTAMP
|
eecb950bd273da8cbed4394487630e8453f2c242
|
[
"MIT"
] | null | null | null |
opentamp/util_classes/ik_controller.py
|
Algorithmic-Alignment-Lab/OpenTAMP
|
eecb950bd273da8cbed4394487630e8453f2c242
|
[
"MIT"
] | null | null | null |
"""
Adapted from: https://github.com/StanfordVL/robosuite/blob/master/robosuite/controllers/baxter_ik_controller.py
@inproceedings{corl2018surreal,
title={SURREAL: Open-Source Reinforcement Learning Framework and Robot Manipulation Benchmark},
author={Fan, Linxi and Zhu, Yuke and Zhu, Jiren and Liu, Zihua and Zeng, Orien and Gupta, Anchit and Creus-Costa, Joan and Savarese, Silvio and Fei-Fei, Li},
booktitle={Conference on Robot Learning},
year={2018}
}
"""
import os
import numpy as np
try:
import pybullet as p
except ImportError:
raise Exception(
"Please make sure pybullet is installed. Run `pip install pybullet==1.9.5`"
)
import opentamp
from opentamp.util_classes import transform_utils as T
class BaxterIKController(object):
    """
    Inverse kinematics for the Baxter robot, using Pybullet and the urdf description
    files.
    """
    def __init__(self, robot_jpos_getter, use_rot_mat=False):
        """
        Args:
            robot_jpos_getter (function): function that returns the joint positions of
                the robot to be controlled as a numpy array.
            use_rot_mat (bool): if True, orientation targets are expected as rotation
                matrices rather than quaternions.
        """
        # Set up inverse kinematics
        self.robot_jpos_getter = robot_jpos_getter
        self.use_rot_mat = use_rot_mat
        path = os.getcwd() + '/opentamp' + "/robot_info/baxter/baxter_description/urdf/baxter_mod.urdf"
        self.setup_inverse_kinematics(path)
        self.commanded_joint_positions = robot_jpos_getter()
        self.sync_state()
        # Bidirectional maps between pybullet joint indices and child link names.
        self._name2id = {}
        self._id2name = {}
        for i in range(p.getNumJoints(self.ik_robot)):
            jnt_info = p.getJointInfo(self.ik_robot, i)
            link_name = jnt_info[12]  # index 12 of getJointInfo is the child link name
            self._name2id[link_name] = i
            self._id2name[i] = link_name
    def id2name(self, ind):
        """Return the link name associated with pybullet joint index ``ind``."""
        return self._id2name[ind]
    def name2id(self, name):
        """Return the pybullet joint index associated with link ``name``."""
        return self._name2id[name]
    def get_control(self, right, left):
        """
        Returns joint velocities to control the robot after the target end effector
        positions and orientations are updated from arguments @left and @right.
        Args:
            left (dict): A dictionary to control the left end effector with these keys.
                dpos (numpy array): a 3 dimensional array corresponding to the desired
                    change in x, y, and z left end effector position.
                rotation (numpy array): a rotation matrix of shape (3, 3) corresponding
                    to the desired orientation of the left end effector.
            right (dict): A dictionary to control the right end effector with these keys.
                dpos (numpy array): a 3 dimensional array corresponding to the desired
                    change in x, y, and z right end effector position.
                rotation (numpy array): a rotation matrix of shape (3, 3) corresponding
                    to the desired orientation of the right end effector.
        Returns:
            velocities (numpy array): a flat array of joint velocity commands to apply
                to try and achieve the desired input control.
        """
        # Sync joint positions for IK.
        self.sync_ik_robot(self.robot_jpos_getter())
        # Compute target joint positions
        # NOTE(review): joint_positions_for_eef_command's signature is (cmd, use_right),
        # so here `right` is the command and `left` (a dict, always truthy) is used as
        # the use_right flag — confirm whether both arms were meant to be commanded.
        self.commanded_joint_positions = self.joint_positions_for_eef_command(
            right, left
        )
        # P controller from joint positions (from IK) to velocities
        velocities = np.zeros(14)
        deltas = self._get_current_error(
            self.robot_jpos_getter(), self.commanded_joint_positions
        )
        for i, delta in enumerate(deltas):
            velocities[i] = -2 * delta
        velocities = self.clip_joint_velocities(velocities)
        self.commanded_joint_velocities = velocities
        return velocities
        # For debugging purposes: set joint positions directly
        # robot.set_joint_positions(self.commanded_joint_positions)
    def sync_state(self):
        """
        Syncs the internal Pybullet robot state to the joint positions of the
        robot being controlled.
        """
        # sync IK robot state to the current robot joint positions
        self.sync_ik_robot(self.robot_jpos_getter())
        # make sure target pose is up to date
        pos_r, orn_r, pos_l, orn_l = self.ik_robot_eef_joint_cartesian_pose()
        self.ik_robot_target_pos_right = pos_r
        self.ik_robot_target_orn_right = orn_r
        self.ik_robot_target_pos_left = pos_l
        self.ik_robot_target_orn_left = orn_l
    def setup_inverse_kinematics(self, urdf_path):
        """
        This function is responsible for doing any setup for inverse kinematics.
        Inverse Kinematics maps end effector (EEF) poses to joint angles that
        are necessary to achieve those poses.
        """
        # These indices come from the urdf file we're using
        self.effector_right = 27
        self.effector_left = 45
        # Use PyBullet to handle inverse kinematics.
        # Set up a connection to the PyBullet simulator.
        p.connect(p.DIRECT)
        p.resetSimulation()
        self.ik_robot = p.loadURDF(urdf_path,
                                   (0, 0, 0),
                                   (0, 0, 0, 1),
                                   useFixedBase=1,
                                   flags=p.URDF_USE_SELF_COLLISION | \
                                         p.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS)
        # Relevant joints we care about. Many of the joints are fixed and don't count, so
        # we need this second map to use the right ones.
        self.actual = [13, 14, 15, 16, 17, 19, 20, 31, 32, 33, 34, 35, 37, 38]
        self.num_joints = p.getNumJoints(self.ik_robot)
        n = p.getNumJoints(self.ik_robot)
        self.rest = []
        self.lower = []
        self.upper = []
        self.ranges = []
        for i in range(n):
            info = p.getJointInfo(self.ik_robot, i)
            # Retrieve lower and upper ranges for each relevant joint
            # (info[3] is the joint's position index in the state vector;
            # -1 marks fixed joints, which are skipped).
            if info[3] > -1:
                self.rest.append(p.getJointState(self.ik_robot, i)[0])
                self.lower.append(info[8])
                self.upper.append(info[9])
                self.ranges.append(info[9] - info[8])
        # Simulation will update as fast as it can in real time, instead of waiting for
        # step commands like in the non-realtime case.
        p.setRealTimeSimulation(1)
    def sync_ik_robot(self, joint_positions, simulate=False, sync_last=True):
        """
        Force the internal robot model to match the provided joint angles.
        Args:
            joint_positions (list): a list or flat numpy array of joint positions.
            simulate (bool): If True, actually use physics simulation, else
                write to physics state directly.
            sync_last (bool): If False, don't sync the last joint angle. This
                is useful for directly controlling the roll at the end effector.
        """
        num_joints = len(joint_positions)
        if not sync_last:
            num_joints -= 1
        for i in range(num_joints):
            if simulate:
                p.setJointMotorControl2(
                    self.ik_robot,
                    self.actual[i],
                    p.POSITION_CONTROL,
                    targetVelocity=0,
                    targetPosition=joint_positions[i],
                    force=500,
                    positionGain=0.5,
                    velocityGain=1.,
                )
            else:
                # Note that we use self.actual[i], and not i
                p.resetJointState(self.ik_robot, self.actual[i], joint_positions[i])
    def sync_ik_from_attrs(self, attr_vals):
        """
        Write joint angles from an attribute-value dict directly into the
        internal pybullet robot. Recognized keys: 'rArmPose' (7 angles),
        'lArmPose' (7 angles), 'rGripper' and 'lGripper' (1 angle each).
        """
        if 'rArmPose' in attr_vals:
            for i in range(7):
                p.resetJointState(self.ik_robot, self.actual[i], attr_vals['rArmPose'][i])
        if 'lArmPose' in attr_vals:
            for i in range(7, 14):
                p.resetJointState(self.ik_robot, self.actual[i], attr_vals['lArmPose'][i-7])
        # NOTE(review): self.gripper_inds is never initialized anywhere in this
        # class — TODO confirm where it is expected to come from before relying
        # on the gripper branches below.
        if 'rGripper' in attr_vals:
            # BUG FIX: originally read attr_vals['Gripper'], which can never be
            # present when the guard checks for 'rGripper' (KeyError at runtime).
            p.resetJointState(self.ik_robot, self.gripper_inds[0], attr_vals['rGripper'][0])
        if 'lGripper' in attr_vals:
            p.resetJointState(self.ik_robot, self.gripper_inds[1], attr_vals['lGripper'][0])
    def ik_robot_eef_joint_cartesian_pose(self):
        """
        Returns the current cartesian pose of the last joint of the ik robot with respect
        to the base frame as a (pos, orn) tuple where orn is a x-y-z-w quaternion.
        """
        out = []
        for eff in [self.effector_right, self.effector_left]:
            eef_pos_in_world = np.array(p.getLinkState(self.ik_robot, eff)[0])
            eef_orn_in_world = np.array(p.getLinkState(self.ik_robot, eff)[1])
            eef_pose_in_world = T.pose2mat((eef_pos_in_world, eef_orn_in_world))
            base_pos_in_world = np.array(
                p.getBasePositionAndOrientation(self.ik_robot)[0]
            )
            base_orn_in_world = np.array(
                p.getBasePositionAndOrientation(self.ik_robot)[1]
            )
            base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
            world_pose_in_base = T.pose_inv(base_pose_in_world)
            eef_pose_in_base = T.pose_in_A_to_pose_in_B(
                pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base
            )
            out.extend(T.mat2pose(eef_pose_in_base))
        return out
    def get_manip_trans(self, right=True):
        """
        Return the 4x4 pose of one end effector (right by default) expressed
        in the robot base frame.
        """
        eff = self.effector_right if right else self.effector_left
        eef_pos_in_world = np.array(p.getLinkState(self.ik_robot, eff)[0])
        eef_orn_in_world = np.array(p.getLinkState(self.ik_robot, eff)[1])
        eef_pose_in_world = T.pose2mat((eef_pos_in_world, eef_orn_in_world))
        pos, quat = p.getBasePositionAndOrientation(self.ik_robot)
        base_pos_in_world = np.array(pos)
        base_orn_in_world = np.array(quat)
        base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
        world_pose_in_base = T.pose_inv(base_pose_in_world)
        eef_pose_in_base = T.pose_in_A_to_pose_in_B(
            pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base
        )
        return eef_pose_in_base
    def get_jnt_angles(self, right=True):
        """
        Return the current joint angles for one arm (right by default) as a
        list of floats, read from the internal pybullet robot.
        """
        jnts = self.actual[:7] if right else self.actual[7:]
        # BUG FIX: p.getJointStates requires the body id as the first argument,
        # and returns one (position, velocity, forces, torque) tuple per joint;
        # the original indexed [0] on the list, yielding a single state tuple.
        jnt_info = p.getJointStates(self.ik_robot, jnts)
        pos = [state[0] for state in jnt_info]
        return pos
    def get_jnt_bounds(self, right=True):
        """Return (lower, upper) joint-limit lists for one arm (right by default)."""
        return (self.lower[:7], self.upper[:7]) if right else (self.lower[7:], self.upper[7:])
    def inverse_kinematics(
        self,
        target_position,
        target_orientation,
        use_right,
        rest_poses,
    ):
        """
        Helper function to do inverse kinematics for a given target position and
        orientation in the PyBullet world frame.
        Args:
            target_position: A tuple, list, or numpy array of size 3 for position.
            target_orientation: A tuple, list, or numpy array of size 4 for
                a orientation quaternion.
            use_right (bool): if True solve for the right arm, else the left.
            rest_poses: A list of size @num_joints to favor ik solutions close by.
        Returns:
            A list of size 7 corresponding to the arm's joint angle solution.
        """
        # pybullet solves IK over the full kinematic chain; the 7 entries for the
        # requested arm are sliced out of the solution vector below.
        ndof = 48
        if use_right:
            ik_solution = list(
                p.calculateInverseKinematics(
                    self.ik_robot,
                    self.effector_right,
                    target_position,
                    targetOrientation=target_orientation,
                    restPoses=rest_poses[:7],
                    lowerLimits=self.lower,
                    upperLimits=self.upper,
                    jointRanges=self.ranges,
                    jointDamping=[0.1] * ndof,
                )
            )
            return ik_solution[1:8]
        else:
            ik_solution = list(
                p.calculateInverseKinematics(
                    self.ik_robot,
                    self.effector_left,
                    target_position,
                    targetOrientation=target_orientation,
                    restPoses=rest_poses[7:],
                    lowerLimits=self.lower,
                    upperLimits=self.upper,
                    jointRanges=self.ranges,
                    jointDamping=[0.1] * ndof,
                )
            )
            return ik_solution[8:15]
    def bullet_base_pose_to_world_pose(self, pose_in_base):
        """
        Convert a pose in the base frame to a pose in the world frame.
        Args:
            pose_in_base: a (pos, orn) tuple.
        Returns:
            pose_in_world: a (pos, orn) tuple.
        """
        pose_in_base = T.pose2mat(pose_in_base)
        base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])
        base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])
        base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
        pose_in_world = T.pose_in_A_to_pose_in_B(
            pose_A=pose_in_base, pose_A_in_B=base_pose_in_world
        )
        return T.mat2pose(pose_in_world)
    def joint_positions_for_eef_command(self, cmd, use_right):
        """
        This function runs inverse kinematics to back out target joint positions
        from the provided end effector command.
        Args:
            cmd (dict): with keys 'dpos' (target position) and 'rotation'
                (target orientation).
            use_right (bool): if truthy, command the right arm, else the left.
        Returns:
            A list of size 7 corresponding to the target arm joint angles.
        """
        dpos = cmd["dpos"]
        rotation = cmd["rotation"]
        if use_right:
            self.target_pos_right = self.ik_robot_target_pos_right #+ np.array([0, 0, 0.913])
            self.ik_robot_target_pos_right = dpos # += dpos
            self.ik_robot_target_orn_right = rotation
            world_targets = self.bullet_base_pose_to_world_pose(
                (self.ik_robot_target_pos_right, self.ik_robot_target_orn_right)
            )
        else:
            self.target_pos_left = self.ik_robot_target_pos_left #+ np.array([0, 0, 0.913])
            self.ik_robot_target_pos_left = dpos # += dpos
            self.ik_robot_target_orn_left = rotation
            world_targets = self.bullet_base_pose_to_world_pose(
                (self.ik_robot_target_pos_left, self.ik_robot_target_orn_left)
            )
        # convert from target pose in base frame to target pose in bullet world frame
        # Empirically, more iterations aren't needed, and it's faster
        for _ in range(1):
            rest_poses = self.robot_jpos_getter()
            arm_joint_pos = self.inverse_kinematics(
                world_targets[0],
                world_targets[1],
                use_right,
                rest_poses=rest_poses,
            )
            # Keep the uncommanded arm at its rest configuration while syncing.
            if use_right:
                both_arm_joint_pos = np.r_[arm_joint_pos, rest_poses[7:]]
            else:
                both_arm_joint_pos = np.r_[rest_poses[:7], arm_joint_pos]
            self.sync_ik_robot(both_arm_joint_pos, sync_last=True)
        return arm_joint_pos
    def _get_current_error(self, current, set_point):
        """
        Returns an array of differences between the desired joint positions and current
        joint positions. Useful for PID control.
        Args:
            current: the current joint positions.
            set_point: the joint positions that are desired as a numpy array.
        Returns:
            the current error in the joint positions.
        """
        error = current - set_point
        return error
    def clip_joint_velocities(self, velocities):
        """
        Clips joint velocities into a valid range ([-1, 1], element-wise, in place).
        """
        for i in range(len(velocities)):
            if velocities[i] >= 1.0:
                velocities[i] = 1.0
            elif velocities[i] <= -1.0:
                velocities[i] = -1.0
        return velocities
class HSRIKController(object):
    """
    Inverse kinematics for the HSR robot, using Pybullet and the urdf description
    files.
    """
    def __init__(self, robot_jpos_getter, use_rot_mat=False):
        """
        Args:
            robot_jpos_getter (function): function that returns the joint positions of
                the robot to be controlled as a numpy array.
            use_rot_mat (bool): if True, orientation targets are expected as rotation
                matrices rather than quaternions.
        """
        # Set up inverse kinematics
        self.robot_jpos_getter = robot_jpos_getter
        self.use_rot_mat = use_rot_mat
        path = os.getcwd() + '/opentamp' + "/robot_info/hsr/simple_hsrb4s.urdf"
        self.setup_inverse_kinematics(path)
        self.commanded_joint_positions = robot_jpos_getter()
        self.sync_state()
        # Bidirectional maps between pybullet joint indices and child link names.
        self._name2id = {}
        # BUG FIX: was `self._id2_name`, but the loop below (and id2name) use
        # `self._id2name`, so construction raised AttributeError.
        self._id2name = {}
        for i in range(p.getNumJoints(self.ik_robot)):
            # BUG FIX: getJointInfo requires the body id as the first argument.
            jnt_info = p.getJointInfo(self.ik_robot, i)
            link_name = jnt_info[12]  # index 12 of getJointInfo is the child link name
            self._name2id[link_name] = i
            self._id2name[i] = link_name
    def id2name(self, ind):
        """Return the link name associated with pybullet joint index ``ind``."""
        return self._id2name[ind]
    def name2id(self, name):
        """Return the pybullet joint index associated with link ``name``."""
        return self._name2id[name]
    def get_control(self, arm):
        """
        Returns joint velocities to control the robot after the target end effector
        position and orientation are updated from argument @arm.
        Args:
            arm (dict): A dictionary to control the end effector with these keys.
                dpos (numpy array): a 3 dimensional array corresponding to the desired
                    change in x, y, and z end effector position.
                rotation (numpy array): a rotation matrix of shape (3, 3) corresponding
                    to the desired orientation of the end effector.
        Returns:
            velocities (numpy array): a flat array of joint velocity commands to apply
                to try and achieve the desired input control.
        """
        # Sync joint positions for IK.
        self.sync_ik_robot(self.robot_jpos_getter())
        # Compute target joint positions
        self.commanded_joint_positions = self.joint_positions_for_eef_command(
            arm
        )
        # P controller from joint positions (from IK) to velocities
        velocities = np.zeros(14)
        deltas = self._get_current_error(
            self.robot_jpos_getter(), self.commanded_joint_positions
        )
        for i, delta in enumerate(deltas):
            velocities[i] = -2 * delta
        velocities = self.clip_joint_velocities(velocities)
        self.commanded_joint_velocities = velocities
        return velocities
        # For debugging purposes: set joint positions directly
        # robot.set_joint_positions(self.commanded_joint_positions)
    def sync_state(self):
        """
        Syncs the internal Pybullet robot state to the joint positions of the
        robot being controlled.
        """
        # sync IK robot state to the current robot joint positions
        self.sync_ik_robot(self.robot_jpos_getter())
        # make sure target pose is up to date
        pos, orn = self.ik_robot_eef_joint_cartesian_pose()
        self.ik_robot_target_pos = pos
        self.ik_robot_target_orn = orn
    def setup_inverse_kinematics(self, urdf_path):
        """
        This function is responsible for doing any setup for inverse kinematics.
        Inverse Kinematics maps end effector (EEF) poses to joint angles that
        are necessary to achieve those poses.
        """
        # These indices come from the urdf file we're using
        self.effector = 31
        # Use PyBullet to handle inverse kinematics.
        # Set up a connection to the PyBullet simulator.
        p.connect(p.DIRECT)
        p.resetSimulation()
        self.ik_robot = p.loadURDF(urdf_path, (0, 0, 0), (0, 0, 0, 1), useFixedBase=1)
        # Relevant joints we care about. Many of the joints are fixed and don't count, so
        # we need this second map to use the right ones.
        self.actual = [23, 24, 25, 26, 27]
        self.num_joints = p.getNumJoints(self.ik_robot)
        n = p.getNumJoints(self.ik_robot)
        self.rest = []
        self.lower = []
        self.upper = []
        self.ranges = []
        for i in range(n):
            info = p.getJointInfo(self.ik_robot, i)
            # Retrieve lower and upper ranges for each relevant joint
            # (info[3] is the joint's position index; -1 marks fixed joints).
            if info[3] > -1:
                self.rest.append(p.getJointState(self.ik_robot, i)[0])
                self.lower.append(info[8])
                self.upper.append(info[9])
                self.ranges.append(info[9] - info[8])
        # Simulation will update as fast as it can in real time, instead of waiting for
        # step commands like in the non-realtime case.
        p.setRealTimeSimulation(1)
    def sync_ik_robot(self, joint_positions, simulate=False, sync_last=True):
        """
        Force the internal robot model to match the provided joint angles.
        Args:
            joint_positions (list): a list or flat numpy array of joint positions.
            simulate (bool): If True, actually use physics simulation, else
                write to physics state directly.
            sync_last (bool): If False, don't sync the last joint angle. This
                is useful for directly controlling the roll at the end effector.
        """
        num_joints = len(joint_positions)
        if not sync_last:
            num_joints -= 1
        for i in range(num_joints):
            if simulate:
                p.setJointMotorControl2(
                    self.ik_robot,
                    self.actual[i],
                    p.POSITION_CONTROL,
                    targetVelocity=0,
                    targetPosition=joint_positions[i],
                    force=500,
                    positionGain=0.5,
                    velocityGain=1.,
                )
            else:
                # Note that we use self.actual[i], and not i
                p.resetJointState(self.ik_robot, self.actual[i], joint_positions[i])
    def ik_robot_eef_joint_cartesian_pose(self):
        """
        Returns the current cartesian pose of the last joint of the ik robot with respect
        to the base frame as a (pos, orn) tuple where orn is a x-y-z-w quaternion.
        """
        out = []
        for eff in [self.effector]:
            eef_pos_in_world = np.array(p.getLinkState(self.ik_robot, eff)[0])
            eef_orn_in_world = np.array(p.getLinkState(self.ik_robot, eff)[1])
            eef_pose_in_world = T.pose2mat((eef_pos_in_world, eef_orn_in_world))
            base_pos_in_world = np.array(
                p.getBasePositionAndOrientation(self.ik_robot)[0]
            )
            base_orn_in_world = np.array(
                p.getBasePositionAndOrientation(self.ik_robot)[1]
            )
            base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
            world_pose_in_base = T.pose_inv(base_pose_in_world)
            eef_pose_in_base = T.pose_in_A_to_pose_in_B(
                pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base
            )
            out.extend(T.mat2pose(eef_pose_in_base))
        return out
    def inverse_kinematics(
        self,
        target_position,
        target_orientation,
        rest_poses,
    ):
        """
        Helper function to do inverse kinematics for a given target position and
        orientation in the PyBullet world frame.
        Args:
            target_position: A tuple, list, or numpy array of size 3 for position.
            target_orientation: A tuple, list, or numpy array of size 4 for
                a orientation quaternion.
            rest_poses: A list of size @num_joints to favor ik solutions close by.
        Returns:
            A list corresponding to the joint angle solution.
        """
        # NOTE(review): restPoses is sliced to 7 entries although self.actual has
        # 5 joints for the HSR — confirm against the urdf's movable-joint count.
        ik_solution = list(
            p.calculateInverseKinematics(
                self.ik_robot,
                self.effector,
                target_position,
                targetOrientation=target_orientation,
                restPoses=rest_poses[:7],
                lowerLimits=self.lower,
                upperLimits=self.upper,
                jointRanges=self.ranges,
            )
        )
        return ik_solution
    def bullet_base_pose_to_world_pose(self, pose_in_base):
        """
        Convert a pose in the base frame to a pose in the world frame.
        Args:
            pose_in_base: a (pos, orn) tuple.
        Returns:
            pose_in_world: a (pos, orn) tuple.
        """
        pose_in_base = T.pose2mat(pose_in_base)
        base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])
        base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])
        base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))
        pose_in_world = T.pose_in_A_to_pose_in_B(
            pose_A=pose_in_base, pose_A_in_B=base_pose_in_world
        )
        return T.mat2pose(pose_in_world)
    def joint_positions_for_eef_command(self, cmd):
        """
        This function runs inverse kinematics to back out target joint positions
        from the provided end effector command.
        Args:
            cmd (dict): with keys 'dpos' (target position) and 'rotation'
                (target orientation).
        Returns:
            A list corresponding to the target joint angles.
        """
        dpos = cmd["dpos"]
        rotation = cmd["rotation"]
        self.target_pos = self.ik_robot_target_pos #+ np.array([0, 0, 0.913])
        self.ik_robot_target_pos = dpos # += dpos
        self.ik_robot_target_orn = rotation
        world_targets = self.bullet_base_pose_to_world_pose(
            (self.ik_robot_target_pos, self.ik_robot_target_orn)
        )
        # convert from target pose in base frame to target pose in bullet world frame
        # New pybullet iterates to convergence, so no need for multiple ik calls
        for _ in range(1):
            rest_poses = self.robot_jpos_getter()
            arm_joint_pos = self.inverse_kinematics(
                world_targets[0],
                world_targets[1],
                rest_poses=rest_poses,
            )
            self.sync_ik_robot(arm_joint_pos, sync_last=True)
        return arm_joint_pos
    def _get_current_error(self, current, set_point):
        """
        Returns an array of differences between the desired joint positions and current
        joint positions. Useful for PID control.
        Args:
            current: the current joint positions.
            set_point: the joint positions that are desired as a numpy array.
        Returns:
            the current error in the joint positions.
        """
        error = current - set_point
        return error
    def clip_joint_velocities(self, velocities):
        """
        Clips joint velocities into a valid range ([-1, 1], element-wise, in place).
        """
        for i in range(len(velocities)):
            if velocities[i] >= 1.0:
                velocities[i] = 1.0
            elif velocities[i] <= -1.0:
                velocities[i] = -1.0
        return velocities
| 37.315574
| 159
| 0.605821
| 3,507
| 27,315
| 4.494155
| 0.116339
| 0.033754
| 0.043271
| 0.022651
| 0.904194
| 0.896897
| 0.875642
| 0.862699
| 0.853118
| 0.853118
| 0
| 0.013178
| 0.319385
| 27,315
| 732
| 160
| 37.315574
| 0.834597
| 0.32451
| 0
| 0.6875
| 0
| 0
| 0.015796
| 0.005382
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078125
| false
| 0
| 0.015625
| 0.013021
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e013ace0118e7cf42be281d641b2e0f75a25bf21
| 40
|
py
|
Python
|
vnpy/gateway/tqsdk/__init__.py
|
oysx/vnpy
|
626225ab3d4e80e8503bf908b751368732e6de0e
|
[
"MIT"
] | null | null | null |
vnpy/gateway/tqsdk/__init__.py
|
oysx/vnpy
|
626225ab3d4e80e8503bf908b751368732e6de0e
|
[
"MIT"
] | null | null | null |
vnpy/gateway/tqsdk/__init__.py
|
oysx/vnpy
|
626225ab3d4e80e8503bf908b751368732e6de0e
|
[
"MIT"
] | null | null | null |
from .tqsdk_gateway import TqsdkGateway
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e045264b067c4b311abc40f93cd2b31f19bc074b
| 40
|
py
|
Python
|
lib/__init__.py
|
foozzi/Loki
|
3a91382831a714a1a3f3159753fb1982c94cea7c
|
[
"MIT"
] | null | null | null |
lib/__init__.py
|
foozzi/Loki
|
3a91382831a714a1a3f3159753fb1982c94cea7c
|
[
"MIT"
] | null | null | null |
lib/__init__.py
|
foozzi/Loki
|
3a91382831a714a1a3f3159753fb1982c94cea7c
|
[
"MIT"
] | 1
|
2020-01-23T06:21:23.000Z
|
2020-01-23T06:21:23.000Z
|
# Date: 07/03/2018
# Author: Pure-L0G1C
| 20
| 20
| 0.675
| 7
| 40
| 3.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 0.15
| 40
| 2
| 20
| 20
| 0.5
| 0.875
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ecedfd895ee0d0b42675cca7fe824dc1f534a95
| 1,374
|
py
|
Python
|
opsgenie_swagger/api/__init__.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
opsgenie_swagger/api/__init__.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | null | null | null |
opsgenie_swagger/api/__init__.py
|
Logicworks/opsgenie-python-sdk
|
244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
|
[
"Apache-2.0"
] | 1
|
2020-11-07T11:27:13.000Z
|
2020-11-07T11:27:13.000Z
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from opsgenie_swagger.api.account_api import AccountApi
from opsgenie_swagger.api.alert_api import AlertApi
from opsgenie_swagger.api.contact_api import ContactApi
from opsgenie_swagger.api.escalation_api import EscalationApi
from opsgenie_swagger.api.forwarding_rule_api import ForwardingRuleApi
from opsgenie_swagger.api.heartbeat_api import HeartbeatApi
from opsgenie_swagger.api.integration_api import IntegrationApi
from opsgenie_swagger.api.integration_action_api import IntegrationActionApi
from opsgenie_swagger.api.maintenance_api import MaintenanceApi
from opsgenie_swagger.api.notification_rule_api import NotificationRuleApi
from opsgenie_swagger.api.notification_rule_step_api import NotificationRuleStepApi
from opsgenie_swagger.api.policy_api import PolicyApi
from opsgenie_swagger.api.schedule_api import ScheduleApi
from opsgenie_swagger.api.schedule_override_api import ScheduleOverrideApi
from opsgenie_swagger.api.schedule_rotation_api import ScheduleRotationApi
from opsgenie_swagger.api.team_api import TeamApi
from opsgenie_swagger.api.team_member_api import TeamMemberApi
from opsgenie_swagger.api.team_routing_rule_api import TeamRoutingRuleApi
from opsgenie_swagger.api.user_api import UserApi
from opsgenie_swagger.api.who_is_on_call_api import WhoIsOnCallApi
| 52.846154
| 83
| 0.899563
| 185
| 1,374
| 6.367568
| 0.302703
| 0.203735
| 0.322581
| 0.373514
| 0.263158
| 0.064516
| 0
| 0
| 0
| 0
| 0
| 0.000782
| 0.069141
| 1,374
| 25
| 84
| 54.96
| 0.92025
| 0.02984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0eddbefe6e422bc8d871a1410aad46eb4d30823f
| 117
|
py
|
Python
|
silverware/text/__init__.py
|
idin/silverware
|
2c47931937f4b1d34e97a1dfa3e58255e57e3545
|
[
"MIT"
] | 1
|
2021-08-30T01:12:59.000Z
|
2021-08-30T01:12:59.000Z
|
silverware/text/__init__.py
|
idin/silverware
|
2c47931937f4b1d34e97a1dfa3e58255e57e3545
|
[
"MIT"
] | null | null | null |
silverware/text/__init__.py
|
idin/silverware
|
2c47931937f4b1d34e97a1dfa3e58255e57e3545
|
[
"MIT"
] | null | null | null |
from .get_html_text import get_html_text
from .get_text_and_depth import get_children, has_no_children, has_children
| 39
| 75
| 0.880342
| 21
| 117
| 4.380952
| 0.47619
| 0.152174
| 0.23913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08547
| 117
| 2
| 76
| 58.5
| 0.859813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
16004a0c401e36b028bf36d506b9adadd05dbe73
| 195
|
py
|
Python
|
Chapter04/deque_rotate.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 13
|
2018-06-21T01:44:49.000Z
|
2021-12-01T10:49:53.000Z
|
Chapter04/deque_rotate.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | null | null | null |
Chapter04/deque_rotate.py
|
PacktPublishing/Secret-Recipes-of-the-Python-Ninja
|
805d00c7a54927ba94c9077e9a580508ee3c5e56
|
[
"MIT"
] | 6
|
2018-10-05T08:29:24.000Z
|
2022-01-11T14:49:50.000Z
|
>>> d.rotate(1) # right rotation
>>> d
deque(['l', 'g', 'h', 'i', 'j', 'k'])
>>> d.rotate(-1) # left rotation
>>> d
deque(['g', 'h', 'i', 'j', 'k', 'l'])
| 27.857143
| 53
| 0.333333
| 26
| 195
| 2.5
| 0.5
| 0.215385
| 0.246154
| 0.123077
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015625
| 0.34359
| 195
| 6
| 54
| 32.5
| 0.492188
| 0.14359
| 0
| 0.333333
| 0
| 0
| 0.073171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16219d58647292a6ecd12e9fc05117fa8e099780
| 115
|
py
|
Python
|
zipline/pipeline/classifier.py
|
colin1alexander/zipline
|
ba42e6d8b972dcce9271526562ceff0cddd3fa30
|
[
"Apache-2.0"
] | 1
|
2019-03-29T01:46:35.000Z
|
2019-03-29T01:46:35.000Z
|
zipline/pipeline/classifier.py
|
colin1alexander/zipline
|
ba42e6d8b972dcce9271526562ceff0cddd3fa30
|
[
"Apache-2.0"
] | 1
|
2021-08-09T20:43:08.000Z
|
2021-08-09T20:43:08.000Z
|
zipline/pipeline/classifier.py
|
colin1alexander/zipline
|
ba42e6d8b972dcce9271526562ceff0cddd3fa30
|
[
"Apache-2.0"
] | 3
|
2017-08-31T12:34:13.000Z
|
2021-09-29T22:28:48.000Z
|
"""
classifier.py
"""
from zipline.pipeline.term import CompositeTerm
class Classifier(CompositeTerm):
    """A pipeline term subclass of CompositeTerm; currently adds no behavior of its own."""
    pass
| 11.5
| 47
| 0.747826
| 12
| 115
| 7.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147826
| 115
| 9
| 48
| 12.777778
| 0.877551
| 0.113043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
1673411cd95e62ec0b00051c68840c7dcaadaaac
| 49
|
py
|
Python
|
hello2.py
|
kartikchorasiya/Samaaj-Seva-For-HF
|
9b8c0cb263ab06f08493d2a3aa7fe6099ed128d6
|
[
"MIT"
] | 1
|
2018-10-16T17:48:27.000Z
|
2018-10-16T17:48:27.000Z
|
hello2.py
|
kartikchorasiya/Samaaj-Seva-For-HF
|
9b8c0cb263ab06f08493d2a3aa7fe6099ed128d6
|
[
"MIT"
] | 1
|
2018-10-16T16:11:42.000Z
|
2018-10-16T16:11:42.000Z
|
hello2.py
|
kartikchorasiya/Samaaj-Seva-For-HF
|
9b8c0cb263ab06f08493d2a3aa7fe6099ed128d6
|
[
"MIT"
] | 4
|
2018-10-16T16:14:23.000Z
|
2018-10-16T17:41:29.000Z
|
def hello():
    """Print a fixed greeting to stdout."""
    greeting = "Say hello to uncle"
    print(greeting)
| 12.25
| 32
| 0.571429
| 7
| 49
| 4
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 49
| 3
| 33
| 16.333333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
1682481e3b64e0e46ee7a291660f25b4fb6ff559
| 192
|
py
|
Python
|
vlasisku/components/__init__.py
|
Pendrokar/vlasisku
|
e1db74b6cbab51e84cc84cb1d046dbc9622bdf07
|
[
"Unlicense"
] | null | null | null |
vlasisku/components/__init__.py
|
Pendrokar/vlasisku
|
e1db74b6cbab51e84cc84cb1d046dbc9622bdf07
|
[
"Unlicense"
] | null | null | null |
vlasisku/components/__init__.py
|
Pendrokar/vlasisku
|
e1db74b6cbab51e84cc84cb1d046dbc9622bdf07
|
[
"Unlicense"
] | null | null | null |
from vlasisku.components.app import app
from vlasisku.components.general import general
from vlasisku.components.opensearch import os as opensearch
from vlasisku.components.pages import pages
| 38.4
| 59
| 0.864583
| 26
| 192
| 6.384615
| 0.384615
| 0.289157
| 0.53012
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 192
| 4
| 60
| 48
| 0.954023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
16b43b29f54fee709fc618b25c5d65ef6eb82f0b
| 5,394
|
py
|
Python
|
google/ads/google_ads/v6/proto/services/ad_group_ad_service_pb2_grpc.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/services/ad_group_ad_service_pb2_grpc.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/services/ad_group_ad_service_pb2_grpc.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.ads.google_ads.v6.proto.resources import ad_group_ad_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_ad__group__ad__pb2
from google.ads.google_ads.v6.proto.services import ad_group_ad_service_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2
class AdGroupAdServiceStub(object):
    """Proto file describing the Ad Group Ad service.
    Service to manage ads in an ad group.
    """
    # Generated gRPC client stub — regenerate from the .proto rather than editing.
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC: fetch the requested AdGroupAd resource.
        self.GetAdGroupAd = channel.unary_unary(
                '/google.ads.googleads.v6.services.AdGroupAdService/GetAdGroupAd',
                request_serializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2.GetAdGroupAdRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_ad__group__ad__pb2.AdGroupAd.FromString,
                )
        # Unary-unary RPC: apply a batch of ad create/update/remove operations.
        self.MutateAdGroupAds = channel.unary_unary(
                '/google.ads.googleads.v6.services.AdGroupAdService/MutateAdGroupAds',
                request_serializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2.MutateAdGroupAdsRequest.SerializeToString,
                response_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2.MutateAdGroupAdsResponse.FromString,
                )
class AdGroupAdServiceServicer(object):
    """Proto file describing the Ad Group Ad service.
    Service to manage ads in an ad group.
    """
    # Generated gRPC server base class — subclass and override these methods.
    def GetAdGroupAd(self, request, context):
        """Returns the requested ad in full detail.
        """
        # Default behavior: signal UNIMPLEMENTED until a subclass overrides this.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def MutateAdGroupAds(self, request, context):
        """Creates, updates, or removes ads. Operation statuses are returned.
        """
        # Default behavior: signal UNIMPLEMENTED until a subclass overrides this.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_AdGroupAdServiceServicer_to_server(servicer, server):
    """Register an AdGroupAdServiceServicer implementation with a grpc.Server.

    Args:
        servicer: An object implementing the AdGroupAdServiceServicer methods.
        server: The grpc.Server to attach the handlers to.
    """
    # Map each RPC name to a handler wrapping the servicer method with the
    # protobuf (de)serializers for its request/response messages.
    rpc_method_handlers = {
            'GetAdGroupAd': grpc.unary_unary_rpc_method_handler(
                    servicer.GetAdGroupAd,
                    request_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2.GetAdGroupAdRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_ad__group__ad__pb2.AdGroupAd.SerializeToString,
            ),
            'MutateAdGroupAds': grpc.unary_unary_rpc_method_handler(
                    servicer.MutateAdGroupAds,
                    request_deserializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2.MutateAdGroupAdsRequest.FromString,
                    response_serializer=google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2.MutateAdGroupAdsResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'google.ads.googleads.v6.services.AdGroupAdService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class AdGroupAdService(object):
    """Proto file describing the Ad Group Ad service.

    Service to manage ads in an ad group.

    NOTE: gRPC-generated convenience wrapper exposing each RPC as a
    standalone static method via the experimental grpc API.
    """

    @staticmethod
    def GetAdGroupAd(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the GetAdGroupAd RPC as a one-shot call (EXPERIMENTAL API)."""
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AdGroupAdService/GetAdGroupAd',
            google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2.GetAdGroupAdRequest.SerializeToString,
            google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_ad__group__ad__pb2.AdGroupAd.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def MutateAdGroupAds(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke the MutateAdGroupAds RPC as a one-shot call (EXPERIMENTAL API)."""
        return grpc.experimental.unary_unary(request, target, '/google.ads.googleads.v6.services.AdGroupAdService/MutateAdGroupAds',
            google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2.MutateAdGroupAdsRequest.SerializeToString,
            google_dot_ads_dot_googleads_dot_v6_dot_services_dot_ad__group__ad__service__pb2.MutateAdGroupAdsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 48.160714
| 164
| 0.731554
| 607
| 5,394
| 5.98682
| 0.186161
| 0.042378
| 0.047056
| 0.057788
| 0.772702
| 0.768299
| 0.756192
| 0.703908
| 0.703908
| 0.670336
| 0
| 0.008635
| 0.205599
| 5,394
| 111
| 165
| 48.594595
| 0.83944
| 0.11383
| 0
| 0.438356
| 1
| 0
| 0.091452
| 0.065871
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.041096
| 0.027397
| 0.191781
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16e2b9ff320947c585dda85353b569e14a0c3552
| 19,150
|
py
|
Python
|
tests/test_buffering.py
|
guaacoelho/devito
|
7e0b873114675752c4a49ed9076ee5d52997833c
|
[
"MIT"
] | null | null | null |
tests/test_buffering.py
|
guaacoelho/devito
|
7e0b873114675752c4a49ed9076ee5d52997833c
|
[
"MIT"
] | null | null | null |
tests/test_buffering.py
|
guaacoelho/devito
|
7e0b873114675752c4a49ed9076ee5d52997833c
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
from devito import (Constant, Grid, TimeFunction, SparseTimeFunction, Operator,
Eq, ConditionalDimension, SubDimension, SubDomain, configuration)
from devito.ir import FindSymbols, retrieve_iteration_tree
from devito.exceptions import InvalidOperator
def test_read_write():
    """The buffered operator must match the reference on a read+write Function."""
    nt = 10
    grid = Grid(shape=(4, 4))

    u = TimeFunction(name='u', grid=grid, save=nt)
    u1 = TimeFunction(name='u', grid=grid, save=nt)

    eqn = Eq(u.forward, u + 1)

    op_ref = Operator(eqn, opt='noop')
    op_buf = Operator(eqn, opt='buffering')

    # Inspect the generated code: two iteration trees and exactly one
    # buffer Array whose time extent is 2
    assert len(retrieve_iteration_tree(op_buf)) == 2
    arrays = [f for f in FindSymbols().visit(op_buf) if f.is_Array]
    assert len(arrays) == 1
    assert arrays[0].symbolic_shape[0] == 2

    op_ref.apply(time_M=nt-2)
    op_buf.apply(time_M=nt-2, u=u1)

    assert np.all(u.data == u1.data)
def test_write_only():
    """Buffering when the saved TimeFunction `u` is written but never read."""
    nt = 10
    grid = Grid(shape=(4, 4))
    time = grid.time_dim
    u = TimeFunction(name='u', grid=grid, save=nt)
    u1 = TimeFunction(name='u', grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    eqns = [Eq(v.forward, v + 1, implicit_dims=time),
            Eq(u, v)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code -- a single iteration tree is expected here,
    # presumably because no initial copy-in of `u` is required (write-only)
    assert len(retrieve_iteration_tree(op1)) == 1
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 1
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, u=u1, v=v1)
    assert np.all(u.data == u1.data)
    assert np.all(v.data == v1.data)
def test_read_only():
    """Buffering when the saved TimeFunction `u` is only read, never written."""
    nt = 10
    grid = Grid(shape=(2, 2))
    u = TimeFunction(name='u', grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    for i in range(nt):
        u.data[i, :] = i
    eqns = [Eq(v.forward, v + u.backward + u + u.forward + 1.)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code
    assert len(retrieve_iteration_tree(op1)) == 2
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 1
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, v=v1)
    assert np.all(v.data == v1.data)
def test_read_only_w_offset():
    """Read-only buffering starting from a non-zero timestep (`time_m=4`)."""
    nt = 10
    grid = Grid(shape=(2, 2))
    u = TimeFunction(name='u', grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    for i in range(nt):
        u.data[i, :] = i
    eqns = [Eq(v.forward, v + u.backward + u + u.forward + 1.)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    op0.apply(time_M=nt-2, time_m=4)
    op1.apply(time_M=nt-2, time_m=4, v=v1)
    assert np.all(v.data == v1.data)
def test_read_only_backwards():
    """Read-only buffering with a backward-propagating Eq (writes `v.backward`)."""
    nt = 10
    grid = Grid(shape=(2, 2))
    u = TimeFunction(name='u', grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    for i in range(nt):
        u.data[i, :] = i
    eqns = [Eq(v.backward, v + u.backward + u + u.forward + 1.)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code
    assert len(retrieve_iteration_tree(op1)) == 2
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 1
    op0.apply(time_m=1)
    op1.apply(time_m=1, v=v1)
    assert np.all(v.data == v1.data)
def test_read_only_backwards_unstructured():
    """
    Instead of the classic `time-1`, `time`, and `time+1`, here we access the
    buffered Function via `time-2`, `time-1` and `time+2`.
    """
    nt = 10
    grid = Grid(shape=(2, 2))
    u = TimeFunction(name='u', grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    for i in range(nt):
        u.data[i, :] = i
    eqns = [Eq(v.backward, v + u.backward.backward + u.backward + u.forward.forward + 1.)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code
    assert len(retrieve_iteration_tree(op1)) == 2
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 1
    # time_m=2 keeps the `u.backward.backward` access within bounds
    op0.apply(time_m=2)
    op1.apply(time_m=2, v=v1)
    assert np.all(v.data == v1.data)
@pytest.mark.parametrize('async_degree', [2, 4])
def test_async_degree(async_degree):
    """Buffering with an explicit buffer depth via `buf-async-degree`."""
    nt = 10
    grid = Grid(shape=(4, 4))
    u = TimeFunction(name='u', grid=grid, save=nt)
    u1 = TimeFunction(name='u', grid=grid, save=nt)
    eqn = Eq(u.forward, u + 1)
    op0 = Operator(eqn, opt='noop')
    op1 = Operator(eqn, opt=('buffering', {'buf-async-degree': async_degree}))
    # Check generated code
    assert len(retrieve_iteration_tree(op1)) == 2
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 1
    # The buffer's time extent must match the requested async degree
    assert buffers.pop().symbolic_shape[0] == async_degree
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, u=u1)
    assert np.all(u.data == u1.data)
def test_two_homogeneous_buffers():
    """Two buffered TimeFunctions accessed with identical access patterns."""
    nt = 10
    grid = Grid(shape=(4, 4))
    u = TimeFunction(name='u', grid=grid, save=nt)
    u1 = TimeFunction(name='u', grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid, save=nt)
    v1 = TimeFunction(name='v', grid=grid, save=nt)
    eqns = [Eq(u.forward, u + v + u.backward + v.backward + 1.),
            Eq(v.forward, u + v + u.backward + v.backward + 1.)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    op2 = Operator(eqns, opt=('buffering', 'fuse'))
    # Check generated code -- one buffer per TimeFunction
    assert len(retrieve_iteration_tree(op1)) == 2
    assert len(retrieve_iteration_tree(op2)) == 2
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 2
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, u=u1, v=v1)
    assert np.all(u.data == u1.data)
    assert np.all(v.data == v1.data)
def test_two_heterogeneous_buffers():
    """Two buffered TimeFunctions with different access patterns."""
    nt = 10
    grid = Grid(shape=(4, 4))
    u = TimeFunction(name='u', grid=grid, save=nt)
    u1 = TimeFunction(name='u', grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid, save=nt)
    v1 = TimeFunction(name='v', grid=grid, save=nt)
    for i in range(nt):
        u.data[i, :] = i
        u1.data[i, :] = i
    eqns = [Eq(u.forward, u + v + 1),
            Eq(v.forward, u + v + v.backward)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code
    assert len(retrieve_iteration_tree(op1)) == 3
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 2
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, u=u1, v=v1)
    assert np.all(u.data == u1.data)
    assert np.all(v.data == v1.data)
def test_over_injection():
    """Buffering in the presence of sparse injection and interpolation."""
    nt = 10
    grid = Grid(shape=(4, 4))
    src = SparseTimeFunction(name='src', grid=grid, npoint=1, nt=nt)
    rec = SparseTimeFunction(name='rec', grid=grid, npoint=1, nt=nt)
    u = TimeFunction(name="u", grid=grid, time_order=2, space_order=2, save=nt)
    u1 = TimeFunction(name="u", grid=grid, time_order=2, space_order=2, save=nt)
    src.data[:] = 1.
    eqns = ([Eq(u.forward, u + 1)] +
            src.inject(field=u.forward, expr=src) +
            rec.interpolate(expr=u.forward))
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code -- the expected tree count depends on the
    # configured backend language (one extra tree when it is not plain C)
    assert len(retrieve_iteration_tree(op1)) ==\
        5 + bool(configuration['language'] != 'C')
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 1
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, u=u1)
    assert np.all(u.data == u1.data)
def test_over_one_subdomain():
    """Read+write buffering with all equations confined to one SubDomain."""

    class sd0(SubDomain):
        name = 'd0'

        def define(self, dimensions):
            x, y = dimensions
            return {x: ('middle', 3, 3), y: ('middle', 3, 3)}

    s_d0 = sd0()
    nt = 10
    grid = Grid(shape=(10, 10), subdomains=(s_d0,))
    u = TimeFunction(name="u", grid=grid, save=nt)
    u1 = TimeFunction(name="u", grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    eqns = [Eq(v.forward, v + 1, subdomain=s_d0),
            Eq(u, v, subdomain=s_d0)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, u=u1, v=v1)
    assert np.all(u.data == u1.data)
    assert np.all(v.data == v1.data)
def test_over_one_subdomain_read_only():
    """Read-only buffering with the consuming equation on a SubDomain."""

    class sd0(SubDomain):
        name = 'd0'

        def define(self, dimensions):
            x, y = dimensions
            return {x: ('middle', 3, 3), y: ('middle', 3, 3)}

    s_d0 = sd0()
    nt = 10
    grid = Grid(shape=(10, 10), subdomains=(s_d0,))
    u = TimeFunction(name="u", grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    for i in range(nt):
        u.data[i, :] = i
    eqns = [Eq(v.forward, v + u + u.forward + 2., subdomain=s_d0)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, v=v1)
    assert np.all(v.data == v1.data)
def test_over_two_subdomains_illegal():
    """
    Cannot use buffering when:

        * an Eq writes to `f` using one set of SubDimensions
        * another Eq reads from `f` through a different set of SubDimensions

    as the second Eq may want to read unwritten memory (i.e., zero-valued)
    in the buffered Function, while with buffering it might end up reading values
    written in a previous iteration, thus breaking a storage-related RAW dependence.
    """

    class sd0(SubDomain):
        name = 'd0'

        def define(self, dimensions):
            x, y = dimensions
            return {x: ('middle', 3, 3), y: ('middle', 3, 3)}

    class sd1(SubDomain):
        # NOTE(review): same `name` as sd0 -- presumably irrelevant to this
        # test, but confirm it is not a typo for 'd1'
        name = 'd0'

        def define(self, dimensions):
            x, y = dimensions
            return {x: ('middle', 2, 2), y: ('middle', 2, 2)}

    s_d0 = sd0()
    s_d1 = sd1()
    nt = 10
    grid = Grid(shape=(10, 10), subdomains=(s_d0, s_d1))
    u = TimeFunction(name="u", grid=grid, save=nt)
    eqns = [Eq(u.forward, u + 1, subdomain=s_d0),
            Eq(u.forward, u.forward + 1, subdomain=s_d1)]
    # `pytest.raises` replaces the original try/except-assert idiom, which
    # silently PASSED when no exception was raised at all; it also avoids
    # the bare `except:`/`assert False` anti-pattern by letting any other
    # exception propagate as a genuine failure.
    with pytest.raises(InvalidOperator):
        Operator(eqns, opt='buffering')
@pytest.mark.xfail(reason="Cannot deal with non-overlapping SubDimensions yet")
def test_over_two_subdomains():
    """Write to `u` through two different SubDomains (currently unsupported)."""

    class sd0(SubDomain):
        name = 'd0'

        def define(self, dimensions):
            x, y = dimensions
            return {x: ('left', 2), y: ('left', 2)}

    class sd1(SubDomain):
        # NOTE(review): same `name` as sd0 -- confirm this is intentional
        name = 'd0'

        def define(self, dimensions):
            x, y = dimensions
            return {x: ('middle', 2, 2), y: ('middle', 2, 2)}

    s_d0 = sd0()
    s_d1 = sd1()
    nt = 10
    grid = Grid(shape=(10, 10), subdomains=(s_d0, s_d1))
    u = TimeFunction(name="u", grid=grid, save=nt)
    u1 = TimeFunction(name="u", grid=grid, save=nt)
    eqns = [Eq(u.forward, u + 1, subdomain=s_d0),
            Eq(u.forward, u.forward + u + 1, subdomain=s_d1)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, u=u1)
    assert np.all(u.data == u1.data)
def test_subdims():
    """Buffering with SubDimensions substituted for the grid Dimensions."""
    nt = 10
    grid = Grid(shape=(10, 10, 10))
    x, y, z = grid.dimensions
    xi = SubDimension.middle(name='xi', parent=x, thickness_left=2, thickness_right=2)
    yi = SubDimension.middle(name='yi', parent=y, thickness_left=2, thickness_right=2)
    zi = SubDimension.middle(name='zi', parent=z, thickness_left=2, thickness_right=2)
    u = TimeFunction(name='u', grid=grid, save=nt)
    u1 = TimeFunction(name='u', grid=grid, save=nt)
    eqn = Eq(u.forward, u + 1).xreplace({x: xi, y: yi, z: zi})
    op0 = Operator(eqn, opt='noop')
    op1 = Operator(eqn, opt='buffering')
    # Check generated code
    assert len(retrieve_iteration_tree(op1)) == 2
    assert len([i for i in FindSymbols().visit(op1) if i.is_Array]) == 1
    op0.apply(time_M=nt-2)
    op1.apply(time_M=nt-2, u=u1)
    assert np.all(u.data == u1.data)
def test_conddim_backwards():
    """Backward Eq reading a TimeFunction saved over a ConditionalDimension."""
    nt = 10
    grid = Grid(shape=(4, 4))
    time_dim = grid.time_dim
    x, y = grid.dimensions
    factor = Constant(name='factor', value=2, dtype=np.int32)
    time_sub = ConditionalDimension(name="time_sub", parent=time_dim, factor=factor)
    # `u` is subsampled in time: one saved slot every `factor` timesteps
    u = TimeFunction(name='u', grid=grid, time_order=0, save=nt, time_dim=time_sub)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    for i in range(u.save):
        u.data[i, :] = i
    eqns = [Eq(v.backward, v.backward + v + u + 1.)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code
    assert len(retrieve_iteration_tree(op1)) == 3
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 1
    op0.apply(time_m=1, time_M=9)
    op1.apply(time_m=1, time_M=9, v=v1)
    assert np.all(v.data == v1.data)
def test_conddim_backwards_unstructured():
    """Backward, unstructured accesses into a ConditionalDimension TimeFunction."""
    nt = 10
    grid = Grid(shape=(4, 4))
    time_dim = grid.time_dim
    x, y = grid.dimensions
    factor = Constant(name='factor', value=2, dtype=np.int32)
    time_sub = ConditionalDimension(name="time_sub", parent=time_dim, factor=factor)
    u = TimeFunction(name='u', grid=grid, time_order=0, save=nt, time_dim=time_sub)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    for i in range(u.save):
        u.data[i, :] = i
    # Hand-built accesses at subsampled indices -2, -1 and +2
    ub = u[time_sub - 1, x, y]
    ubb = u[time_sub - 2, x, y]
    uff = u[time_sub + 2, x, y]
    eqns = [Eq(v.backward, v.backward + v + ubb + ub + uff + 1.)]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code
    assert len(retrieve_iteration_tree(op1)) == 3
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 1
    # Note 1: cannot use time_m<4 or time_M>14 or there would be OOB accesses
    # due to `ubb` and `uff`, which read two steps away from the current point,
    # while `u` has in total `nt=10` entries (so last one has index 9). In
    # particular, at `time_M=14` we will read from `uff = u[time/factor + 2] =
    # u[14/2+2] = u[9]`, which is the last available entry in `u`. Likewise,
    # at `time_m=4` we will read from `ubb = u[time/factor - 2`] = u[4/2 - 2] =
    # u[0]`, which is clearly the last accessible entry in `u` while iterating
    # in the backward direction
    # Note 2: Given `factor=2`, we always write to `v` when `time % 2 == 0`, which
    # means that we always write to `v[t1] = v[(time+1)%2] = v[1]`, while `v[0]`
    # remains zero-valued. So the fact that the Eq is also reading from `v` is
    # only relevant to induce the backward iteration direction
    op0.apply(time_m=4, time_M=14)
    op1.apply(time_m=4, time_M=14, v=v1)
    assert np.all(v.data == v1.data)
def test_conddim_w_shifting():
    """Buffering with a symbolic shift (`save_shift`) into the subsampled indices."""
    nt = 50
    grid = Grid(shape=(5, 5))
    time = grid.time_dim
    factor = Constant(name='factor', value=5, dtype=np.int32)
    t_sub = ConditionalDimension('t_sub', parent=time, factor=factor)
    # Runtime-adjustable offset into the saved time slots
    save_shift = Constant(name='save_shift', dtype=np.int32)
    u = TimeFunction(name='u', grid=grid, time_order=0)
    u1 = TimeFunction(name='u', grid=grid, time_order=0)
    usave = TimeFunction(name='usave', grid=grid, time_order=0,
                         save=(int(nt//factor.data)), time_dim=t_sub)
    for i in range(usave.save):
        usave.data[i, :] = i
    eqns = Eq(u.forward, u + usave.subs(t_sub, t_sub - save_shift))
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code
    assert len(retrieve_iteration_tree(op1)) == 3
    buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
    assert len(buffers) == 1
    # From time_m=15 to time_M=35 with a factor=5 -- it means that, thanks
    # to t_sub, we enter the Eq exactly (35-15)/5 + 1 = 5 times. We set
    # save_shift=1 so instead of accessing the range usave[15/5:35/5+1],
    # we rather access the range usave[15/5-1:35:5], which means accessing
    # the usave values 2, 3, 4, 5, 6.
    op0.apply(time_m=15, time_M=35, save_shift=1)
    op1.apply(time_m=15, time_M=35, save_shift=1, u=u1)
    assert np.allclose(u.data, 20)
    assert np.all(u.data == u1.data)
    # Again, but with a different shift
    op1.apply(time_m=15, time_M=35, save_shift=-2, u=u1)
    assert np.allclose(u1.data, 20 + 35)
def test_multi_access():
    """A single saved Function read by two distinct equations.

    Both `v` and `w` consume `u`, and the lowering must still produce a
    single buffer (asserted below), not one per access.
    """
    nt = 10
    grid = Grid(shape=(2, 2))

    u = TimeFunction(name='u', grid=grid, save=nt)
    v = TimeFunction(name='v', grid=grid)
    v1 = TimeFunction(name='v', grid=grid)
    w = TimeFunction(name='w', grid=grid)
    w1 = TimeFunction(name='w', grid=grid)

    for step in range(nt):
        u.data[step, :] = step

    eqns = [Eq(v.forward, v + u.forward + 1.),
            Eq(w.forward, w + u + 1.)]

    op_ref = Operator(eqns, opt='noop')
    op_buf = Operator(eqns, opt='buffering')

    # Inspect the generated code: two iteration trees, exactly one buffer
    assert len(retrieve_iteration_tree(op_buf)) == 2
    arrays = [f for f in FindSymbols().visit(op_buf) if f.is_Array]
    assert len(arrays) == 1

    op_ref.apply(time_M=nt-2)
    op_buf.apply(time_M=nt-2, v=v1, w=w1)

    assert np.all(v.data == v1.data)
    assert np.all(w.data == w1.data)
def test_everything():
    """Stress test: SubDimensions + ConditionalDimension subsampling + shifting."""
    nt = 50
    grid = Grid(shape=(6, 6))
    x, y = grid.dimensions
    time = grid.time_dim
    xi = SubDimension.middle(name='xi', parent=x, thickness_left=2, thickness_right=2)
    yi = SubDimension.middle(name='yi', parent=y, thickness_left=2, thickness_right=2)
    factor = Constant(name='factor', value=5, dtype=np.int32)
    t_sub = ConditionalDimension('t_sub', parent=time, factor=factor)
    save_shift = Constant(name='save_shift', dtype=np.int32)
    u = TimeFunction(name='u', grid=grid, time_order=0)
    u1 = TimeFunction(name='u', grid=grid, time_order=0)
    va = TimeFunction(name='va', grid=grid, time_order=0,
                      save=(int(nt//factor.data)), time_dim=t_sub)
    vb = TimeFunction(name='vb', grid=grid, time_order=0,
                      save=(int(nt//factor.data)), time_dim=t_sub)
    for i in range(va.save):
        va.data[i, :] = i
        vb.data[i, :] = i*2 - 1
    # Shifted accesses into `va` at offsets -1, 0, +1 of the shifted index
    vas = va.subs(t_sub, t_sub - save_shift)
    vasb = va.subs(t_sub, t_sub - 1 - save_shift)
    vasf = va.subs(t_sub, t_sub + 1 - save_shift)
    eqns = [Eq(u.forward, u + (vasb + vas + vasf)*2. + vb)]
    eqns = [e.xreplace({x: xi, y: yi}) for e in eqns]
    op0 = Operator(eqns, opt='noop')
    op1 = Operator(eqns, opt='buffering')
    # Check generated code -- one buffer per saved TimeFunction (va, vb)
    assert len([i for i in FindSymbols().visit(op1) if i.is_Array]) == 2
    op0.apply(time_m=15, time_M=35, save_shift=0)
    op1.apply(time_m=15, time_M=35, save_shift=0, u=u1)
    assert np.all(u.data == u1.data)
| 29.598145
| 90
| 0.610862
| 3,039
| 19,150
| 3.764067
| 0.088187
| 0.057348
| 0.034094
| 0.056911
| 0.792989
| 0.762042
| 0.745083
| 0.725938
| 0.713699
| 0.702771
| 0
| 0.039443
| 0.230809
| 19,150
| 646
| 91
| 29.643963
| 0.737135
| 0.103499
| 0
| 0.719512
| 0
| 0
| 0.034424
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 1
| 0.063415
| false
| 0
| 0.012195
| 0
| 0.119512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16edcb27e4b574e37a5ed07ec712b3eea56f923e
| 267
|
py
|
Python
|
src/clikit/io/output_stream/__init__.py
|
finswimmer/clikit
|
206198eb60d53c5daefa715f8a93e1fe85ffcf7e
|
[
"MIT"
] | 78
|
2018-12-06T16:18:08.000Z
|
2022-03-18T01:44:06.000Z
|
src/clikit/io/output_stream/__init__.py
|
finswimmer/clikit
|
206198eb60d53c5daefa715f8a93e1fe85ffcf7e
|
[
"MIT"
] | 24
|
2019-05-11T16:40:06.000Z
|
2022-02-27T01:11:04.000Z
|
src/clikit/io/output_stream/__init__.py
|
finswimmer/clikit
|
206198eb60d53c5daefa715f8a93e1fe85ffcf7e
|
[
"MIT"
] | 17
|
2019-01-14T18:18:01.000Z
|
2022-03-07T23:05:46.000Z
|
from .buffered_output_stream import BufferedOutputStream
from .error_output_stream import ErrorOutputStream
from .null_output_stream import NullOutputStream
from .standard_output_stream import StandardOutputStream
from .stream_output_stream import StreamOutputStream
| 44.5
| 56
| 0.906367
| 30
| 267
| 7.733333
| 0.433333
| 0.258621
| 0.387931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074906
| 267
| 5
| 57
| 53.4
| 0.939271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc413fca6bd983abc0eebf2c9657d5821e8bddf1
| 132
|
py
|
Python
|
lightflow/queue/__init__.py
|
portrain/Lightflow
|
31f44a55f27b0c61da2631454ca592b641ab8145
|
[
"BSD-3-Clause"
] | null | null | null |
lightflow/queue/__init__.py
|
portrain/Lightflow
|
31f44a55f27b0c61da2631454ca592b641ab8145
|
[
"BSD-3-Clause"
] | null | null | null |
lightflow/queue/__init__.py
|
portrain/Lightflow
|
31f44a55f27b0c61da2631454ca592b641ab8145
|
[
"BSD-3-Clause"
] | null | null | null |
from .const import JobExecPath, JobStatus, JobType, JobEventName
__all__ = ['JobExecPath', 'JobStatus', 'JobType', 'JobEventName']
| 33
| 65
| 0.757576
| 12
| 132
| 8
| 0.666667
| 0.416667
| 0.5625
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106061
| 132
| 3
| 66
| 44
| 0.813559
| 0
| 0
| 0
| 0
| 0
| 0.295455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
bc46d4b180721343e20921f93653da1a4ca4493b
| 314
|
py
|
Python
|
src/googleanalytics/exception.py
|
loggly/python-googleanalytics
|
f0e594b95c3058a5a91131e9281eff875a4ce8c8
|
[
"BSD-3-Clause"
] | 68
|
2015-01-14T19:29:03.000Z
|
2022-02-12T09:32:05.000Z
|
src/googleanalytics/exception.py
|
loggly/python-googleanalytics
|
f0e594b95c3058a5a91131e9281eff875a4ce8c8
|
[
"BSD-3-Clause"
] | 1
|
2019-02-27T21:08:34.000Z
|
2019-02-27T21:08:34.000Z
|
src/googleanalytics/exception.py
|
loggly/python-googleanalytics
|
f0e594b95c3058a5a91131e9281eff875a4ce8c8
|
[
"BSD-3-Clause"
] | 28
|
2015-02-17T20:00:49.000Z
|
2021-07-21T10:42:36.000Z
|
class GoogleAnalyticsClientError(Exception):
    """
    General Google Analytics error (error accessing GA).

    Attributes:
        reason: Human-readable description of what went wrong.
    """

    def __init__(self, reason):
        # Pass the reason to Exception.__init__ so that `e.args` is
        # populated and the exception pickles/reports like a standard one
        # (the original left args empty). `str(e)`/`repr(e)` are unchanged.
        super(GoogleAnalyticsClientError, self).__init__(reason)
        self.reason = reason

    def __repr__(self):
        return 'GAError: %s' % self.reason

    def __str__(self):
        return 'GAError: %s' % self.reason
| 22.428571
| 55
| 0.627389
| 33
| 314
| 5.606061
| 0.545455
| 0.216216
| 0.183784
| 0.194595
| 0.302703
| 0.302703
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261147
| 314
| 13
| 56
| 24.153846
| 0.797414
| 0.16242
| 0
| 0.285714
| 0
| 0
| 0.089069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.285714
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
bc5fefba5acf89086670ad5e450acce4089cab02
| 28
|
py
|
Python
|
proxyless_nas_tensorflow/__init__.py
|
RogerChern/ProxylessNAS
|
9d19b74041eb749d540208da4e7e51f9053bbcf9
|
[
"Apache-2.0"
] | 558
|
2019-04-08T02:28:31.000Z
|
2020-02-12T08:18:57.000Z
|
proxyless_nas_tensorflow/__init__.py
|
RogerChern/ProxylessNAS
|
9d19b74041eb749d540208da4e7e51f9053bbcf9
|
[
"Apache-2.0"
] | 3
|
2018-12-06T11:54:55.000Z
|
2018-12-19T19:21:30.000Z
|
proxyless_nas_tensorflow/__init__.py
|
RogerChern/ProxylessNAS
|
9d19b74041eb749d540208da4e7e51f9053bbcf9
|
[
"Apache-2.0"
] | 130
|
2019-04-08T01:58:17.000Z
|
2020-02-07T10:23:34.000Z
|
from .tf_model_zoo import *
| 14
| 27
| 0.785714
| 5
| 28
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc6ced6277d0eb97fc11f070f0352ebcc99db90a
| 11,755
|
py
|
Python
|
tests/components/test_custom_action.py
|
LL0814/mindmeld
|
a616bdcec9c68aaf17c12727118c4436254f6628
|
[
"Apache-2.0"
] | 1
|
2020-08-06T03:00:45.000Z
|
2020-08-06T03:00:45.000Z
|
tests/components/test_custom_action.py
|
LL0814/mindmeld
|
a616bdcec9c68aaf17c12727118c4436254f6628
|
[
"Apache-2.0"
] | null | null | null |
tests/components/test_custom_action.py
|
LL0814/mindmeld
|
a616bdcec9c68aaf17c12727118c4436254f6628
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_custom_action
----------------------------------
Tests for custom actions.
"""
import pytest
from unittest.mock import Mock, patch
from mindmeld import Application
from mindmeld.components import (
CustomAction,
invoke_custom_action,
invoke_custom_action_async,
)
from mindmeld.components.dialogue import DialogueResponder
from mindmeld.components.request import Request
def test_custom_action_config(kwik_e_mart_app):
    """The app fixture must expose a custom action config containing a URL."""
    config = kwik_e_mart_app.custom_action_config
    assert config is not None
    assert "url" in config
    assert config["url"] == "http://0.0.0.0:8080/"
def test_custom_action():
    """Test CustomAction.invoke to ensure that the parameters of the JSON body are correct"""
    action_config = {"url": "http://localhost:8080/v2/action"}
    action = CustomAction(name="action_call_people", config=action_config)
    # Patch requests.post so no HTTP call is made; return an empty JSON body
    with patch("requests.post") as mock_object:
        mock_object.return_value = Mock()
        mock_object.return_value.status_code = 200
        mock_object.return_value.json.return_value = {}
        request = Request(
            text="sing a song", domain="some domain", intent="some intent"
        )
        responder = DialogueResponder()
        assert action.invoke(request, responder)
        # Inspect the keyword arguments of the mocked POST call
        assert mock_object.call_args[1]["url"] == action_config["url"]
        assert "request" in mock_object.call_args[1]["json"]
        assert "responder" in mock_object.call_args[1]["json"]
        assert mock_object.call_args[1]["json"]["action"] == "action_call_people"
def test_custom_action_merge():
    """Test `merge=True` for custom actions: the remote response is merged
    into the responder's pre-existing directives, frame, slots and params."""
    action_config = {"url": "http://localhost:8080/v2/action"}
    action = CustomAction(name="action_call_people", config=action_config)
    with patch("requests.post") as mock_object:
        mock_object.return_value = Mock()
        mock_object.return_value.status_code = 200
        # Simulated remote action response to be merged into the responder
        mock_object.return_value.json.return_value = {
            "directives": ["directive3", "directive4"],
            "frame": {"k2": "v2"},
            "slots": {"s2": "v2"},
            "params": {
                "allowed_intents": ["intent3", "intent4"],
                "dynamic_resource": {"r2": "v2"},
                "language": "some-language",
                "locale": "some-locale",
                "time_zone": "some-time-zone",
                "target_dialogue_state": "some-state",
                "timestamp": "some-timestamp",
            },
        }
        request = Request(
            text="sing a song", domain="some domain", intent="some intent"
        )
        responder = DialogueResponder()
        # Pre-populate the responder so the merge behavior is observable
        responder.directives = ["directive1", "directive2"]
        responder.frame = {"k1": "v1"}
        responder.slots = {"s1": "v1"}
        responder.params.allowed_intents = ("intent1", "intent2")
        responder.params.dynamic_resource = {"r1": "v1"}
        assert action.invoke(request, responder)
        # Sequences are concatenated, dicts are unioned, scalars overwritten
        assert responder.directives == [
            "directive1",
            "directive2",
            "directive3",
            "directive4",
        ]
        assert responder.frame == {"k1": "v1", "k2": "v2"}
        assert responder.slots == {"s1": "v1", "s2": "v2"}
        assert responder.params.allowed_intents == (
            "intent1",
            "intent2",
            "intent3",
            "intent4",
        )
        assert responder.params.dynamic_resource == {"r1": "v1", "r2": "v2"}
        assert responder.params.target_dialogue_state == "some-state"
        assert responder.params.language == "some-language"
        assert responder.params.locale == "some-locale"
        assert responder.params.time_zone == "some-time-zone"
        assert responder.params.timestamp == "some-timestamp"
def test_custom_action_no_merge():
    """Test `merge=False` for custom actions: the remote response replaces the
    responder's pre-existing state wholesale."""
    action_config = {"url": "http://localhost:8080/v2/action"}
    action = CustomAction(name="action_call_people", config=action_config, merge=False)
    with patch("requests.post") as mock_object:
        mock_object.return_value = Mock()
        mock_object.return_value.status_code = 200
        # Simulated remote action response that should replace local state
        mock_object.return_value.json.return_value = {
            "directives": ["directive3", "directive4"],
            "frame": {"k2": "v2"},
            "slots": {"s2": "v2"},
            "params": {
                "allowed_intents": ["intent3", "intent4"],
                "dynamic_resource": {"r2": "v2"},
                "language": "some-language",
                "locale": "some-locale",
                "time_zone": "some-time-zone",
                "target_dialogue_state": "some-state",
                "timestamp": "some-timestamp",
            },
        }
        request = Request(
            text="sing a song", domain="some domain", intent="some intent"
        )
        responder = DialogueResponder()
        # Pre-populate the responder; these values must all be discarded
        responder.directives = ["directive1", "directive2"]
        responder.frame = {"k1": "v1"}
        responder.slots = {"s1": "v1"}
        responder.params.allowed_intents = ("intent1", "intent2")
        responder.params.dynamic_resource = {"r1": "v1"}
        assert action.invoke(request, responder)
        assert responder.directives == [
            "directive3",
            "directive4",
        ]
        assert responder.frame == {"k2": "v2"}
        assert responder.slots == {"s2": "v2"}
        assert tuple(responder.params.allowed_intents) == ("intent3", "intent4",)
        assert responder.params.dynamic_resource == {"r2": "v2"}
        assert responder.params.target_dialogue_state == "some-state"
        assert responder.params.language == "some-language"
        assert responder.params.locale == "some-locale"
        assert responder.params.time_zone == "some-time-zone"
        assert responder.params.timestamp == "some-timestamp"
def test_invoke_custom_action():
    """Verify invoke_custom_action posts a JSON body with the expected fields."""
    action_config = {"url": "http://localhost:8080/v2/action"}
    with patch("requests.post") as post_mock:
        # Fake a successful HTTP response with an empty JSON payload
        response = Mock()
        response.status_code = 200
        response.json.return_value = {}
        post_mock.return_value = response

        request = Request(
            text="sing a song", domain="some domain", intent="some intent"
        )
        responder = DialogueResponder()

        assert invoke_custom_action(
            "action_call_people", action_config, request, responder
        )

        kwargs = post_mock.call_args[1]
        assert kwargs["url"] == action_config["url"]
        assert "request" in kwargs["json"]
        assert "responder" in kwargs["json"]
        assert kwargs["json"]["action"] == "action_call_people"
@pytest.mark.asyncio
async def test_custom_action_async():
    """Test CustomAction.invoke_async to ensure that the parameters of the JSON body are correct"""
    action_config = {"url": "http://localhost:8080/v2/action"}
    action = CustomAction(name="action_call_people", config=action_config)
    with patch("mindmeld.components.CustomAction.post_async") as mock_object:
        # Stand-in coroutine for the HTTP call: returns (status_code, body)
        async def mock_coroutine():
            return 200, {}
        mock_object.return_value = mock_coroutine()
        request = Request(
            text="sing a song", domain="some domain", intent="some intent"
        )
        responder = DialogueResponder()
        assert await action.invoke_async(request, responder)
        # First positional argument of the first call is the JSON payload
        call_args = mock_object.call_args_list[0][0][0]
        assert "request" in call_args
        assert "responder" in call_args
        assert call_args["action"] == "action_call_people"
@pytest.mark.asyncio
async def test_invoke_custom_action_async():
    """Test invoke_custom_action_async to ensure that the parameters of the JSON body are correct"""
    config = {"url": "http://localhost:8080/v2/action"}
    with patch("mindmeld.components.CustomAction.post_async") as mock_post:

        async def fake_post():
            # Simulate a successful call: (status_code, json body).
            return 200, {}

        mock_post.return_value = fake_post()
        req = Request(text="sing a song", domain="some domain", intent="some intent")
        res = DialogueResponder()
        assert await invoke_custom_action_async(
            "action_call_people", config, req, res
        )
        # First positional argument of the first call is the JSON payload.
        payload = mock_post.call_args_list[0][0][0]
        assert "request" in payload
        assert "responder" in payload
        assert payload["action"] == "action_call_people"
def test_custom_action_handler(home_assistant_nlp):
    """Test Application.custom_action handle"""
    app = Application("home_assistant")
    app.lazy_init(home_assistant_nlp)
    app.custom_action_config = {"url": "some-url"}
    # Register one intent-specific action and one default (fallback) action.
    app.custom_action(intent="set_thermostat", action="set-thermostat")
    app.custom_action(default=True, action="times-and-dates")
    with patch("requests.post") as mock_object:
        mock_object.return_value = Mock()
        mock_object.return_value.status_code = 200
        mock_object.return_value.json.return_value = {
            "directives": ["set-thermostat-action"]
        }
        # invoke set thermostat intent
        res = app.app_manager.parse("turn it to 70 degrees")
        assert res.directives == ["set-thermostat-action"]
        assert mock_object.call_args[1]["url"] == "some-url"
        # The intent-specific action name must be in the POSTed payload.
        assert mock_object.call_args[1]["json"]["action"] == "set-thermostat"
        mock_object.return_value.json.return_value = {
            "directives": ["time-and-dates-action"]
        }
        # invoke time & dates intent
        res = app.app_manager.parse("change my alarm to 9")
        assert res.directives == ["time-and-dates-action"]
        assert mock_object.call_args[1]["url"] == "some-url"
        # This query matched no registered intent, so the default action fires.
        assert mock_object.call_args[1]["json"]["action"] == "times-and-dates"
def test_custom_action_sequence(home_assistant_nlp):
    """Test Application.custom_action handle for a sequence of actions"""
    app = Application("home_assistant")
    app.lazy_init(home_assistant_nlp)
    app.custom_action_config = {"url": "some-url"}
    app.custom_action(
        intent="set_thermostat", actions=["set-thermostat", "clear-thermostat"]
    )
    with patch("requests.post") as mock_post:
        # Every POST returns the same single directive.
        response = Mock()
        response.status_code = 200
        response.json.return_value = {"directives": ["some-directive"]}
        mock_post.return_value = response
        # invoke set thermostat intent and we should expect two directives
        result = app.app_manager.parse("turn it to 70 degrees")
        assert result.directives == ["some-directive", "some-directive"]
        assert mock_post.call_args[1]["url"] == "some-url"
@pytest.mark.asyncio
async def test_custom_action_handler_async(home_assistant_nlp):
    """Test Application.custom_action handle with async mode"""
    app = Application("home_assistant", async_mode=True)
    app.lazy_init(home_assistant_nlp)
    app.custom_action_config = {"url": "some-url"}
    app.custom_action(intent="set_thermostat", action="set-thermostat", async_mode=True)
    app.custom_action(default=True, action="times-and-dates", async_mode=True)
    with patch("mindmeld.components.CustomAction.post_async") as mock_object:
        async def mock_coroutine():
            # Simulated (status_code, json body) result of the HTTP call.
            return 200, {"directives": ["set-thermostat-action"]}
        # NOTE(review): a coroutine object is single-use — this mock supports
        # exactly one awaited call; confirm no second action fires in this test.
        mock_object.return_value = mock_coroutine()
        # invoke set thermostat intent
        res = await app.app_manager.parse("turn it to 70 degrees")
        assert res.directives == ["set-thermostat-action"]
| 40.395189
| 100
| 0.640068
| 1,362
| 11,755
| 5.315712
| 0.110866
| 0.063536
| 0.048619
| 0.063812
| 0.853039
| 0.80953
| 0.792127
| 0.766436
| 0.717265
| 0.7
| 0
| 0.017867
| 0.228669
| 11,755
| 290
| 101
| 40.534483
| 0.780633
| 0.056231
| 0
| 0.656388
| 0
| 0
| 0.21873
| 0.027295
| 0
| 0
| 0
| 0
| 0.229075
| 1
| 0.030837
| false
| 0
| 0.026432
| 0
| 0.070485
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bc6ea7c0ac7dfa4310b281540aae1ed5cae23ade
| 106
|
py
|
Python
|
terrascript/kubernetes/__init__.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/kubernetes/__init__.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/kubernetes/__init__.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/kubernetes/__init__.py
import terrascript
class kubernetes(terrascript.Provider):
    """Terraform ``kubernetes`` provider stub.

    Auto-generated; all behavior is inherited from terrascript.Provider.
    The lowercase class name intentionally mirrors the Terraform provider
    name used in generated configurations.
    """
    pass
| 17.666667
| 39
| 0.811321
| 11
| 106
| 7.454545
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113208
| 106
| 6
| 40
| 17.666667
| 0.87234
| 0.320755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
bcd654d6f58543e1e3703e7dcda89278452e7769
| 57
|
py
|
Python
|
scripts/util_funs.py
|
Lucklyric/Expedia-Recommendation
|
d0496fec5305b02d4e17785e6ea5d635e51e92c1
|
[
"Apache-2.0"
] | null | null | null |
scripts/util_funs.py
|
Lucklyric/Expedia-Recommendation
|
d0496fec5305b02d4e17785e6ea5d635e51e92c1
|
[
"Apache-2.0"
] | null | null | null |
scripts/util_funs.py
|
Lucklyric/Expedia-Recommendation
|
d0496fec5305b02d4e17785e6ea5d635e51e92c1
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import tensorflow.contrib as tc
| 14.25
| 31
| 0.824561
| 9
| 57
| 5.222222
| 0.666667
| 0.680851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 57
| 3
| 32
| 19
| 0.979167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bce470e360fbd3c5d9916e3abf37d58a71c8a167
| 3,683
|
py
|
Python
|
lvls/blocks.py
|
Voolshara/Lyceum2
|
6ef3581ccc4f24afdc7bd843c608f90a109c5a6f
|
[
"MIT"
] | null | null | null |
lvls/blocks.py
|
Voolshara/Lyceum2
|
6ef3581ccc4f24afdc7bd843c608f90a109c5a6f
|
[
"MIT"
] | null | null | null |
lvls/blocks.py
|
Voolshara/Lyceum2
|
6ef3581ccc4f24afdc7bd843c608f90a109c5a6f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import pygame
from pygame import *
PLATFORM_WIDTH = 20
PLATFORM_HEIGHT = 20
PLATFORM_COLOR = "#FF6262"
def load_image(name, directory, colorkey=None, size=20):
    """Load an image file and scale it to a square tile.

    Args:
        name: file name of the image.
        directory: directory containing the image.
        colorkey: transparency handling. None -> use per-pixel alpha
            (convert_alpha); -1 -> use the colour of pixel (0, 0) as the
            colour key; any other value is passed to set_colorkey as-is.
            NOTE(review): callers in this file pass colorkey=True, which
            pygame receives as the value 1 — if "sample top-left pixel"
            was intended, the sentinel should be -1; confirm.
        size: side length in pixels the tile is scaled to. Defaults to 20,
            matching PLATFORM_WIDTH/PLATFORM_HEIGHT (was the hard-coded
            local ``koef``).

    Exits the program if the file does not exist.
    """
    fullname = os.path.join(directory, name)
    # If the file does not exist, abort with a message.
    if not os.path.isfile(fullname):
        print(f"Файл с изображением '{fullname}' не найден")
        sys.exit()
    image = pygame.image.load(fullname)
    image = pygame.transform.scale(image, (size, size))
    if colorkey is not None:
        image = image.convert()
        if colorkey == -1:
            # Sample the top-left pixel as the transparent colour.
            colorkey = image.get_at((0, 0))
        image.set_colorkey(colorkey)
    else:
        image = image.convert_alpha()
    return image
class Platform(sprite.Sprite):
    """Wall tile sprite placed at pixel position (x, y)."""

    def __init__(self, x, y):
        sprite.Sprite.__init__(self)
        # The original created and filled a placeholder Surface here, but it
        # was immediately overwritten by the loaded image — dead code removed.
        self.image = load_image("wall.png", "data/assets", colorkey=True)
        self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)
class Empty(sprite.Sprite):
    """Empty (walkable) tile sprite placed at pixel position (x, y)."""

    def __init__(self, x, y):
        sprite.Sprite.__init__(self)
        # Dead placeholder Surface/fill removed — the image below replaced it.
        self.image = load_image("empty.png", "data/assets", colorkey=True)
        self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)
class FloorLvl1(sprite.Sprite):
    """Level-1 floor tile sprite placed at pixel position (x, y)."""

    def __init__(self, x, y):
        sprite.Sprite.__init__(self)
        # Dead placeholder Surface/fill removed — the image below replaced it.
        self.image = load_image("floor.png", "data/assets/tiles_1lvl", colorkey=True)
        self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)
class EarthLvl1(sprite.Sprite):
    """Level-1 earth tile sprite placed at pixel position (x, y)."""

    def __init__(self, x, y):
        sprite.Sprite.__init__(self)
        # Dead placeholder Surface/fill removed — the image below replaced it.
        self.image = load_image("earth.png", "data/assets/tiles_1lvl", colorkey=True)
        self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)
class DescentLeftLvl1(sprite.Sprite):
    """Level-1 left-descent tile sprite placed at pixel position (x, y)."""

    def __init__(self, x, y):
        sprite.Sprite.__init__(self)
        # Dead placeholder Surface/fill removed — the image below replaced it.
        self.image = load_image("descent_left.png", "data/assets/tiles_1lvl", colorkey=True)
        self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)
class DescentRightLvl1(sprite.Sprite):
    """Level-1 right-descent tile sprite placed at pixel position (x, y)."""

    def __init__(self, x, y):
        sprite.Sprite.__init__(self)
        # Dead placeholder Surface/fill removed — the image below replaced it.
        self.image = load_image("descent_right.png", "data/assets/tiles_1lvl", colorkey=True)
        self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)
class PlatformLvl1(sprite.Sprite):
    """Level-1 platform tile sprite placed at pixel position (x, y)."""

    def __init__(self, x, y):
        sprite.Sprite.__init__(self)
        # Dead placeholder Surface/fill removed — the image below replaced it.
        self.image = load_image("platform.png", "data/assets/tiles_1lvl", colorkey=True)
        self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)
class ExtraEarthLvl1(sprite.Sprite):
    """Level-1 extra-earth tile sprite placed at pixel position (x, y)."""

    def __init__(self, x, y):
        sprite.Sprite.__init__(self)
        # Dead placeholder Surface/fill removed — the image below replaced it.
        self.image = load_image("extra_earth.png", "data/assets/tiles_1lvl", colorkey=True)
        self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)
| 35.757282
| 93
| 0.673907
| 477
| 3,683
| 4.93501
| 0.178197
| 0.091759
| 0.142736
| 0.183517
| 0.737893
| 0.737893
| 0.737893
| 0.737893
| 0.737893
| 0.737893
| 0
| 0.008826
| 0.200109
| 3,683
| 102
| 94
| 36.107843
| 0.790224
| 0.021178
| 0
| 0.506329
| 0
| 0
| 0.082732
| 0.036646
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113924
| false
| 0
| 0.050633
| 0
| 0.278481
| 0.012658
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bce5a6211dfc9c86ea436016b8bbf66e8e2552b7
| 22,328
|
py
|
Python
|
test_autogalaxy/unit/profiles/mass_profiles/test_mass_profiles.py
|
jonathanfrawley/PyAutoGalaxy_copy
|
1cedbfdcf65020538128163f7d8a7f8e169646e0
|
[
"MIT"
] | null | null | null |
test_autogalaxy/unit/profiles/mass_profiles/test_mass_profiles.py
|
jonathanfrawley/PyAutoGalaxy_copy
|
1cedbfdcf65020538128163f7d8a7f8e169646e0
|
[
"MIT"
] | null | null | null |
test_autogalaxy/unit/profiles/mass_profiles/test_mass_profiles.py
|
jonathanfrawley/PyAutoGalaxy_copy
|
1cedbfdcf65020538128163f7d8a7f8e169646e0
|
[
"MIT"
] | null | null | null |
import math
import autogalaxy as ag
from autogalaxy import exc
import numpy as np
import pytest
def mass_within_radius_of_profile_from_grid_calculation(radius, profile):
    """Numerically estimate the profile's mass inside a circle of ``radius``.

    Samples the convergence on a 40x40 Cartesian grid spanning
    [-1.5 * radius, 1.5 * radius] on each axis and sums
    convergence * cell_area over cells whose elliptical radius
    falls inside the circle.
    """
    samples = np.linspace(-radius * 1.5, radius * 1.5, 40)
    cell_size = samples[1] - samples[0]
    cell_area = cell_size ** 2
    total = 0.0
    for x in samples:
        for y in samples:
            eta = profile.grid_to_elliptical_radii(grid=np.array([[x, y]]))
            if eta < radius:
                total += profile.convergence_func(eta) * cell_area
    return total
class TestMassWithin:
    """Checks mass_angular_within_circle against analytic and grid results."""

    def test__compare_to_analytic_and_grid_calculations(self):
        # For a SIS the analytic mass inside radius r is pi * theta_E * r.
        sis = ag.mp.SphericalIsothermal(einstein_radius=2.0)
        mass = sis.mass_angular_within_circle(radius=2.0)
        assert math.pi * sis.einstein_radius * 2.0 == pytest.approx(mass, 1e-3)
        sis = ag.mp.SphericalIsothermal(einstein_radius=4.0)
        mass = sis.mass_angular_within_circle(radius=4.0)
        assert math.pi * sis.einstein_radius * 4.0 == pytest.approx(mass, 1e-3)
        # Cross-check against the brute-force grid integration helper
        # (looser 2% tolerance reflects the coarse 40x40 grid).
        sis = ag.mp.SphericalIsothermal(einstein_radius=2.0)
        mass_grid = mass_within_radius_of_profile_from_grid_calculation(
            radius=1.0, profile=sis
        )
        mass = sis.mass_angular_within_circle(radius=1.0)
        assert mass_grid == pytest.approx(mass, 0.02)
class TestRadiusAverageConvergenceOne:
    """Checks the radius at which the average convergence equals 1.

    For isothermal profiles this radius equals the einstein_radius input,
    independent of ellipticity.
    """

    def test__radius_of_average_convergence(self):
        sis = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
        assert sis.average_convergence_of_1_radius == pytest.approx(2.0, 1e-4)
        sie = ag.mp.EllipticalIsothermal(
            centre=(0.0, 0.0), einstein_radius=1.0, elliptical_comps=(0.0, 0.111111)
        )
        assert sie.average_convergence_of_1_radius == pytest.approx(1.0, 1e-4)
        sie = ag.mp.EllipticalIsothermal(
            centre=(0.0, 0.0), einstein_radius=3.0, elliptical_comps=(0.0, 0.333333)
        )
        assert sie.average_convergence_of_1_radius == pytest.approx(3.0, 1e-4)
        sie = ag.mp.EllipticalIsothermal(
            centre=(0.0, 0.0), einstein_radius=8.0, elliptical_comps=(0.0, 0.666666)
        )
        assert sie.average_convergence_of_1_radius == pytest.approx(8.0, 1e-4)
class TestDensityBetweenAnnuli:
    """Checks density_between_circular_annuli against analytic / manual masses."""

    def test__circular_annuli__sis__analyic_density_agrees(self):
        einstein_radius = 1.0
        sis = ag.mp.SphericalIsothermal(
            centre=(0.0, 0.0), einstein_radius=einstein_radius
        )
        inner_annuli_radius = 2.0
        outer_annuli_radius = 3.0
        # SIS analytic mass inside radius r is pi * theta_E * r.
        inner_mass = math.pi * einstein_radius * inner_annuli_radius
        outer_mass = math.pi * einstein_radius * outer_annuli_radius
        density_between_annuli = sis.density_between_circular_annuli(
            inner_annuli_radius=inner_annuli_radius,
            outer_annuli_radius=outer_annuli_radius,
        )
        annuli_area = (np.pi * outer_annuli_radius ** 2.0) - (
            np.pi * inner_annuli_radius ** 2.0
        )
        # Density = (mass in annulus) / (annulus area).
        assert (outer_mass - inner_mass) / annuli_area == pytest.approx(
            density_between_annuli, 1e-4
        )

    def test__circular_annuli__nfw_profile__compare_to_manual_mass(self):
        nfw = ag.mp.EllipticalNFW(
            centre=(0.0, 0.0), elliptical_comps=(0.111111, 0.0), kappa_s=1.0
        )
        # No simple analytic form for NFW — compare against the profile's own
        # circular-mass integrals instead.
        inner_mass = nfw.mass_angular_within_circle(radius=1.0)
        outer_mass = nfw.mass_angular_within_circle(radius=2.0)
        density_between_annuli = nfw.density_between_circular_annuli(
            inner_annuli_radius=1.0, outer_annuli_radius=2.0
        )
        annuli_area = (np.pi * 2.0 ** 2.0) - (np.pi * 1.0 ** 2.0)
        assert (outer_mass - inner_mass) / annuli_area == pytest.approx(
            density_between_annuli, 1e-4
        )
class TestNormalizationEinstienRadius:
    """Round-trip checks between profile normalization, angular mass and
    Einstein radius for isothermal, NFW and Sersic mass profiles."""

    def test__mass_angular_from_normalization_and_radius(self):
        sis = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
        mass_angular_from_normalization = sis.mass_angular_from_normalization_and_radius(
            normalization=1.0, radius=2.0
        )
        assert mass_angular_from_normalization == pytest.approx(2.0 * np.pi, 1.0e-2)
        mass_angular_from_normalization = sis.mass_angular_from_normalization_and_radius(
            normalization=1.0, radius=4.0
        )
        assert mass_angular_from_normalization == pytest.approx(4.0 * np.pi, 1.0e-2)
        nfw = ag.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=1.0)
        mass_angular_from_normalization = nfw.mass_angular_from_normalization_and_radius(
            normalization=2.0, radius=2.0
        )
        assert mass_angular_from_normalization == pytest.approx(15.19525, 1.0e-4)
        sersic = ag.mp.SphericalSersic(
            centre=(0.0, 0.0),
            intensity=1.0,
            effective_radius=1.0,
            sersic_index=3.0,
            mass_to_light_ratio=1.0,
        )
        mass_angular_from_normalization = sersic.mass_angular_from_normalization_and_radius(
            normalization=2.0, radius=2.0
        )
        # NOTE(review): sersic is re-created (mass_to_light_ratio=2.0) AFTER the
        # value above was computed but BEFORE it is asserted — presumably the
        # normalization argument makes the ratio irrelevant here; confirm.
        sersic = ag.mp.SphericalSersic(
            centre=(0.0, 0.0),
            intensity=1.0,
            effective_radius=1.0,
            sersic_index=3.0,
            mass_to_light_ratio=2.0,
        )
        assert mass_angular_from_normalization == pytest.approx(28.32431, 1.0e-4)
        mass_angular_from_normalization = sersic.mass_angular_from_normalization_and_radius(
            normalization=0.1, radius=2.0
        )
        assert mass_angular_from_normalization == pytest.approx(1.416215, 1.0e-2)

    def test__normalization_from_mass_angular_and_radius(self):
        sersic = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
        normalization = sersic.normalization_from_mass_angular_and_radius(
            mass_angular=5.0,
            radius=2.0,
            normalization_min=0.5,
            normalization_max=3.0,
            bins=5,
        )
        assert normalization == pytest.approx(0.79577, 1.0e-2)
        nfw = ag.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=3.0, scale_radius=1.0)
        normalization = nfw.normalization_from_mass_angular_and_radius(
            mass_angular=6.35829,
            radius=2.0,
            normalization_min=0.5,
            normalization_max=3.0,
            bins=5,
        )
        assert normalization == pytest.approx(0.83687, 1.0e-2)
        sersic = ag.mp.SphericalSersic(
            centre=(0.0, 0.0),
            intensity=1.0,
            effective_radius=1.0,
            sersic_index=3.0,
            mass_to_light_ratio=1.0,
        )
        normalization = sersic.normalization_from_mass_angular_and_radius(
            mass_angular=2.15403,
            radius=2.0,
            normalization_min=0.01,
            normalization_max=30.0,
            bins=5,
        )
        sersic = sersic.with_new_normalization(normalization=normalization)
        assert normalization == pytest.approx(0.152097, 1.0e-2)
        # A search window that cannot bracket the target mass must raise.
        with pytest.raises(exc.ProfileException):
            sersic.normalization_from_mass_angular_and_radius(
                mass_angular=1.0,
                radius=2.0,
                normalization_min=1e-4,
                normalization_max=1e-3,
                bins=2,
            )

    def test__einstein_radius_from_normalization(self):
        sis = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
        einstein_radius_from_normalization = sis.einstein_radius_from_normalization(
            normalization=1.0
        )
        assert einstein_radius_from_normalization == pytest.approx(1.0, 1.0e-2)
        nfw = ag.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=1.0, scale_radius=1.0)
        einstein_radius_from_normalization = nfw.einstein_radius_from_normalization(
            normalization=2.0
        )
        assert einstein_radius_from_normalization == pytest.approx(2.35829, 1.0e-4)
        sersic = ag.mp.SphericalSersic(
            centre=(0.0, 0.0),
            intensity=1.0,
            effective_radius=1.0,
            sersic_index=3.0,
            mass_to_light_ratio=1.0,
        )
        einstein_radius_from_normalization = sersic.einstein_radius_from_normalization(
            normalization=1.0
        )
        # Must agree with the profile's own average-convergence radius.
        einstein_radius_from_profile = sersic.average_convergence_of_1_radius
        assert einstein_radius_from_normalization == pytest.approx(
            einstein_radius_from_profile, 1.0e-4
        )
        einstein_radius_from_normalization = sersic.einstein_radius_from_normalization(
            normalization=0.1
        )
        assert einstein_radius_from_normalization == pytest.approx(0.381544, 1.0e-2)
        # Extreme normalizations have no valid Einstein radius -> None.
        einstein_radius_from_normalization = sersic.einstein_radius_from_normalization(
            normalization=1e-4
        )
        assert einstein_radius_from_normalization == None
        einstein_radius_from_normalization = sersic.einstein_radius_from_normalization(
            normalization=1e9
        )
        assert einstein_radius_from_normalization == None

    def test__normalization_from_einstein_radius(self):
        sersic = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
        normalization = sersic.normalization_from_einstein_radius(
            einstein_radius=1.0, normalization_min=0.5, normalization_max=3.0, bins=5
        )
        assert normalization == pytest.approx(1.0, 1.0e-2)
        nfw = ag.mp.SphericalNFW(centre=(0.0, 0.0), kappa_s=3.0, scale_radius=1.0)
        normalization = nfw.normalization_from_einstein_radius(
            einstein_radius=2.35829,
            normalization_min=0.5,
            normalization_max=3.0,
            bins=5,
        )
        assert normalization == pytest.approx(2.0, 1.0e-2)
        sersic = ag.mp.SphericalSersic(
            centre=(0.0, 0.0),
            intensity=1.0,
            effective_radius=1.0,
            sersic_index=3.0,
            mass_to_light_ratio=1.0,
        )
        normalization = sersic.normalization_from_einstein_radius(
            einstein_radius=2.15403,
            normalization_min=0.01,
            normalization_max=30.0,
            bins=5,
        )
        assert normalization == pytest.approx(1.0, 1.0e-2)
        # A search window that cannot bracket the target radius must raise.
        with pytest.raises(exc.ProfileException):
            sersic.normalization_from_einstein_radius(
                einstein_radius=1.0,
                normalization_min=1e-4,
                normalization_max=1e-3,
                bins=2,
            )
class TestExtractObject:
    """Checks extract_attribute filtering by class and attribute name."""

    def test__extract_works(self):
        sis = ag.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=2.0)
        einstein_radii = sis.extract_attribute(
            cls=ag.mp.MassProfile, name="einstein_radius"
        )
        assert einstein_radii.in_list[0] == 2.0
        centres = sis.extract_attribute(cls=ag.mp.MassProfile, name="centre")
        assert centres.in_list[0] == (0.0, 0.0)
        # A misspelt attribute name ("einstein_radiu") yields None.
        assert (
            sis.extract_attribute(cls=ag.mp.MassProfile, name="einstein_radiu") == None
        )
        # Extracting for a non-matching class must not raise (result unused).
        sis.extract_attribute(cls=ag.lp.LightProfile, name="einstein_radius")
class TestRegression:
    """Regression test: profile centre must map to the correct pixel in
    convergence / potential / deflection maps, on both uniform and
    iterative grids."""

    def test__centre_of_profile_in_right_place(self):
        grid = ag.Grid2D.uniform(shape_native=(7, 7), pixel_scales=1.0)
        mass_profile = ag.mp.EllipticalIsothermal(
            centre=(2.0, 1.0), einstein_radius=1.0
        )
        # Convergence peaks at the profile centre, pixel (1, 4).
        convergence = mass_profile.convergence_from_grid(grid=grid)
        max_indexes = np.unravel_index(
            convergence.native.argmax(), convergence.shape_native
        )
        assert max_indexes == (1, 4)
        # Potential has a minimum at the centre.
        potential = mass_profile.potential_from_grid(grid=grid)
        max_indexes = np.unravel_index(
            potential.native.argmin(), potential.shape_native
        )
        assert max_indexes == (1, 4)
        # Deflections change sign across the centre pixel.
        deflections = mass_profile.deflections_from_grid(grid=grid)
        assert deflections.native[1, 4, 0] > 0
        assert deflections.native[2, 4, 0] < 0
        assert deflections.native[1, 4, 1] > 0
        assert deflections.native[1, 3, 1] < 0
        # Repeat for the spherical profile.
        mass_profile = ag.mp.SphericalIsothermal(centre=(2.0, 1.0), einstein_radius=1.0)
        convergence = mass_profile.convergence_from_grid(grid=grid)
        max_indexes = np.unravel_index(
            convergence.native.argmax(), convergence.shape_native
        )
        assert max_indexes == (1, 4)
        mass_profile = ag.mp.SphericalIsothermal(centre=(2.0, 1.0), einstein_radius=1.0)
        potential = mass_profile.potential_from_grid(grid=grid)
        max_indexes = np.unravel_index(
            potential.native.argmin(), potential.shape_native
        )
        assert max_indexes == (1, 4)
        deflections = mass_profile.deflections_from_grid(grid=grid)
        assert deflections.native[1, 4, 0] > 0
        assert deflections.native[2, 4, 0] < 0
        assert deflections.native[1, 4, 1] > 0
        assert deflections.native[1, 3, 1] < 0
        # Same checks on an iterative grid (note >= / <= for the deflections,
        # since iteration can land exactly on zero).
        grid = ag.Grid2DIterate.uniform(
            shape_native=(7, 7),
            pixel_scales=1.0,
            fractional_accuracy=0.99,
            sub_steps=[2, 4],
        )
        mass_profile = ag.mp.EllipticalIsothermal(
            centre=(2.0, 1.0), einstein_radius=1.0
        )
        convergence = mass_profile.convergence_from_grid(grid=grid)
        max_indexes = np.unravel_index(
            convergence.native.argmax(), convergence.shape_native
        )
        assert max_indexes == (1, 4)
        potential = mass_profile.potential_from_grid(grid=grid)
        max_indexes = np.unravel_index(
            potential.native.argmin(), potential.shape_native
        )
        assert max_indexes == (1, 4)
        deflections = mass_profile.deflections_from_grid(grid=grid)
        assert deflections.native[1, 4, 0] >= 0
        assert deflections.native[2, 4, 0] <= 0
        assert deflections.native[1, 4, 1] >= 0
        assert deflections.native[1, 3, 1] <= 0
        mass_profile = ag.mp.SphericalIsothermal(centre=(2.0, 1.0), einstein_radius=1.0)
        convergence = mass_profile.convergence_from_grid(grid=grid)
        max_indexes = np.unravel_index(
            convergence.native.argmax(), convergence.shape_native
        )
        assert max_indexes == (1, 4)
        potential = mass_profile.potential_from_grid(grid=grid)
        max_indexes = np.unravel_index(
            potential.native.argmin(), potential.shape_native
        )
        assert max_indexes == (1, 4)
        deflections = mass_profile.deflections_from_grid(grid=grid)
        assert deflections.native[1, 4, 0] >= 0
        assert deflections.native[2, 4, 0] <= 0
        assert deflections.native[1, 4, 1] >= 0
        assert deflections.native[1, 3, 1] <= 0
class TestDecorators:
    """Checks the grid-iterate and grid-interpolate evaluation decorators."""

    def test__grid_iterate_in__iterates_grid_result_correctly(self, gal_x1_mp):
        mask = ag.Mask2D.manual(
            mask=[
                [True, True, True, True, True],
                [True, False, False, False, True],
                [True, False, False, False, True],
                [True, False, False, False, True],
                [True, True, True, True, True],
            ],
            pixel_scales=(1.0, 1.0),
        )
        # fractional_accuracy=1.0 forces iteration to the final sub-size (2),
        # so the result must equal the direct sub-size-2 calculation.
        grid = ag.Grid2DIterate.from_mask(
            mask=mask, fractional_accuracy=1.0, sub_steps=[2]
        )
        mass_profile = ag.mp.EllipticalIsothermal(
            centre=(0.08, 0.08), einstein_radius=1.0
        )
        deflections = mass_profile.deflections_from_grid(grid=grid)
        mask_sub_2 = mask.mask_new_sub_size_from_mask(mask=mask, sub_size=2)
        grid_sub_2 = ag.Grid2D.from_mask(mask=mask_sub_2)
        deflections_sub_2 = mass_profile.deflections_from_grid(
            grid=grid_sub_2
        ).slim_binned
        assert deflections == pytest.approx(deflections_sub_2, 1.0e-6)
        # Looser accuracy with sub_steps [2, 4, 8]: different pixels converge
        # at different sub-sizes.
        grid = ag.Grid2DIterate.from_mask(
            mask=mask, fractional_accuracy=0.99, sub_steps=[2, 4, 8]
        )
        mass_profile = ag.mp.EllipticalIsothermal(
            centre=(0.08, 0.08), einstein_radius=1.0
        )
        deflections = mass_profile.deflections_from_grid(grid=grid)
        mask_sub_4 = mask.mask_new_sub_size_from_mask(mask=mask, sub_size=4)
        grid_sub_4 = ag.Grid2D.from_mask(mask=mask_sub_4)
        deflections_sub_4 = mass_profile.deflections_from_grid(
            grid=grid_sub_4
        ).slim_binned
        assert deflections[0, 0] == deflections_sub_4[0, 0]
        mask_sub_8 = mask.mask_new_sub_size_from_mask(mask=mask, sub_size=8)
        grid_sub_8 = ag.Grid2D.from_mask(mask=mask_sub_8)
        deflections_sub_8 = mass_profile.deflections_from_grid(
            grid=grid_sub_8
        ).slim_binned
        assert deflections[4, 0] == deflections_sub_8[4, 0]

    def test__grid_interpolate_in__convergence__interpolates_based_on_intepolate_config(
        self,
    ):
        # `False` in interpolate.ini
        mask = ag.Mask2D.manual(
            mask=[
                [True, True, True, True, True],
                [True, False, False, False, True],
                [True, False, False, False, True],
                [True, False, False, False, True],
                [True, True, True, True, True],
            ],
            pixel_scales=(1.0, 1.0),
        )
        grid = ag.Grid2D.from_mask(mask=mask)
        grid_interpolate = ag.Grid2DInterpolate.from_mask(
            mask=mask, pixel_scales_interp=0.1
        )
        # Elliptical profile: config disables interpolation -> identical result.
        mass_profile = ag.mp.EllipticalIsothermal(einstein_radius=1.0)
        convergence = mass_profile.convergence_from_grid(grid=grid)
        convergence_no_interpolate = mass_profile.convergence_from_grid(
            grid=grid_interpolate
        )
        assert (convergence == convergence_no_interpolate).all()
        # `False` in interpolate.ini
        # Spherical profile: interpolation enabled -> values differ from direct
        # evaluation but must match the manual interpolation below.
        mass_profile = ag.mp.SphericalIsothermal(einstein_radius=1.0)
        convergence = mass_profile.convergence_from_grid(grid=grid)
        convergence_interpolate = mass_profile.convergence_from_grid(
            grid=grid_interpolate
        )
        assert (convergence != convergence_interpolate).all()
        array_interp = mass_profile.convergence_from_grid(
            grid=grid_interpolate.grid_interp
        )
        interpolated_array = grid_interpolate.interpolated_array_from_array_interp(
            array_interp=array_interp
        )
        assert (convergence_interpolate == interpolated_array).all()

    def test__grid_interpolate_in__potential__interpolates_based_on_intepolate_config(
        self,
    ):
        # `False` in interpolate.ini
        mask = ag.Mask2D.manual(
            mask=[
                [True, True, True, True, True],
                [True, False, False, False, True],
                [True, False, False, False, True],
                [True, False, False, False, True],
                [True, True, True, True, True],
            ],
            pixel_scales=(1.0, 1.0),
        )
        grid = ag.Grid2D.from_mask(mask=mask)
        grid_interpolate = ag.Grid2DInterpolate.from_mask(
            mask=mask, pixel_scales_interp=0.1
        )
        # Same pattern as the convergence test, for the potential.
        mass_profile = ag.mp.EllipticalIsothermal(einstein_radius=1.0)
        potential = mass_profile.potential_from_grid(grid=grid)
        potential_no_interpolate = mass_profile.potential_from_grid(
            grid=grid_interpolate
        )
        assert (potential == potential_no_interpolate).all()
        # `False` in interpolate.ini
        mass_profile = ag.mp.SphericalIsothermal(einstein_radius=1.0)
        potential = mass_profile.potential_from_grid(grid=grid)
        potential_interpolate = mass_profile.potential_from_grid(grid=grid_interpolate)
        assert (potential != potential_interpolate).all()
        array_interp = mass_profile.potential_from_grid(
            grid=grid_interpolate.grid_interp
        )
        interpolated_array = grid_interpolate.interpolated_array_from_array_interp(
            array_interp=array_interp
        )
        assert (potential_interpolate == interpolated_array).all()

    def test__grid_interpolate_in__deflections__interpolates_based_on_intepolate_config(
        self,
    ):
        # `False` in interpolate.ini
        mask = ag.Mask2D.manual(
            mask=[
                [True, True, True, True, True],
                [True, False, False, False, True],
                [True, False, False, False, True],
                [True, False, False, False, True],
                [True, True, True, True, True],
            ],
            pixel_scales=(1.0, 1.0),
        )
        grid = ag.Grid2D.from_mask(mask=mask)
        grid_interpolate = ag.Grid2DInterpolate.from_mask(
            mask=mask, pixel_scales_interp=0.1
        )
        # Same pattern, for the (vector-valued) deflections.
        mass_profile = ag.mp.EllipticalIsothermal(einstein_radius=1.0)
        deflections = mass_profile.deflections_from_grid(grid=grid)
        deflections_no_interpolate = mass_profile.deflections_from_grid(
            grid=grid_interpolate
        )
        assert (deflections == deflections_no_interpolate).all()
        # `False` in interpolate.ini
        mass_profile = ag.mp.SphericalIsothermal(einstein_radius=1.0)
        deflections_interpolate = mass_profile.deflections_from_grid(
            grid=grid_interpolate
        )
        grid_interp = mass_profile.deflections_from_grid(
            grid=grid_interpolate.grid_interp
        )
        interpolated_grid = grid_interpolate.interpolated_grid_from_grid_interp(
            grid_interp=grid_interp
        )
        assert (deflections_interpolate == interpolated_grid).all()
| 34.036585
| 93
| 0.612057
| 2,669
| 22,328
| 4.833271
| 0.068565
| 0.012713
| 0.010698
| 0.03845
| 0.854031
| 0.81845
| 0.79031
| 0.746899
| 0.70938
| 0.639302
| 0
| 0.047583
| 0.29313
| 22,328
| 655
| 94
| 34.08855
| 0.769752
| 0.007211
| 0
| 0.506356
| 0
| 0
| 0.002325
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 1
| 0.03178
| false
| 0
| 0.010593
| 0
| 0.059322
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4c0e5be3c8a2a2321ebade2962320bd53e890a42
| 11,615
|
py
|
Python
|
HLTriggerOffline/SUSYBSM/python/SUSYBSM_inclusiveHT_cff.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
HLTriggerOffline/SUSYBSM/python/SUSYBSM_inclusiveHT_cff.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
HLTriggerOffline/SUSYBSM/python/SUSYBSM_inclusiveHT_cff.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester

# All HLT_PFHT<threshold> monitors are identical except for the threshold that
# is wired into the trigger path and filter names, so the ten hand-copied
# analyzer/harvester blocks are replaced by two small factories.  Every
# module-level name produced below is byte-identical to the original config,
# so downstream cff/cfg files that import them are unaffected.


def _inclusive_ht_analyzer(ht_threshold):
    """Build the SUSY_HLT_InclusiveHT analyzer for the HLT_PFHT<ht_threshold> path."""
    return cms.EDAnalyzer(
        "SUSY_HLT_InclusiveHT",
        trigSummary = cms.InputTag("hltTriggerSummaryAOD"),
        pfMETCollection = cms.InputTag("pfMet"),
        pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
        caloJetCollection = cms.InputTag("ak4CaloJets"),
        TriggerResults = cms.InputTag('TriggerResults', '', 'HLT'),
        TriggerPath = cms.string('HLT_PFHT%d_v' % ht_threshold),
        TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu24_eta2p1_IterTrk02_v'),
        # The last filter in the path.
        TriggerFilter = cms.InputTag('hltPFHT%dJet30' % ht_threshold, '', 'HLT'),
        PtThrJet = cms.untracked.double(30.0),
        EtaThrJet = cms.untracked.double(3.0),
    )


def _inclusive_ht_postprocessing(ht_threshold):
    """Build the DQMGenericClient harvester computing the MET/HT turn-on efficiencies."""
    return DQMEDHarvester(
        "DQMGenericClient",
        subDirs = cms.untracked.vstring("HLT/SUSYBSM/HLT_PFHT%d_v" % ht_threshold),
        efficiency = cms.vstring(
            "pfMetTurnOn_eff 'Efficiency vs PFMET' pfMetTurnOn_num pfMetTurnOn_den",
            "pfHTTurnOn_eff 'Efficiency vs PFHT' pfHTTurnOn_num pfHTTurnOn_den"
        ),
        resolution = cms.vstring(""),
    )


# Signal-trigger monitors.
SUSY_HLT_InclusiveHT_800 = _inclusive_ht_analyzer(800)
SUSYoHLToInclusiveHTo800oPOSTPROCESSING = _inclusive_ht_postprocessing(800)
SUSY_HLT_InclusiveHT_900 = _inclusive_ht_analyzer(900)
SUSYoHLToInclusiveHTo900oPOSTPROCESSING = _inclusive_ht_postprocessing(900)

# Auxiliary (lower-threshold) monitors.
SUSY_HLT_InclusiveHT_aux125 = _inclusive_ht_analyzer(125)
SUSYoHLToInclusiveHToAux125oPOSTPROCESSING = _inclusive_ht_postprocessing(125)
SUSY_HLT_InclusiveHT_aux200 = _inclusive_ht_analyzer(200)
SUSYoHLToInclusiveHToAux200oPOSTPROCESSING = _inclusive_ht_postprocessing(200)
SUSY_HLT_InclusiveHT_aux250 = _inclusive_ht_analyzer(250)
SUSYoHLToInclusiveHToAux250oPOSTPROCESSING = _inclusive_ht_postprocessing(250)
SUSY_HLT_InclusiveHT_aux300 = _inclusive_ht_analyzer(300)
SUSYoHLToInclusiveHToAux300oPOSTPROCESSING = _inclusive_ht_postprocessing(300)
SUSY_HLT_InclusiveHT_aux350 = _inclusive_ht_analyzer(350)
SUSYoHLToInclusiveHToAux350oPOSTPROCESSING = _inclusive_ht_postprocessing(350)
SUSY_HLT_InclusiveHT_aux400 = _inclusive_ht_analyzer(400)
SUSYoHLToInclusiveHToAux400oPOSTPROCESSING = _inclusive_ht_postprocessing(400)
SUSY_HLT_InclusiveHT_aux475 = _inclusive_ht_analyzer(475)
SUSYoHLToInclusiveHToAux475oPOSTPROCESSING = _inclusive_ht_postprocessing(475)
SUSY_HLT_InclusiveHT_aux600 = _inclusive_ht_analyzer(600)
SUSYoHLToInclusiveHToAux600oPOSTPROCESSING = _inclusive_ht_postprocessing(600)

# Sequences preserve the original ordering: aux paths first, then signal paths.
SUSY_HLT_InclusiveHT = cms.Sequence(SUSY_HLT_InclusiveHT_aux125 +
                                    SUSY_HLT_InclusiveHT_aux200 +
                                    SUSY_HLT_InclusiveHT_aux250 +
                                    SUSY_HLT_InclusiveHT_aux300 +
                                    SUSY_HLT_InclusiveHT_aux350 +
                                    SUSY_HLT_InclusiveHT_aux400 +
                                    SUSY_HLT_InclusiveHT_aux475 +
                                    SUSY_HLT_InclusiveHT_aux600 +
                                    SUSY_HLT_InclusiveHT_800 +
                                    SUSY_HLT_InclusiveHT_900)
SUSY_HLT_InclusiveHT_POSTPROCESSING = cms.Sequence(SUSYoHLToInclusiveHToAux125oPOSTPROCESSING +
                                                   SUSYoHLToInclusiveHToAux200oPOSTPROCESSING +
                                                   SUSYoHLToInclusiveHToAux250oPOSTPROCESSING +
                                                   SUSYoHLToInclusiveHToAux300oPOSTPROCESSING +
                                                   SUSYoHLToInclusiveHToAux350oPOSTPROCESSING +
                                                   SUSYoHLToInclusiveHToAux400oPOSTPROCESSING +
                                                   SUSYoHLToInclusiveHToAux475oPOSTPROCESSING +
                                                   SUSYoHLToInclusiveHToAux600oPOSTPROCESSING +
                                                   SUSYoHLToInclusiveHTo800oPOSTPROCESSING +
                                                   SUSYoHLToInclusiveHTo900oPOSTPROCESSING)
| 46.834677
| 95
| 0.72415
| 1,103
| 11,615
| 7.421578
| 0.089755
| 0.080625
| 0.070364
| 0.024432
| 0.808698
| 0.808698
| 0.808698
| 0.808698
| 0.808698
| 0.724408
| 0
| 0.037148
| 0.165648
| 11,615
| 247
| 96
| 47.024292
| 0.807553
| 0.023246
| 0
| 0.580357
| 0
| 0
| 0.283194
| 0.048522
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008929
| 0
| 0.008929
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4c57b7c3ac565795fedc00ddadef2d00d871e1a0
| 2,759
|
py
|
Python
|
PA/coursework_2/test_simulator.py
|
easyCZ/UoE-Projects
|
7651c8caf329c4f7b4562eba441bfc24124cfcfd
|
[
"BSD-2-Clause"
] | null | null | null |
PA/coursework_2/test_simulator.py
|
easyCZ/UoE-Projects
|
7651c8caf329c4f7b4562eba441bfc24124cfcfd
|
[
"BSD-2-Clause"
] | 1
|
2022-02-23T07:34:53.000Z
|
2022-02-23T07:34:53.000Z
|
PA/coursework_2/test_simulator.py
|
easyCZ/UoE-Projects
|
7651c8caf329c4f7b4562eba441bfc24124cfcfd
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
from protocols import MSI, MESI
from models import State, Action
class ProtocolTest(unittest.TestCase):
    """Shared harness for coherence-protocol tests.

    Subclasses must assign the protocol under test to ``self.sut`` in
    their ``setUp``.
    """

    def assert_remote(self, current_state, action, new_state):
        """Assert that a remote (other-CPU) event moves current_state to new_state."""
        observed = self.sut.remote(current_state, action)
        return self.assertEqual(new_state, observed)
class MSITest(ProtocolTest):
    """State-transition tests for the MSI cache-coherence protocol."""

    def setUp(self):
        self.sut = MSI()

    def assert_local(self, current_state, action, new_state):
        """Assert that a local (own-CPU) event moves current_state to new_state."""
        return self.assertEqual(new_state, self.sut.local(current_state, action))

    # Transitions triggered by the local CPU.
    def test_local_from_invalid_on_read_miss(self):
        self.assert_local(State.invalid, Action.read_miss, State.shared)

    def test_local_from_shared_on_read_hit(self):
        self.assert_local(State.shared, Action.read_hit, State.shared)

    def test_local_from_shared_on_write_hit(self):
        self.assert_local(State.shared, Action.write_hit, State.modified)

    def test_local_from_shared_on_write_miss(self):
        self.assert_local(State.shared, Action.write_miss, State.modified)

    def test_local_modified_on_read_hit(self):
        self.assert_local(State.modified, Action.read_hit, State.modified)

    def test_local_modified_on_write_hit(self):
        self.assert_local(State.modified, Action.write_hit, State.modified)

    def test_local_invalid_on_write_miss(self):
        self.assert_local(State.invalid, Action.write_miss, State.modified)

    # Transitions observed in response to other CPUs.
    def test_remote_shared_on_write_miss(self):
        self.assert_remote(State.shared, Action.write_miss, State.invalid)

    def test_remote_shared_on_read_miss(self):
        self.assert_remote(State.shared, Action.read_miss, State.shared)

    def test_remote_modified_on_read_miss(self):
        self.assert_remote(State.modified, Action.read_miss, State.shared)

    def test_remote_modified_on_write_miss(self):
        self.assert_remote(State.modified, Action.write_miss, State.invalid)
class MESITest(ProtocolTest):
    """Remote-transition tests for the MESI cache-coherence protocol."""

    def setUp(self):
        self.sut = MESI()

    def test_remote_modified_on_write_miss(self):
        self.assert_remote(State.modified, Action.write_miss, State.invalid)

    def test_remote_modified_on_read_miss(self):
        self.assert_remote(State.modified, Action.read_miss, State.shared)

    def test_remote_exclusive_on_write_miss(self):
        self.assert_remote(State.exclusive, Action.write_miss, State.invalid)

    def test_remote_exclusive_on_read_miss(self):
        self.assert_remote(State.exclusive, Action.read_miss, State.shared)

    def test_remote_shared_on_read_miss(self):
        self.assert_remote(State.shared, Action.read_miss, State.shared)

    def test_remote_shared_on_write_miss(self):
        self.assert_remote(State.shared, Action.write_miss, State.invalid)
| 35.831169
| 77
| 0.752447
| 390
| 2,759
| 4.982051
| 0.094872
| 0.07823
| 0.122491
| 0.120432
| 0.885744
| 0.865157
| 0.826557
| 0.783325
| 0.4807
| 0.383942
| 0
| 0
| 0.158391
| 2,759
| 76
| 78
| 36.302632
| 0.836779
| 0.003625
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.42
| 1
| 0.42
| false
| 0
| 0.06
| 0
| 0.58
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4c68d622a07b622aee4f9ce94a4d1a6e69180030
| 139
|
py
|
Python
|
tictactoe/repositories/match.py
|
pitzer42/nano_tcg
|
c984b253b8a53a707460aac21c10f140d16d902e
|
[
"MIT"
] | 1
|
2020-09-30T21:03:37.000Z
|
2020-09-30T21:03:37.000Z
|
tictactoe/repositories/match.py
|
pitzer42/nano_tcg
|
c984b253b8a53a707460aac21c10f140d16d902e
|
[
"MIT"
] | null | null | null |
tictactoe/repositories/match.py
|
pitzer42/nano_tcg
|
c984b253b8a53a707460aac21c10f140d16d902e
|
[
"MIT"
] | null | null | null |
from abc import ABC
from gloop.repositories.match import MatchRepository
class TicTacToeMatchRepository(MatchRepository, ABC):
    """Tic-tac-toe-specific match repository.

    Currently adds no behavior of its own; it exists so tic-tac-toe code
    depends on a game-specific type rather than the generic repository.
    """
| 17.375
| 53
| 0.820144
| 15
| 139
| 7.6
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136691
| 139
| 7
| 54
| 19.857143
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d5cb1e7c2368a83e0da890df8817d478bf726a1c
| 5,419
|
py
|
Python
|
resrc/list/tests/integration_tests/views_test.py
|
ignatandrei/resrc
|
5b88e3cfbc638e5f98cf7bfe6f4a5757840a2565
|
[
"MIT"
] | 1
|
2015-11-05T19:50:19.000Z
|
2015-11-05T19:50:19.000Z
|
resrc/list/tests/integration_tests/views_test.py
|
sergiolimajr/resrc
|
a0714b3ae989821ebe4c5a7b5a2235a85bfa16a9
|
[
"MIT"
] | 2
|
2020-08-04T18:08:04.000Z
|
2021-02-02T22:57:59.000Z
|
resrc/list/tests/integration_tests/views_test.py
|
sergiolimajr/resrc
|
a0714b3ae989821ebe4c5a7b5a2235a85bfa16a9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-:
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate
from django.http import Http404
from resrc.link.tests.factories import LinkFactory
from resrc.list.tests.factories import ListFactory
from resrc.tests.factories import UserFactory
class ListViewTestCase(TestCase):
    """Integration tests for the list views and the AJAX list endpoints.

    Improvements over the original: the create-user/save/login boilerplate,
    repeated in five tests, is factored into ``_login_new_user``, and the
    unused local ``link_pk`` in ``test_ajax_own_lists`` is removed.
    """

    def _login_new_user(self):
        """Create and persist a fresh user, log it in, and return it."""
        user = UserFactory()
        user.save()
        self.client.login(username=user.username, password='test123')
        return user

    def test_single_view(self):
        """list-single redirects (302) for a real list and 404s for a bogus pk."""
        alist = ListFactory()
        alist.save()
        resp = self.client.get(reverse('list-single', kwargs={'list_pk': alist.pk}))
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(reverse('list-single', kwargs={'list_pk': 77}))
        self.assertEqual(resp.status_code, 404)

    def test_single_slug_view(self):
        """list-single-slug renders for anonymous and logged-in users; a bad slug 404s."""
        alist = ListFactory()
        alist.save()
        resp = self.client.get(reverse('list-single-slug', kwargs={'list_pk': alist.pk, 'list_slug': alist.slug}))
        self.assertEqual(resp.status_code, 200)
        self._login_new_user()
        resp = self.client.get(reverse('list-single-slug', kwargs={'list_pk': alist.pk, 'list_slug': alist.slug}))
        self.assertEqual(resp.status_code, 200)
        resp = self.client.get(reverse('list-single-slug', kwargs={'list_pk': alist.pk, 'list_slug': alist.slug + "x"}))
        self.assertEqual(resp.status_code, 404)

    def test_new_list_view(self):
        """new-list requires authentication: 302 when anonymous, 200 when logged in."""
        resp = self.client.get(reverse('new-list'))
        self.assertEqual(resp.status_code, 302)
        self._login_new_user()
        resp = self.client.get(reverse('new-list'))
        self.assertEqual(resp.status_code, 200)

    def test_ajax_add_to_list_or_create(self):
        """Exercise every branch of the add-to-list-or-create AJAX endpoint."""
        import simplejson
        resp = self.client.get(reverse('ajax-add-to-list-or-create'))
        # not authenticated
        self.assertEqual(resp.content, simplejson.dumps({'result': 'fail'}))
        self._login_new_user()
        # authenticated but no post data
        resp = self.client.get(reverse('ajax-add-to-list-or-create'))
        self.assertEqual(resp.content, simplejson.dumps({'result': 'fail'}))
        link = LinkFactory()
        link.save()
        link2 = LinkFactory()
        link2.title = 'new link'
        link2.save()
        # authenticated and posting fake content
        resp = self.client.post(reverse('ajax-add-to-list-or-create'), {
            'lk': link.pk,
            't': 'haxx0r',
        })
        self.assertEqual(resp.status_code, 404)
        # authenticated and adding link to default nonexistant bookmark list
        resp = self.client.post(reverse('ajax-add-to-list-or-create'), {
            'lk': link.pk,
            't': 'bookmark',
        })
        self.assertEqual(resp.content, simplejson.dumps({'result': 'added'}))
        # authenticated and adding link to default existing bookmark list
        resp = self.client.post(reverse('ajax-add-to-list-or-create'), {
            'lk': link2.pk,
            't': 'bookmark',
        })
        self.assertEqual(resp.content, simplejson.dumps({'result': 'added'}))
        # authenticated and adding link to default nonexistant reading list
        resp = self.client.post(reverse('ajax-add-to-list-or-create'), {
            'lk': link.pk,
            't': 'toread',
        })
        self.assertEqual(resp.content, simplejson.dumps({'result': 'added'}))
        # authenticated and adding link to default existing reading list
        resp = self.client.post(reverse('ajax-add-to-list-or-create'), {
            'lk': link2.pk,
            't': 'toread',
        })
        self.assertEqual(resp.content, simplejson.dumps({'result': 'added'}))
        # authenticated and removing link from default existing reading list
        resp = self.client.post(reverse('ajax-add-to-list-or-create'), {
            'lk': link2.pk,
            't': 'toread',
        })
        self.assertEqual(resp.content, simplejson.dumps({'result': 'removed'}))
        # authenticated and adding/remove link to un/existing own list list
        resp = self.client.post(reverse('ajax-add-to-list-or-create'), {
            'lk': link.pk,
            'ls': 43212234
        })
        self.assertEqual(resp.status_code, 404)
        alist = ListFactory()
        alist.save()
        resp = self.client.post(reverse('ajax-add-to-list-or-create'), {
            'lk': link.pk,
            'ls': alist.pk
        })
        self.assertEqual(resp.content, simplejson.dumps({'result': 'added'}))
        resp = self.client.post(reverse('ajax-add-to-list-or-create'), {
            'lk': link.pk,
            'ls': alist.pk
        })
        self.assertEqual(resp.content, simplejson.dumps({'result': 'removed'}))

    def test_ajax_own_lists(self):
        """ajax-own-lists 404s for anonymous users and renders once logged in."""
        link = LinkFactory()
        link.save()
        resp = self.client.get(reverse('ajax-own-lists', kwargs={'link_pk': link.pk}))
        self.assertEqual(resp.status_code, 404)
        self._login_new_user()
        resp = self.client.get(reverse('ajax-own-lists', kwargs={'link_pk': link.pk}))
        self.assertEqual(resp.status_code, 200)
| 38.161972
| 120
| 0.613951
| 651
| 5,419
| 5.050691
| 0.141321
| 0.072993
| 0.085158
| 0.047445
| 0.804136
| 0.802616
| 0.769161
| 0.736618
| 0.676095
| 0.676095
| 0
| 0.016031
| 0.240266
| 5,419
| 141
| 121
| 38.432624
| 0.782609
| 0.092822
| 0
| 0.785047
| 0
| 0
| 0.13945
| 0.058308
| 0
| 0
| 0
| 0
| 0.186916
| 1
| 0.046729
| false
| 0.037383
| 0.074766
| 0
| 0.130841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d5dfa72ecf282130d6dc8633656ec5c32a1812db
| 5,760
|
py
|
Python
|
tests/unit/adapters/netbox_api/models/test_netbox_vlan.py
|
anirudhkamath/network-importer
|
13a32270ac838745433bd2b859697e657f95c013
|
[
"Apache-2.0"
] | 88
|
2020-12-03T16:07:47.000Z
|
2022-03-21T16:14:24.000Z
|
tests/unit/adapters/netbox_api/models/test_netbox_vlan.py
|
anirudhkamath/network-importer
|
13a32270ac838745433bd2b859697e657f95c013
|
[
"Apache-2.0"
] | 62
|
2020-12-03T03:49:13.000Z
|
2022-03-21T22:02:15.000Z
|
tests/unit/adapters/netbox_api/models/test_netbox_vlan.py
|
anirudhkamath/network-importer
|
13a32270ac838745433bd2b859697e657f95c013
|
[
"Apache-2.0"
] | 23
|
2020-12-03T03:37:53.000Z
|
2022-03-30T16:28:35.000Z
|
"""test for NetboxVlan model."""
import os
import yaml
import pytest
import pynetbox
from diffsync.exceptions import ObjectNotFound
from network_importer.adapters.netbox_api.models import NetboxVlan, NetboxDevice
# Directory containing this test module; fixture paths are resolved against it.
ROOT = os.path.abspath(os.path.dirname(__file__))
# Relative directories holding recorded NetBox API payloads per NetBox version
# (2.8 and 2.9).  FIXTURE_28 is not used in this part of the file.
FIXTURE_28 = "../fixtures/netbox_28"
FIXTURE_29 = "../fixtures/netbox_29"
def test_vlan_create_from_pynetbox(netbox_api_base):
    """create_from_pynetbox maps an untagged pynetbox VLAN record to a NetboxVlan."""
    api = pynetbox.api(url="http://mock", token="1234567890")
    # Fix: use a context manager so the fixture file handle is closed
    # (the original passed a bare open() into yaml.safe_load and leaked it).
    with open(f"{ROOT}/{FIXTURE_29}/vlan_101_no_tag.json") as fixture:
        data = yaml.safe_load(fixture)
    pnb = pynetbox.core.response.Record(values=data, api=api, endpoint=1)
    item = NetboxVlan.create_from_pynetbox(diffsync=netbox_api_base, obj=pnb, site_name="nyc")
    assert isinstance(item, NetboxVlan) is True
    assert item.remote_id == 1
    assert item.vid == 101
    # No tags in the fixture, so no devices are associated.
    assert item.associated_devices == []
def test_vlan_create_from_pynetbox_with_tags(netbox_api_base):
    """A tagged VLAN record associates every matching device present in the adapter."""
    api = pynetbox.api(url="http://mock", token="1234567890")
    # Fix: context manager closes the fixture handle (previously leaked).
    with open(f"{ROOT}/{FIXTURE_29}/vlan_101_tags_01.json") as fixture:
        data = yaml.safe_load(fixture)
    pnb = pynetbox.core.response.Record(values=data, api=api, endpoint=1)
    netbox_api_base.add(NetboxDevice(name="devA", site_name="nyc", remote_id=30))
    item = NetboxVlan.create_from_pynetbox(diffsync=netbox_api_base, obj=pnb, site_name="nyc")
    assert isinstance(item, NetboxVlan) is True
    assert item.remote_id == 1
    assert item.vid == 101
    assert item.associated_devices == ["devA"]
    # Try again with one additional device in the inventory.
    netbox_api_base.add(NetboxDevice(name="devB", site_name="nyc", remote_id=31))
    item = NetboxVlan.create_from_pynetbox(diffsync=netbox_api_base, obj=pnb, site_name="nyc")
    assert isinstance(item, NetboxVlan) is True
    assert item.remote_id == 1
    assert item.vid == 101
    assert item.associated_devices == ["devA", "devB"]
def test_translate_attrs_for_netbox_no_attrs(netbox_api_base):
    """With an empty diff, translation yields a default name and site id, no tags."""
    vlan = NetboxVlan(vid=100, site_name="HQ", remote_id=30)
    netbox_api_base.add(vlan)
    translated = vlan.translate_attrs_for_netbox({})
    assert translated.get("name") == "vlan-100"
    assert translated.get("site") == 10
    assert "tags" not in translated
def test_translate_attrs_for_netbox_with_partial_attrs(netbox_api_base):
    """Only attrs present in the diff are translated; device names map to tag ids."""
    vlan = NetboxVlan(vid=100, name="MYVLAN", site_name="HQ", remote_id=30)
    netbox_api_base.add(vlan)
    for dev_name, dev_remote_id, dev_tag in (("dev1", 32, 12), ("dev2", 33, 13)):
        netbox_api_base.add(
            NetboxDevice(name=dev_name, site_name="HQ", remote_id=dev_remote_id, device_tag_id=dev_tag)
        )
    translated = vlan.translate_attrs_for_netbox({"associated_devices": ["dev1", "dev2"]})
    # "name" was not part of the diff, so it must not be translated.
    assert "name" not in translated
    assert translated.get("site") == 10
    assert sorted(translated.get("tags", [])) == [12, 13]
def test_translate_attrs_for_netbox_with_attrs(netbox_api_base):
    """A full diff translates the explicit name, the site id, and the device tags."""
    vlan = NetboxVlan(vid=100, site_name="HQ", remote_id=30)
    netbox_api_base.add(vlan)
    for dev_name, dev_remote_id, dev_tag in (("dev1", 32, 12), ("dev2", 33, 13)):
        netbox_api_base.add(
            NetboxDevice(name=dev_name, site_name="HQ", remote_id=dev_remote_id, device_tag_id=dev_tag)
        )
    translated = vlan.translate_attrs_for_netbox({"name": "VOICE", "associated_devices": ["dev1", "dev2"]})
    assert translated.get("name") == "VOICE"
    assert translated.get("site") == 10
    assert sorted(translated.get("tags", [])) == [12, 13]
def test_translate_attrs_for_netbox_with_missing_devices(netbox_api_base):
    """Devices referenced in the diff but absent from the adapter are skipped."""
    vlan = NetboxVlan(vid=100, site_name="HQ", remote_id=30)
    netbox_api_base.add(vlan)
    # Only dev1 exists in the adapter; dev2 is referenced but never added.
    netbox_api_base.add(NetboxDevice(name="dev1", site_name="HQ", remote_id=32, device_tag_id=12))
    translated = vlan.translate_attrs_for_netbox({"name": "VOICE", "associated_devices": ["dev1", "dev2"]})
    assert translated.get("name") == "VOICE"
    assert translated.get("site") == 10
    assert sorted(translated.get("tags", [])) == [12]
def test_translate_attrs_for_netbox_missing_site(netbox_api_base):
    """Translating a VLAN whose site is not in the adapter raises ObjectNotFound.

    The trailing ``assert True`` of the original was dead code (it can never
    fail) and has been removed; ``pytest.raises`` alone carries the check.
    """
    vlan = NetboxVlan(vid=100, site_name="NOTPRESENT", remote_id=30)
    netbox_api_base.add(vlan)
    with pytest.raises(ObjectNotFound):
        vlan.translate_attrs_for_netbox({})
def test_update_clean_tags_no_incoming_tags(netbox_api_base):
    """When the translated params carry no tags, update_clean_tags emits none."""
    vlan = NetboxVlan(vid=100, site_name="HQ", remote_id=30)
    netbox_api_base.add(vlan)
    api = pynetbox.api(url="http://mock", token="1234567890")
    # Fix: context manager closes the fixture handle (previously leaked).
    with open(f"{ROOT}/{FIXTURE_29}/vlan_101_tags_01.json") as fixture:
        data = yaml.safe_load(fixture)
    pnb = pynetbox.core.response.Record(values=data, api=api, endpoint=1)
    params = vlan.translate_attrs_for_netbox({"name": "VOICE"})
    clean_params = vlan.update_clean_tags(nb_params=params, obj=pnb)
    assert "tags" not in clean_params
def test_update_clean_tags_with_incoming_tags(netbox_api_base):
    """Incoming tags are merged with the existing tags on the pynetbox object."""
    vlan = NetboxVlan(vid=100, site_name="HQ", remote_id=30)
    netbox_api_base.add(vlan)
    netbox_api_base.add(NetboxDevice(name="dev1", site_name="HQ", remote_id=32, device_tag_id=12))
    netbox_api_base.add(NetboxDevice(name="dev2", site_name="HQ", remote_id=33, device_tag_id=13))
    api = pynetbox.api(url="http://mock", token="1234567890")
    # Fix: context manager closes the fixture handle (previously leaked).
    with open(f"{ROOT}/{FIXTURE_29}/vlan_101_tags_01.json") as fixture:
        data = yaml.safe_load(fixture)
    pnb = pynetbox.core.response.Record(values=data, api=api, endpoint=1)
    params = vlan.translate_attrs_for_netbox({"name": "VOICE", "associated_devices": ["dev1", "dev2"]})
    clean_params = vlan.update_clean_tags(nb_params=params, obj=pnb)
    assert "tags" in clean_params
    # Fixture tags [1, 2, 3] merged with the device tags [12, 13].
    assert sorted(clean_params["tags"]) == [1, 2, 3, 12, 13]
| 36
| 103
| 0.722396
| 849
| 5,760
| 4.621908
| 0.129564
| 0.066514
| 0.092762
| 0.06524
| 0.866972
| 0.846075
| 0.773955
| 0.756626
| 0.739297
| 0.728848
| 0
| 0.040437
| 0.141319
| 5,760
| 159
| 104
| 36.226415
| 0.752932
| 0.014063
| 0
| 0.625
| 0
| 0
| 0.11493
| 0.036136
| 0
| 0
| 0
| 0
| 0.365385
| 1
| 0.086538
| false
| 0
| 0.057692
| 0
| 0.144231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d5e33fbae2604e8fef3fc177cf71cf157376183e
| 66
|
py
|
Python
|
media_preprocessor/__init__.py
|
harshh-mahajan/media_preprocessor
|
5943ab6d6cde27200850398e40717d7b55646ec8
|
[
"MIT"
] | null | null | null |
media_preprocessor/__init__.py
|
harshh-mahajan/media_preprocessor
|
5943ab6d6cde27200850398e40717d7b55646ec8
|
[
"MIT"
] | null | null | null |
media_preprocessor/__init__.py
|
harshh-mahajan/media_preprocessor
|
5943ab6d6cde27200850398e40717d7b55646ec8
|
[
"MIT"
] | 1
|
2020-11-22T16:41:09.000Z
|
2020-11-22T16:41:09.000Z
|
# Re-export the package's public entry point so callers can write
# ``from media_preprocessor import preprocess_tool`` directly.
from media_preprocessor.media_preprocessor import preprocess_tool
| 33
| 65
| 0.924242
| 8
| 66
| 7.25
| 0.75
| 0.586207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 66
| 1
| 66
| 66
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d5ec10cabbd7f27a5cb76bd1dca368f944c99be3
| 1,349
|
py
|
Python
|
test/processors/test_google.py
|
mgor/pydoc-markdown
|
b15127e3c643976e71a10c7fa4d03297ee616542
|
[
"MIT"
] | null | null | null |
test/processors/test_google.py
|
mgor/pydoc-markdown
|
b15127e3c643976e71a10c7fa4d03297ee616542
|
[
"MIT"
] | null | null | null |
test/processors/test_google.py
|
mgor/pydoc-markdown
|
b15127e3c643976e71a10c7fa4d03297ee616542
|
[
"MIT"
] | null | null | null |
from pydoc_markdown.contrib.processors.google import GoogleProcessor
from . import assert_processor_result
def test_google_processor(processor=None):
    """Run the Google-style docstring processor over representative inputs.

    Each case is an (input, expected-markdown) pair; the processor under
    test defaults to a fresh GoogleProcessor per case, exactly as the
    original three inline calls did.
    """
    cases = [
        # Args/Returns sections become bold headers with bullet lists.
        (
            """
Args:
s (str): A string.
b (int): An int.
Returns:
any: Something funny.
""",
            """
**Arguments**:
- `s` _str_ - A string.
- `b` _int_ - An int.
**Returns**:
- `any` - Something funny.
""",
        ),
        # Multi-line argument descriptions are preserved.
        (
            """
Args:
s (str): A string.
And the description
takes
multiple lines.
b (int): An int.
Returns:
any: Something funny.
""",
            """
**Arguments**:
- `s` _str_ - A string.
And the description
takes
multiple lines.
- `b` _int_ - An int.
**Returns**:
- `any` - Something funny.
""",
        ),
        # Fenced code inside an Example section passes through untouched.
        (
            """
Example:
```py
scanner = ListScanner(lst)
for value in scanner.safe_iter():
if some_condition(value):
value = scanner.advance()
```
""",
            """
**Example**:
```py
scanner = ListScanner(lst)
for value in scanner.safe_iter():
if some_condition(value):
value = scanner.advance()
```
""",
        ),
    ]
    for raw_docstring, expected_markdown in cases:
        assert_processor_result(
            processor or GoogleProcessor(),
            raw_docstring,
            expected_markdown,
        )
| 17.075949
| 68
| 0.556709
| 137
| 1,349
| 5.313869
| 0.350365
| 0.082418
| 0.115385
| 0.06044
| 0.825549
| 0.825549
| 0.825549
| 0.825549
| 0.825549
| 0.825549
| 0
| 0
| 0.30467
| 1,349
| 78
| 69
| 17.294872
| 0.776119
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 1
| 0.055556
| false
| 0
| 0.111111
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9112c14093d4c5fcc9f2c37dc7258c632d54886d
| 32
|
py
|
Python
|
codes.py
|
Lauracruces/Python
|
a485546f7c7f1206a9cdf2932c92b91c2bc73ee1
|
[
"MIT"
] | null | null | null |
codes.py
|
Lauracruces/Python
|
a485546f7c7f1206a9cdf2932c92b91c2bc73ee1
|
[
"MIT"
] | null | null | null |
codes.py
|
Lauracruces/Python
|
a485546f7c7f1206a9cdf2932c92b91c2bc73ee1
|
[
"MIT"
] | null | null | null |
# Fix: ``import pandas as pandas`` aliased the module to its own name, which
# is redundant.  A plain import binds the identical name ``pandas``.
import pandas
| 10.666667
| 23
| 0.8125
| 5
| 32
| 5.2
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 2
| 24
| 16
| 0.962963
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
911637d58a61f6403ddb0dc18bda8db8c37b17b1
| 22,260
|
py
|
Python
|
test/test_onnxrt_operators.py
|
intelkevinputnam/lpot-docs
|
1ff32b4d89074a6bd133ba531f7c0cea3b73152f
|
[
"Apache-2.0"
] | null | null | null |
test/test_onnxrt_operators.py
|
intelkevinputnam/lpot-docs
|
1ff32b4d89074a6bd133ba531f7c0cea3b73152f
|
[
"Apache-2.0"
] | null | null | null |
test/test_onnxrt_operators.py
|
intelkevinputnam/lpot-docs
|
1ff32b4d89074a6bd133ba531f7c0cea3b73152f
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
import unittest
import copy
import onnx
import numpy as np
from onnx import helper, TensorProto, numpy_helper, onnx_pb
from onnxruntime.quantization.quant_utils import QuantizationMode
from lpot.adaptor.ox_utils.onnx_quantizer import ONNXQuantizer
import onnxruntime as ort
class TestAdaptorONNXRT(unittest.TestCase):
    """Tests for lpot's ONNXQuantizer.

    Each test builds a tiny ONNX graph with ``onnx.helper``, then runs it
    through static (QLinearOps) and/or dynamic (IntegerOps) quantization
    and asserts that a quantized model object is produced.
    """

    qlinear_backend = QuantizationMode.QLinearOps
    integer_backend = QuantizationMode.IntegerOps
    # Default per-op config: dtype 3 (int8) symmetric weights and
    # dtype 2 (uint8) asymmetric activations, both per-tensor.
    q_config = {"weight":{'dtype': 3,
                          'algorithm': 'minmax',
                          'scheme':'sym',
                          'granularity': 'per_tensor'},
                'activation':{'dtype': 2,
                              'algorithm': 'minmax',
                              'scheme':'asym',
                              'granularity':'per_tensor'}
                }

    @classmethod
    def setUpClass(cls):
        # exist_ok=True keeps a directory left over from an interrupted
        # previous run from aborting the whole test class with
        # FileExistsError.
        os.makedirs('./onnxrt_test', exist_ok=True)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree("./onnxrt_test", ignore_errors=True)

    def static_test(self, model, q_config, quantize_params, quantizable_op_types):
        """Statically quantize a deep copy of *model* and assert that a
        quantized model object survives the pass."""
        quantizer = ONNXQuantizer(copy.deepcopy(model),
                                  q_config,
                                  self.qlinear_backend,
                                  True,
                                  quantize_params,
                                  quantizable_op_types)
        quantizer.quantize_model()
        assert quantizer.model.model

    def dynamic_test(self, model, q_config, quantize_params, quantizable_op_types):
        """Dynamically quantize a deep copy of *model* and assert that a
        quantized model object survives the pass."""
        quantizer = ONNXQuantizer(copy.deepcopy(model),
                                  q_config,
                                  self.integer_backend,
                                  False,
                                  quantize_params,
                                  quantizable_op_types)
        quantizer.quantize_model()
        assert quantizer.model.model

    def test_conv(self):
        """Static quantization of Conv and FusedConv with a bias input."""
        for op in ['Conv', 'FusedConv']:
            A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
            B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
            C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 1, 5, 1])
            D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 1])
            conv_node = onnx.helper.make_node(op, ['A', 'B', 'C'], ['D'],
                                              name=op,
                                              kernel_shape=[3, 3],
                                              pads=[1, 1, 1, 1])
            graph = helper.make_graph([conv_node], 'test_graph_1', [A, B, C], [D])
            model = helper.make_model(graph)
            # BUG FIX: a trailing comma here used to turn q_config into a
            # one-element tuple instead of the intended dict.
            q_config = {op: self.q_config}
            quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                               "B": [np.float32(10.), np.uint8(0)],
                               "C": [np.float32(10.), np.uint8(0)],
                               "D": [np.float32(10.), np.uint8(0)]}
            quantizable_op_types = [op]
            self.static_test(model, q_config, quantize_params, quantizable_op_types)

    def test_matmul(self):
        """Static and dynamic quantization of MatMul, plus a dynamic run
        with int8 activations and no pre-computed quantize params."""
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 5, 1])
        C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 1, 5, 1])
        matmul_node = onnx.helper.make_node('MatMul', ['A', 'B'], ['C'], name='Matmul')
        graph = helper.make_graph([matmul_node], 'test_graph_1', [A, B], [C])
        model = helper.make_model(graph)
        q_config = {"Matmul": self.q_config}
        quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                           "B": [np.float32(10.), np.uint8(0)],
                           "C": [np.float32(10.), np.uint8(0)]}
        quantizable_op_types = ["Matmul"]
        self.static_test(model, q_config, quantize_params, quantizable_op_types)
        self.dynamic_test(model, q_config, quantize_params, quantizable_op_types)
        # dtype 3 (int8) activations with empty quantize_params: only the
        # dynamic path can handle this combination.
        q_config = {"Matmul": {"weight":{'dtype': 3,
                                         'algorithm': 'minmax',
                                         'scheme':'sym',
                                         'granularity': 'per_tensor'},
                               'activation':{'dtype': 3,
                                             'algorithm': 'minmax',
                                             'scheme':'asym',
                                             'granularity':'per_tensor'}}}
        quantize_params = {}
        self.dynamic_test(model, q_config, quantize_params, quantizable_op_types)

    def test_attention(self):
        """Static and dynamic quantization of the Attention contrib op."""
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 5, 5])
        C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 1, 5, 5])
        D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 5])
        node = onnx.helper.make_node('Attention', ['A', 'B', 'C'], ['D'], name='Attention')
        graph = helper.make_graph([node], 'test_graph_1', [A, B, C], [D])
        model = helper.make_model(graph)
        q_config = {"Attention": self.q_config}
        quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                           "B": [np.float32(10.), np.uint8(0)],
                           "C": [np.float32(10.), np.uint8(0)],
                           "D": [np.float32(10.), np.uint8(0)]}
        quantizable_op_types = ["Attention"]
        self.static_test(model, q_config, quantize_params, quantizable_op_types)
        self.dynamic_test(model, q_config, quantize_params, quantizable_op_types)

    def test_gather(self):
        """Gather quantization with and without initializers, including a
        dynamic run with empty quantize_params."""
        a_value = np.random.randn(100, 4).astype(np.float32)
        A_init = helper.make_tensor('A', TensorProto.FLOAT, [100, 4],
                                    a_value.reshape(400).tolist())
        b_value = np.random.randint(2, size=(1, 10)).astype(np.int32)
        B_init = helper.make_tensor('B', TensorProto.INT32, [1, 10],
                                    b_value.reshape(10).tolist())
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [100, 4])
        B = helper.make_tensor_value_info('B', TensorProto.INT32, [1, 10])
        C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [10, 4])
        node = onnx.helper.make_node('Gather', ['A', 'B'], ['C'], name='Gather')
        graph = helper.make_graph([node], 'test_graph_1', [A, B], [C], [A_init, B_init])
        model = helper.make_model(graph)
        q_config = {'Gather': {"weight":{'dtype': 3,
                                         'algorithm': 'minmax',
                                         'scheme':'sym',
                                         'granularity': 'per_tensor'},
                               'activation':{'dtype': 2,
                                             'algorithm': 'minmax',
                                             'scheme':'asym',
                                             'granularity':'per_tensor'}
                               }}
        quantize_params = {"A": [np.float32(10.), np.uint8(0)]}
        quantizable_op_types = ["Gather"]
        self.static_test(model, q_config, quantize_params, quantizable_op_types)
        self.dynamic_test(model, q_config, quantize_params, quantizable_op_types)
        # Same graph without initializers: only dynamic quantization with
        # empty quantize_params is exercised.
        graph = helper.make_graph([node], 'test_graph_1', [A, B], [C])
        model = helper.make_model(graph)
        q_config = {'Gather': {"weight":{'dtype': 3,
                                         'algorithm': 'minmax',
                                         'scheme':'sym',
                                         'granularity': 'per_tensor'},
                               'activation':{'dtype': 2,
                                             'algorithm': 'minmax',
                                             'scheme':'asym',
                                             'granularity':'per_tensor'}
                               }}
        quantize_params = {}
        self.dynamic_test(model, q_config, quantize_params, quantizable_op_types)

    def test_split(self):
        """Static quantization of Split with an initializer input."""
        a_value = np.random.randn(100, 4).astype(np.float32)
        A_init = helper.make_tensor('A', TensorProto.FLOAT, [100, 4],
                                    a_value.reshape(400).tolist())
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [100, 4])
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [50, 4])
        C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [50, 4])
        node = onnx.helper.make_node('Split', ['A'], ['B', 'C'], name='Split')
        graph = helper.make_graph([node], 'test_graph_1', [A], [B, C], [A_init])
        model = helper.make_model(graph)
        q_config = {'Split': {"weight":{'dtype': 3,
                                        'algorithm': 'minmax',
                                        'scheme':'sym',
                                        'granularity': 'per_tensor'},
                              'activation':{'dtype': 2,
                                            'algorithm': 'minmax',
                                            'scheme':'asym',
                                            'granularity':'per_tensor'}
                              }}
        quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                           "B": [np.float32(10.), np.uint8(0)],
                           "C": [np.float32(10.), np.uint8(0)]}
        quantizable_op_types = ["Split"]
        self.static_test(model, q_config, quantize_params, quantizable_op_types)

    def test_pad(self):
        """Static quantization of Pad in every mode, behind a Conv and
        standalone, with and without the constant-value initializer."""
        b_value = np.array([0, 1, 1, 0, 1, 1]).astype(np.int64)
        B_init = helper.make_tensor('B', TensorProto.INT64, [6],
                                    b_value.reshape(6).tolist())
        B = helper.make_tensor_value_info('B', TensorProto.INT64, [6])
        C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 7, 7])
        d_value = np.random.randn(1).astype(np.float32)
        D_init = helper.make_tensor('D', TensorProto.FLOAT, [1],
                                    d_value.reshape(1).tolist())
        D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1])
        e_value = np.random.randn(1, 5, 5).astype(np.float32)
        E_init = helper.make_tensor('E', TensorProto.FLOAT, [1, 5, 5],
                                    e_value.reshape(25).tolist())
        E = helper.make_tensor_value_info('E', TensorProto.FLOAT, [1, 1, 5, 5])
        f_value = np.random.randn(1, 3, 3).astype(np.float32)
        F_init = helper.make_tensor('F', TensorProto.FLOAT, [1, 3, 3],
                                    f_value.reshape(9).tolist())
        F = helper.make_tensor_value_info('F', TensorProto.FLOAT, [1, 1, 3, 3])
        for mode in ["constant", "edge", "reflect", "constant_value", "constant_value_wo_init"]:
            # BUG FIX: the attributes were previously spelled 'kernel' and
            # 'padding', which are not valid ONNX Conv attribute names;
            # every other Conv in this file uses kernel_shape / pads.
            conv_node = onnx.helper.make_node('Conv', ['E', 'F'], ['A'],
                                              name='Conv',
                                              kernel_shape=[3, 3],
                                              pads=[1, 1, 1, 1])
            if mode == "constant_value":
                node = onnx.helper.make_node('Pad', ['A', 'B', 'D'], ['C'], name='Pad', mode="constant")
                graph = helper.make_graph([conv_node, node], 'test_graph_1', [E, F, B, D], [C], [E_init, F_init, B_init, D_init])
            elif mode == "constant_value_wo_init":
                # Same graph but the constant value 'D' has no initializer.
                node = onnx.helper.make_node('Pad', ['A', 'B', 'D'], ['C'], name='Pad', mode="constant")
                graph = helper.make_graph([conv_node, node], 'test_graph_1', [E, F, B, D], [C], [E_init, F_init, B_init])
            else:
                node = onnx.helper.make_node('Pad', ['A', 'B'], ['C'], name='Pad', mode=mode)
                graph = helper.make_graph([conv_node, node], 'test_graph_1', [E, F, B], [C], [E_init, F_init, B_init])
            model = helper.make_model(graph)
            q_config = {'Conv': self.q_config,
                        'Pad': {'activation':{'dtype': 2,
                                              'algorithm': 'minmax',
                                              'scheme':'asym',
                                              'granularity':'per_tensor'}
                                }}
            quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                               "C": [np.float32(10.), np.uint8(0)],
                               "D": [np.float32(10.), np.uint8(0)],
                               "E": [np.float32(10.), np.uint8(0)],
                               "F": [np.float32(10.), np.uint8(0)]}
            quantizable_op_types = ["Conv", "Pad"]
            self.static_test(model, q_config, quantize_params, quantizable_op_types)
        # Standalone Pad (no upstream Conv) with a constant-value input.
        node = onnx.helper.make_node('Pad', ['E', 'B', 'D'], ['C'], name='Pad', mode="constant")
        graph = helper.make_graph([node], 'test_graph_1', [E, B, D], [C], [E_init, B_init, D_init])
        model = helper.make_model(graph)
        q_config = {'Pad': {'activation':{'dtype': 2,
                                          'algorithm': 'minmax',
                                          'scheme':'asym',
                                          'granularity':'per_tensor'}
                            }}
        quantize_params = {"C": [np.float32(10.), np.uint8(0)],
                           "E": [np.float32(10.), np.uint8(0)]}
        quantizable_op_types = ["Pad"]
        self.static_test(model, q_config, quantize_params, quantizable_op_types)

    def test_binary(self):
        """Static quantization of broadcasting binary ops Mul and Add."""
        for op in ['Mul', 'Add']:
            A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 10])
            B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1])
            C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 10])
            node = onnx.helper.make_node(op, ['A', 'B'], ['C'], name=op)
            graph = helper.make_graph([node], 'test_graph_1', [A, B], [C])
            model = helper.make_model(graph)
            q_config = {op: self.q_config}
            quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                               "B": [np.float32(10.), np.uint8(0)],
                               "C": [np.float32(10.), np.uint8(0)]}
            quantizable_op_types = [op]
            self.static_test(model, q_config, quantize_params, quantizable_op_types)

    def test_relu(self):
        """Static quantization of a Conv+Relu graph after onnxruntime's
        extended graph optimizations (which may fuse the pair)."""
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
        D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 5])
        conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'],
                                          name='Conv',
                                          kernel_shape=[3, 3],
                                          pads=[1, 1, 1, 1])
        relu_node = onnx.helper.make_node('Relu', ['C'], ['D'], name='Relu')
        graph = helper.make_graph([conv_node, relu_node], 'test_graph_1', [A, B], [D])
        model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})
        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
        sess_options.optimized_model_filepath = "./onnxrt_test/optimized_model.onnx"
        # Creating the session is what writes the optimized model to disk.
        session = ort.InferenceSession(model.SerializeToString(), sess_options)
        model = onnx.load(sess_options.optimized_model_filepath)
        q_config = {"Conv": self.q_config, "Relu": self.q_config}
        quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                           "B": [np.float32(10.), np.uint8(0)],
                           "C": [np.float32(10.), np.uint8(0)],
                           "D": [np.float32(10.), np.uint8(0)]}
        quantizable_op_types = ["Conv", "Relu"]
        self.static_test(model, q_config, quantize_params, quantizable_op_types)

    def test_clip(self):
        """Static quantization of a Conv+Clip graph after onnxruntime's
        extended graph optimizations."""
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
        D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 5])
        conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'],
                                          name='Conv',
                                          kernel_shape=[3, 3],
                                          pads=[1, 1, 1, 1])
        clip_node = onnx.helper.make_node('Clip', ['C'], ['D'], name='Clip')
        graph = helper.make_graph([conv_node, clip_node], 'test_graph_1', [A, B], [D])
        model = helper.make_model(graph, **{'opset_imports': [helper.make_opsetid('', 13)]})
        sess_options = ort.SessionOptions()
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
        sess_options.optimized_model_filepath = "./onnxrt_test/optimized_model.onnx"
        # Creating the session is what writes the optimized model to disk.
        session = ort.InferenceSession(model.SerializeToString(), sess_options)
        model = onnx.load(sess_options.optimized_model_filepath)
        q_config = {"Conv": self.q_config, "Clip": self.q_config}
        quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                           "B": [np.float32(10.), np.uint8(0)],
                           "C": [np.float32(10.), np.uint8(0)],
                           "D": [np.float32(10.), np.uint8(0)]}
        quantizable_op_types = ["Conv", "Clip"]
        self.static_test(model, q_config, quantize_params, quantizable_op_types)

    def test_activation(self):
        """Static quantization of unary activations, with and without an
        initializer for the input tensor."""
        for op in ["Relu", "LeakyRelu", "Sigmoid"]:
            B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 10])
            A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 10])
            node = onnx.helper.make_node(op, ['A'], ['B'], name=op)
            graph = helper.make_graph([node], 'test_graph_1', [A], [B])
            model = helper.make_model(graph)
            q_config = {op: self.q_config}
            quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                               "B": [np.float32(10.), np.uint8(0)]}
            quantizable_op_types = [op]
            self.static_test(model, q_config, quantize_params, quantizable_op_types)
            a_value = np.random.randn(1, 10).astype(np.float32)
            A_init = helper.make_tensor('A', TensorProto.FLOAT, [1, 10],
                                        a_value.reshape(10).tolist())
            graph = helper.make_graph([node], 'test_graph_1', [A], [B], [A_init])
            model = helper.make_model(graph)
            self.static_test(model, q_config, quantize_params, quantizable_op_types)

    def test_pooling(self):
        """Static quantization of pooling ops: standalone across opsets 12
        and 13, then behind a Conv."""
        for op in ["MaxPool", "GlobalAveragePool"]:
            B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 5, 5])
            A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
            a_value = np.random.randn(1, 1, 5, 5).astype(np.float32)
            A_init = helper.make_tensor('A', TensorProto.FLOAT, [1, 1, 5, 5],
                                        a_value.reshape(25).tolist())
            node = onnx.helper.make_node(op, ['A'], ['B'],
                                         name=op,
                                         kernel_shape=[3, 3],
                                         pads=[1, 1, 1, 1])
            graph = helper.make_graph([node], 'test_graph_1', [A], [B], [A_init])
            q_config = {op: self.q_config}
            quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                               "B": [np.float32(10.), np.uint8(0)]}
            quantizable_op_types = [op]
            # MaxPool gained int8 support in opset 12; cover 12 and 13.
            for opset_version in [12, 13]:
                opset = onnx.OperatorSetIdProto()
                opset.version = opset_version
                model = helper.make_model(graph, opset_imports=[opset])
                self.static_test(model, q_config, quantize_params, quantizable_op_types)
            A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
            B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
            D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 5])
            conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'],
                                              name='Conv',
                                              kernel_shape=[3, 3],
                                              pads=[1, 1, 1, 1])
            pool_node = onnx.helper.make_node(op, ['C'], ['D'], name=op)
            graph = helper.make_graph([conv_node, pool_node], 'test_graph_1', [A, B], [D])
            model = helper.make_model(graph)
            q_config = {"Conv": self.q_config, op: self.q_config}
            quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                               "B": [np.float32(10.), np.uint8(0)],
                               "C": [np.float32(10.), np.uint8(0)],
                               "D": [np.float32(10.), np.uint8(0)]}
            quantizable_op_types = ["Conv", op]
            self.static_test(model, q_config, quantize_params, quantizable_op_types)

    def test_exclude_node(self):
        """A node whose config is the string 'fp32' must be skipped by the
        quantizer while the rest of the graph is still quantized."""
        A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
        B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 3, 3])
        D = helper.make_tensor_value_info('D', TensorProto.FLOAT, [1, 1, 5, 5])
        conv_node = onnx.helper.make_node('Conv', ['A', 'B'], ['C'],
                                          name='Conv',
                                          kernel_shape=[3, 3],
                                          pads=[1, 1, 1, 1])
        pool_node = onnx.helper.make_node("MaxPool", ['C'], ['D'], name="MaxPool")
        graph = helper.make_graph([conv_node, pool_node], 'test_graph_1', [A, B], [D])
        model = helper.make_model(graph)
        q_config = {"Conv": self.q_config, "MaxPool": "fp32"}
        quantize_params = {"A": [np.float32(10.), np.uint8(0)],
                           "B": [np.float32(10.), np.uint8(0)],
                           "C": [np.float32(10.), np.uint8(0)],
                           "D": [np.float32(10.), np.uint8(0)]}
        quantizable_op_types = ["Conv", "MaxPool"]
        self.static_test(model, q_config, quantize_params, quantizable_op_types)
# Allow the test module to be executed directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 56.641221
| 129
| 0.509748
| 2,582
| 22,260
| 4.168861
| 0.063904
| 0.099405
| 0.074322
| 0.054348
| 0.847269
| 0.824508
| 0.802304
| 0.786882
| 0.773876
| 0.758083
| 0
| 0.040594
| 0.337107
| 22,260
| 392
| 130
| 56.785714
| 0.688872
| 0
| 0
| 0.596685
| 0
| 0
| 0.075562
| 0.005031
| 0
| 0
| 0
| 0
| 0.005525
| 1
| 0.044199
| false
| 0
| 0.035912
| 0
| 0.09116
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9122a65a266db6e9bcaf3d0fd8235ca1aa3cfe5c
| 48
|
py
|
Python
|
travelling_salesperson/__init__.py
|
btallin/travelling_salesperson_solver
|
f19d0e1cf335a647bf694057a43b4a2c4b24e226
|
[
"MIT"
] | 1
|
2020-04-26T22:33:21.000Z
|
2020-04-26T22:33:21.000Z
|
travelling_salesperson/__init__.py
|
btallin/alley_cat_solver
|
f19d0e1cf335a647bf694057a43b4a2c4b24e226
|
[
"MIT"
] | null | null | null |
travelling_salesperson/__init__.py
|
btallin/alley_cat_solver
|
f19d0e1cf335a647bf694057a43b4a2c4b24e226
|
[
"MIT"
] | null | null | null |
from travelling_salesperson.solver import solve
| 24
| 47
| 0.895833
| 6
| 48
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
914af87388c83d5306b8d84a8f55f176600f1a5d
| 1,216
|
py
|
Python
|
nfdapi/nfdcore/migrations/0018_auto_20180212_1635.py
|
kappu72/clevmetro-nfd
|
584c638190eaa077d010a24fe7f209b1cbb3725d
|
[
"BSD-2-Clause"
] | 3
|
2018-02-11T21:18:11.000Z
|
2019-01-19T06:58:58.000Z
|
nfdapi/nfdcore/migrations/0018_auto_20180212_1635.py
|
kappu72/clevmetro-nfd
|
584c638190eaa077d010a24fe7f209b1cbb3725d
|
[
"BSD-2-Clause"
] | 108
|
2018-02-02T15:42:39.000Z
|
2019-01-21T13:22:55.000Z
|
nfdapi/nfdcore/migrations/0018_auto_20180212_1635.py
|
kappu72/clevmetro-nfd
|
584c638190eaa077d010a24fe7f209b1cbb3725d
|
[
"BSD-2-Clause"
] | 5
|
2018-02-02T11:52:48.000Z
|
2022-03-01T16:09:09.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-02-12 16:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``gender`` and ``marks`` fields from every
    animal-details model."""

    dependencies = [
        ('nfdcore', '0017_auto_20180212_1550'),
    ]

    # One RemoveField per (model, field) pair, in the same order as the
    # original hand-written list: both fields grouped under each model.
    operations = [
        migrations.RemoveField(
            model_name=model,
            name=field,
        )
        for model in ('landanimaldetails',
                      'pondlakeanimaldetails',
                      'streamanimaldetails',
                      'wetlandanimaldetails')
        for field in ('gender', 'marks')
    ]
| 25.333333
| 48
| 0.552632
| 92
| 1,216
| 7.130435
| 0.434783
| 0.256098
| 0.317073
| 0.365854
| 0.708841
| 0.708841
| 0
| 0
| 0
| 0
| 0
| 0.041147
| 0.340461
| 1,216
| 47
| 49
| 25.87234
| 0.776808
| 0.055921
| 0
| 0.8
| 1
| 0
| 0.199127
| 0.056769
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e676ecb24711167cebd10487d2762eab453b30a3
| 178
|
py
|
Python
|
retratodefases/utils/__init__.py
|
Loracio/retrato-de-fases
|
a2d870a69b911af3b78288708cb569c957506940
|
[
"MIT"
] | 3
|
2021-03-22T00:07:28.000Z
|
2021-03-22T12:11:18.000Z
|
retratodefases/utils/__init__.py
|
Loracio/retrato-de-fases
|
a2d870a69b911af3b78288708cb569c957506940
|
[
"MIT"
] | null | null | null |
retratodefases/utils/__init__.py
|
Loracio/retrato-de-fases
|
a2d870a69b911af3b78288708cb569c957506940
|
[
"MIT"
] | 2
|
2021-03-20T19:00:53.000Z
|
2021-03-22T12:19:52.000Z
|
# Idempotent import guard: pull ``utils`` into the package namespace only
# once, even if this module body is executed again (e.g. on reload).
try:
    # On the very first run the flag does not exist yet and this raises
    # NameError, which initializes it to False below.
    __PHASE_UTILS_IMPORTED__
except NameError:
    __PHASE_UTILS_IMPORTED__= False
if not __PHASE_UTILS_IMPORTED__:
    from . import utils
    __PHASE_UTILS_IMPORTED__ = True
| 19.777778
| 35
| 0.803371
| 22
| 178
| 5.409091
| 0.545455
| 0.336134
| 0.605042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162921
| 178
| 9
| 36
| 19.777778
| 0.798658
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.714286
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e6958cde1cd1e7b762c55d68c7be888f33503a8f
| 2,270
|
py
|
Python
|
test/tests/loop.py
|
jonco3/dynamic
|
76d10b012a7860595c7d9abbdf542c7d8f2a4d53
|
[
"MIT"
] | 1
|
2020-11-26T23:37:19.000Z
|
2020-11-26T23:37:19.000Z
|
test/tests/loop.py
|
jonco3/dynamic
|
76d10b012a7860595c7d9abbdf542c7d8f2a4d53
|
[
"MIT"
] | null | null | null |
test/tests/loop.py
|
jonco3/dynamic
|
76d10b012a7860595c7d9abbdf542c7d8f2a4d53
|
[
"MIT"
] | null | null | null |
# output: ok
# Interpreter loop-construct tests: for/while, break/continue,
# try/finally interaction with loop exits, and the loop "else" clause.
count = 0
total = 0
last = 0
# A plain for-loop over a tuple visits every element in order.
for i in (1, 2, 3):
    count += 1
    total += i
    last = i
assert count == 3
assert total == 6
assert last == 3
count = 0
total = 0
last = 0
i = 1
# Equivalent while-loop driven by a manual counter.
while i <= 3:
    count += 1
    total += i
    last = i
    i += 1
assert count == 3
assert total == 6
assert last == 3
count = 0
total = 0
last = 0
# break leaves the for-loop immediately after processing i == 2.
for i in (1, 2, 3):
    count += 1
    total += i
    last = i
    if i == 2:
        break
assert count == 2
assert total == 3
assert last == 2
count = 0
total = 0
last = 0
i = 1
# break in a while-loop exits before the final counter increment.
while i <= 3:
    count += 1
    total += i
    last = i
    if i == 2:
        break
    i += 1
assert count == 2
assert total == 3
assert last == 2
count = 0
total = 0
last = 0
# continue skips the rest of the body for i == 2; the loop keeps going.
for i in (1, 2, 3):
    if i == 2:
        continue
    count += 1
    total += i
    last = i
assert count == 2
assert total == 4
assert last == 3
count = 0
total = 0
last = 0
i = 1
# while-loop continue: the counter must be bumped before continuing,
# otherwise the loop would never terminate.
while i <= 3:
    if i == 2:
        i += 1
        continue
    count += 1
    total += i
    last = i
    i += 1
assert count == 2
assert total == 4
assert last == 3
f = 0
# finally runs even when the loop is exited via break.
for i in (1, 2, 3):
    try:
        if i == 2:
            break
    finally:
        f = i
assert f == 2
f = 0
# Nested try blocks: break on the first iteration unwinds through both
# finally clauses, so each fires exactly once (f ends at 2).
for i in (1, 2, 3):
    try:
        try:
            try:
                if i == 1:
                    break
            finally:
                f += 1
        except Exception:
            pass
    finally:
        f += 1
assert f == 2
f = 0
# finally also runs on continue, so it fires once per iteration.
for i in (1, 2, 3):
    try:
        if i == 2:
            continue
    finally:
        f += 1
assert f == 3
# else clause
didElse = False
# for-else: the else suite runs when the (empty) iterable is exhausted.
for i in []:
    pass
else:
    didElse = True
assert(didElse)
didElse = False
# else also runs after a fully completed non-empty loop.
for i in (1, 2, 3):
    pass
else:
    didElse = True
assert(didElse)
didElse = False
# continue does not suppress the else clause.
for i in (1, 2, 3):
    if i == 3:
        continue
else:
    didElse = True
assert(didElse)
didElse = False
# break does suppress the else clause.
for i in (1, 2, 3):
    if i == 3:
        break
else:
    didElse = True
assert(not didElse)
class OwnSequence:
    """Wrap a sequence and expose it through ``__getitem__`` only, forcing
    iteration to use the legacy index-based protocol (index 0, 1, ...
    until IndexError)."""

    def __init__(self, wrapped):
        # Underlying sequence that answers all index lookups.
        self.wrapped = wrapped

    def __getitem__(self, index):
        # Delegate; an IndexError from the wrapped sequence ends iteration.
        underlying = self.wrapped
        return underlying[index]
count = 0
total = 0
last = 0
# Iterating an object that defines only __getitem__ uses the legacy
# indexing protocol: index 0, 1, ... until IndexError is raised.
for i in OwnSequence([1, 2, 3]):
    count += 1
    total += i
    last = i
assert count == 3
assert total == 6
assert last == 3
print('ok')
| 13.352941
| 34
| 0.491189
| 359
| 2,270
| 3.083565
| 0.114206
| 0.039747
| 0.059621
| 0.056911
| 0.777778
| 0.738934
| 0.738934
| 0.722674
| 0.704607
| 0.64047
| 0
| 0.077424
| 0.391189
| 2,270
| 169
| 35
| 13.431953
| 0.723589
| 0.009692
| 0
| 0.911565
| 0
| 0
| 0.000891
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.013605
| false
| 0.020408
| 0
| 0.006803
| 0.027211
| 0.006803
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.