hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
832c8761a575784e1b640474985a108f44948f54
| 780
|
py
|
Python
|
utils/system/load_carla.py
|
Czworldy/GP_traj
|
96261f39a5a322092e3a6be98938bb4601f0f746
|
[
"MIT"
] | 1
|
2021-06-08T06:09:55.000Z
|
2021-06-08T06:09:55.000Z
|
utils/system/load_carla.py
|
Czworldy/GP_traj
|
96261f39a5a322092e3a6be98938bb4601f0f746
|
[
"MIT"
] | null | null | null |
utils/system/load_carla.py
|
Czworldy/GP_traj
|
96261f39a5a322092e3a6be98938bb4601f0f746
|
[
"MIT"
] | null | null | null |
import os
import sys
import glob
def load(path):
try:
# sys.path.append(path+'/PythonAPI')
# sys.path.append(glob.glob(path+'/PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (
# sys.version_info.major,
# sys.version_info.minor,
# 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
sys.path.append(path+'/PythonAPI')
# sys.path.append(glob.glob(path+'/PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (
# sys.version_info.major,
# sys.version_info.minor,
# 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
sys.path.append(glob.glob(path+'/PythonAPI/carla/dist/carla-0.9.10-py3.7-linux-x86_64.egg')[0])
except:
print('Fail to load carla library')
| 37.142857
| 103
| 0.580769
| 113
| 780
| 3.946903
| 0.336283
| 0.078475
| 0.14574
| 0.11435
| 0.784753
| 0.784753
| 0.784753
| 0.784753
| 0.784753
| 0.784753
| 0
| 0.041736
| 0.232051
| 780
| 21
| 104
| 37.142857
| 0.702838
| 0.546154
| 0
| 0
| 0
| 0.111111
| 0.270349
| 0.165698
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.333333
| 0
| 0.444444
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8355d614004b17bfcb3f45e4442a8b6ec42659c2
| 183
|
py
|
Python
|
pizza_store/enums/__init__.py
|
astsu-dev/pizza-store-backend
|
902f6e5e2c88ba029b2bff61da8fc4684664ead9
|
[
"MIT"
] | 2
|
2021-07-10T15:47:45.000Z
|
2021-12-13T18:09:30.000Z
|
pizza_store/enums/__init__.py
|
astsu-dev/pizza-store-backend
|
902f6e5e2c88ba029b2bff61da8fc4684664ead9
|
[
"MIT"
] | null | null | null |
pizza_store/enums/__init__.py
|
astsu-dev/pizza-store-backend
|
902f6e5e2c88ba029b2bff61da8fc4684664ead9
|
[
"MIT"
] | null | null | null |
from pizza_store.enums.permissions import CategoryPermission, ProductPermission
from pizza_store.enums.role import Role
__all__ = ["CategoryPermission", "ProductPermission", "Role"]
| 36.6
| 79
| 0.830601
| 19
| 183
| 7.684211
| 0.526316
| 0.123288
| 0.191781
| 0.260274
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081967
| 183
| 4
| 80
| 45.75
| 0.869048
| 0
| 0
| 0
| 0
| 0
| 0.213115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
36a990cc6a6e3e5504c06bc7f2883f381f78b2f2
| 42
|
py
|
Python
|
lib/django-1.3/django/contrib/messages/__init__.py
|
MiCHiLU/google_appengine_sdk
|
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
|
[
"Apache-2.0"
] | 790
|
2015-01-03T02:13:39.000Z
|
2020-05-10T19:53:57.000Z
|
django/contrib/messages/__init__.py
|
mradziej/django
|
5d38965743a369981c9a738a298f467f854a2919
|
[
"BSD-3-Clause"
] | 1,361
|
2015-01-08T23:09:40.000Z
|
2020-04-14T00:03:04.000Z
|
django/contrib/messages/__init__.py
|
mradziej/django
|
5d38965743a369981c9a738a298f467f854a2919
|
[
"BSD-3-Clause"
] | 155
|
2015-01-08T22:59:31.000Z
|
2020-04-08T08:01:53.000Z
|
from api import *
from constants import *
| 14
| 23
| 0.761905
| 6
| 42
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 42
| 2
| 24
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
36e973d4fc94162fdd1e28fa244ac8e1f2e1f0e6
| 25
|
py
|
Python
|
pylfi/priors/__init__.py
|
nicolossus/pylfi
|
7950aff5c36e7368cbe77b32ef348966b905f5cf
|
[
"MIT"
] | null | null | null |
pylfi/priors/__init__.py
|
nicolossus/pylfi
|
7950aff5c36e7368cbe77b32ef348966b905f5cf
|
[
"MIT"
] | null | null | null |
pylfi/priors/__init__.py
|
nicolossus/pylfi
|
7950aff5c36e7368cbe77b32ef348966b905f5cf
|
[
"MIT"
] | null | null | null |
from .prior import Prior
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
36e987de86625fb265967bed791c06ac9307414b
| 92
|
py
|
Python
|
altair/__init__.py
|
jakevdp/altair2
|
46d391034c5b72867c9e4d01f3a7c7c536533add
|
[
"BSD-3-Clause"
] | 2
|
2018-02-03T05:35:52.000Z
|
2018-02-05T21:00:18.000Z
|
altair/__init__.py
|
jakevdp/altair2
|
46d391034c5b72867c9e4d01f3a7c7c536533add
|
[
"BSD-3-Clause"
] | null | null | null |
altair/__init__.py
|
jakevdp/altair2
|
46d391034c5b72867c9e4d01f3a7c7c536533add
|
[
"BSD-3-Clause"
] | null | null | null |
from .schema import vegalite_version
from .api import Chart
from .schema.channels import *
| 18.4
| 36
| 0.804348
| 13
| 92
| 5.615385
| 0.615385
| 0.273973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 92
| 4
| 37
| 23
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
36ed2a423ccbe261f9ce0fa402dda9039931c5d3
| 20,707
|
py
|
Python
|
src/network_architecture.py
|
kms8527/rl_decision_making_with_uncertainty
|
7a655e218c432be1498ff19e4811fc7595bd1413
|
[
"MIT"
] | 30
|
2020-04-27T13:01:58.000Z
|
2022-03-15T07:15:17.000Z
|
src/network_architecture.py
|
kms8527/rl_decision_making_with_uncertainty
|
7a655e218c432be1498ff19e4811fc7595bd1413
|
[
"MIT"
] | 5
|
2020-04-23T08:29:36.000Z
|
2022-02-10T01:26:01.000Z
|
src/network_architecture.py
|
kms8527/rl_decision_making_with_uncertainty
|
7a655e218c432be1498ff19e4811fc7595bd1413
|
[
"MIT"
] | 12
|
2020-06-11T04:19:53.000Z
|
2022-02-16T09:30:26.000Z
|
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Lambda, add, Input, Reshape, Conv1D, MaxPooling1D, concatenate
import keras.backend as K
class NetworkMLP(object):
"""
This class is used to build a neural network with an MLP structure.
There are different functions that builds a standard MLP, w/wo dueling architecture,
and w/wo additional untrainable prior network.
Args:
nb_inputs (int): Number of inputs to the network.
nb_outputs (int): Number of outputs from the network.
nb_hidden_layers (int): Number of hidden layers.
nb_hidden_neurons (int): Number of neurons in the hidden layers.
duel (bool): Use dueling architecture.
prior (bool): Use an additional untrainable prior network.
prior_scale_factor (float): Scale factor that balances trainable/untrainable contribution to the output.
duel_type (str): 'avg', 'max', or 'naive'
activation (str): Type of activation function, see Keras for definition
window_length (int): How many historic states that are used as input. Set to 1 in this work.
"""
def __init__(self, nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons, duel, prior, prior_scale_factor=10.,
duel_type='avg', activation='relu', window_length=1):
self.model = None
if not prior and not duel:
self.build_mlp(nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons, activation=activation,
window_length=window_length)
elif not prior and duel:
self.build_mlp_dueling(nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons, dueling_type=duel_type,
activation=activation, window_length=window_length)
elif prior and not duel:
self.build_prior_plus_trainable(nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons,
activation=activation, prior_scale_factor=prior_scale_factor,
window_length=window_length)
elif prior and duel:
self.build_prior_plus_trainable_dueling(nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons,
dueling_type=duel_type, activation=activation,
prior_scale_factor=prior_scale_factor, window_length=window_length)
else:
raise Exception('Error in Network creation')
def build_mlp(self, nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons, activation='relu', window_length=1):
self.model = Sequential()
self.model.add(Flatten(input_shape=(window_length, nb_inputs)))
for _ in range(nb_hidden_layers):
self.model.add(Dense(nb_hidden_neurons))
self.model.add(Activation(activation))
self.model.add(Dense(nb_outputs, activation='linear'))
def build_mlp_dueling(self, nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons, dueling_type='avg',
activation='relu', window_length=1):
self.build_mlp(nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons, activation=activation,
window_length=window_length)
layer = self.model.layers[-2]
y = Dense(nb_outputs + 1, activation='linear')(layer.output)
if dueling_type == 'avg':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True),
output_shape=(nb_outputs,))(y)
elif dueling_type == 'max':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True),
output_shape=(nb_outputs,))(y)
elif dueling_type == 'naive':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_outputs,))(y)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
self.model = Model(inputs=self.model.input, outputs=outputlayer)
def build_prior_plus_trainable(self, nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons, activation='relu',
prior_scale_factor=1., window_length=1):
net_input = Input(shape=(window_length, nb_inputs), name='input')
prior_net = Flatten()(net_input)
for _ in range(nb_hidden_layers):
prior_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal',
trainable=False)(prior_net)
prior_out = Dense(nb_outputs, activation='linear', trainable=False, name='prior_out')(prior_net)
prior_scale = Lambda(lambda x: x * prior_scale_factor, name='prior_scale')(prior_out)
trainable_net = Flatten(input_shape=(window_length, nb_inputs))(net_input)
for _ in range(nb_hidden_layers):
trainable_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal',
trainable=True)(trainable_net)
trainable_out = Dense(nb_outputs, activation='linear', trainable=True, name='trainable_out')(trainable_net)
add_output = add([trainable_out, prior_scale], name='add')
self.model = Model(inputs=net_input, outputs=add_output)
def build_prior_plus_trainable_dueling(self, nb_inputs, nb_outputs, nb_hidden_layers, nb_hidden_neurons,
activation='relu', prior_scale_factor=1., dueling_type='avg',
window_length=1):
net_input = Input(shape=(window_length, nb_inputs), name='input')
prior_net = Flatten()(net_input)
for _ in range(nb_hidden_layers):
prior_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal',
trainable=False)(prior_net)
prior_out_wo_dueling = Dense(nb_outputs + 1, activation='linear', trainable=False,
name='prior_out_wo_dueling')(prior_net)
if dueling_type == 'avg':
prior_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True),
output_shape=(nb_outputs,), name='prior_out')(prior_out_wo_dueling)
elif dueling_type == 'max':
prior_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True),
output_shape=(nb_outputs,), name='prior_out')(prior_out_wo_dueling)
elif dueling_type == 'naive':
prior_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_outputs,),
name='prior_out')(prior_out_wo_dueling)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
prior_scale = Lambda(lambda x: x * prior_scale_factor, name='prior_scale')(prior_out)
trainable_net = Flatten(input_shape=(window_length, nb_inputs))(net_input)
for _ in range(nb_hidden_layers):
trainable_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal',
trainable=True)(trainable_net)
trainable_out_wo_dueling = Dense(nb_outputs + 1, activation='linear', trainable=True,
name='trainable_out_wo_dueling')(trainable_net)
if dueling_type == 'avg':
trainable_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True),
output_shape=(nb_outputs,), name='trainable_out')(trainable_out_wo_dueling)
elif dueling_type == 'max':
trainable_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True),
output_shape=(nb_outputs,), name='trainable_out')(trainable_out_wo_dueling)
elif dueling_type == 'naive':
trainable_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_outputs,),
name='trainable_out')(trainable_out_wo_dueling)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
add_output = add([trainable_out, prior_scale], name='add')
self.model = Model(inputs=net_input, outputs=add_output)
class NetworkCNN(object):
"""
This class is used to build a neural network with a CNN structure.
There are different functions that builds a standard CNN, w/wo dueling architecture,
and w/wo additional untrainable prior network.
Args:
nb_ego_states (int): Number of states that describe the ego vehicle.
nb_states_per_vehicle (int): Number of states that describe each of the surrounding vehicles.
nb_vehicles (int): Maximum number of surrounding vehicles.
nb_actions: (int): Number of outputs from the network.
nb_conv_layers (int): Number of convolutional layers.
nb_conv_filters (int): Number of convolutional filters.
nb_hidden_fc_layers (int): Number of hidden layers.
nb_hidden_neurons (int): Number of neurons in the hidden layers.
duel (bool): Use dueling architecture.
prior (bool): Use an additional untrainable prior network.
prior_scale_factor (float): Scale factor that balances trainable/untrainable contribution to the output.
duel_type (str): 'avg', 'max', or 'naive'
activation (str): Type of activation function, see Keras for definition
window_length (int): How many historic states that are used as input. Set to 1 in this work.
"""
def __init__(self, nb_ego_states, nb_states_per_vehicle, nb_vehicles, nb_actions, nb_conv_layers, nb_conv_filters,
nb_hidden_fc_layers, nb_hidden_neurons, duel, prior, prior_scale_factor=10., duel_type='avg',
activation='relu', window_length=1):
self.model = None
if not prior and not duel:
self.build_cnn(nb_ego_states, nb_states_per_vehicle, nb_vehicles, nb_actions, nb_conv_layers,
nb_conv_filters, nb_hidden_fc_layers, nb_hidden_neurons, activation=activation,
window_length=window_length)
elif not prior and duel:
self.build_cnn_dueling(nb_ego_states, nb_states_per_vehicle, nb_vehicles, nb_actions, nb_conv_layers,
nb_conv_filters, nb_hidden_fc_layers, nb_hidden_neurons, dueling_type=duel_type,
activation=activation, window_length=window_length)
elif prior and duel:
self.build_cnn_dueling_prior(nb_ego_states, nb_states_per_vehicle, nb_vehicles, nb_actions, nb_conv_layers,
nb_conv_filters, nb_hidden_fc_layers, nb_hidden_neurons,
dueling_type=duel_type, activation=activation,
prior_scale_factor=prior_scale_factor, window_length=window_length)
else:
raise Exception('Error in Network creation')
def build_cnn(self, nb_ego_states, nb_states_per_vehicle, nb_vehicles, nb_actions, nb_conv_layers, nb_conv_filters,
nb_hidden_fc_layers, nb_hidden_neurons, activation='relu', window_length=1):
nb_inputs = nb_ego_states + nb_states_per_vehicle * nb_vehicles
net_input = Input(shape=(window_length, nb_inputs), name='input')
flat_input = Flatten()(net_input)
input_ego = Lambda(lambda state: state[:, :nb_ego_states * window_length])(flat_input)
input_others = Lambda(lambda state: state[:, nb_ego_states * window_length:])(flat_input)
input_others_reshaped = Reshape((nb_vehicles * nb_states_per_vehicle * window_length, 1,),
input_shape=(nb_vehicles * nb_states_per_vehicle *
window_length,))(input_others)
conv_net = Conv1D(nb_conv_filters, nb_states_per_vehicle*window_length,
strides=nb_states_per_vehicle*window_length, activation=activation,
kernel_initializer='glorot_normal')(input_others_reshaped)
for _ in range(nb_conv_layers-1):
conv_net = Conv1D(nb_conv_filters, 1, strides=1, activation=activation,
kernel_initializer='glorot_normal')(conv_net)
pool = MaxPooling1D(pool_size=nb_vehicles)(conv_net)
conv_net_out = Reshape((nb_conv_filters,), input_shape=(1, nb_conv_filters,), name='convnet_out')(pool)
merged = concatenate([input_ego, conv_net_out])
joint_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal')(merged)
for _ in range(nb_hidden_fc_layers-1):
joint_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal')(joint_net)
output = Dense(nb_actions, activation='linear', name='output')(joint_net)
self.model = Model(inputs=net_input, outputs=output)
def build_cnn_dueling(self, nb_ego_states, nb_states_per_vehicle, nb_vehicles, nb_actions, nb_conv_layers,
nb_conv_filters, nb_hidden_fc_layers, nb_hidden_neurons, activation='relu', window_length=1,
dueling_type='avg'):
self. build_cnn(nb_ego_states=nb_ego_states, nb_states_per_vehicle=nb_states_per_vehicle,
nb_vehicles=nb_vehicles, nb_actions=nb_actions, nb_conv_layers=nb_conv_layers,
nb_conv_filters=nb_conv_filters, nb_hidden_fc_layers=nb_hidden_fc_layers,
nb_hidden_neurons=nb_hidden_neurons, activation=activation, window_length=window_length)
layer = self.model.layers[-2]
y = Dense(nb_actions + 1, activation='linear')(layer.output)
if dueling_type == 'avg':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True),
output_shape=(nb_actions,))(y)
elif dueling_type == 'max':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True),
output_shape=(nb_actions,))(y)
elif dueling_type == 'naive':
outputlayer = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:], output_shape=(nb_actions,))(y)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
self.model = Model(inputs=self.model.input, outputs=outputlayer)
def build_cnn_dueling_prior(self, nb_ego_states, nb_states_per_vehicle, nb_vehicles, nb_actions, nb_conv_layers,
nb_conv_filters, nb_hidden_fc_layers, nb_hidden_neurons, activation='relu',
window_length=1, dueling_type='avg', prior_scale_factor=1.):
nb_inputs = nb_ego_states + nb_states_per_vehicle * nb_vehicles
net_input = Input(shape=(window_length, nb_inputs), name='input')
flat_input = Flatten()(net_input)
input_ego = Lambda(lambda state: state[:, :nb_ego_states * window_length])(flat_input)
input_others = Lambda(lambda state: state[:, nb_ego_states * window_length:])(flat_input)
input_others_reshaped = Reshape((nb_vehicles * nb_states_per_vehicle * window_length, 1,),
input_shape=(nb_vehicles * nb_states_per_vehicle *
window_length,))(input_others)
prior_conv_net = Conv1D(nb_conv_filters, nb_states_per_vehicle * window_length,
strides=nb_states_per_vehicle * window_length, activation=activation,
kernel_initializer='glorot_normal', trainable=False)(input_others_reshaped)
for _ in range(nb_conv_layers - 1):
prior_conv_net = Conv1D(nb_conv_filters, 1, strides=1, activation=activation,
kernel_initializer='glorot_normal', trainable=False)(prior_conv_net)
prior_pool = MaxPooling1D(pool_size=nb_vehicles)(prior_conv_net)
prior_conv_net_out = Reshape((nb_conv_filters,), input_shape=(1, nb_conv_filters,),
name='prior_convnet_out')(prior_pool)
prior_merged = concatenate([input_ego, prior_conv_net_out])
prior_joint_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal',
trainable=False)(prior_merged)
for _ in range(nb_hidden_fc_layers-1):
prior_joint_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal',
trainable=False)(prior_joint_net)
prior_out_wo_dueling = Dense(nb_actions+1, activation='linear', name='prior_out_wo_dueling',
trainable=False)(prior_joint_net)
if dueling_type == 'avg':
prior_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True),
output_shape=(nb_actions,), name='prior_out')(prior_out_wo_dueling)
elif dueling_type == 'max':
prior_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True),
output_shape=(nb_actions,), name='prior_out')(prior_out_wo_dueling)
elif dueling_type == 'naive':
prior_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:],
output_shape=(nb_actions,), name='prior_out')(prior_out_wo_dueling)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
prior_scale = Lambda(lambda x: x * prior_scale_factor, name='prior_scale')(prior_out)
trainable_conv_net = Conv1D(nb_conv_filters, nb_states_per_vehicle * window_length,
strides=nb_states_per_vehicle * window_length, activation=activation,
kernel_initializer='glorot_normal', trainable=True)(input_others_reshaped)
for _ in range(nb_conv_layers - 1):
trainable_conv_net = Conv1D(nb_conv_filters, 1, strides=1, activation=activation,
kernel_initializer='glorot_normal', trainable=True)(trainable_conv_net)
trainable_pool = MaxPooling1D(pool_size=nb_vehicles)(trainable_conv_net)
trainable_conv_net_out = Reshape((nb_conv_filters,), input_shape=(1, nb_conv_filters,),
name='trainable_convnet_out')(trainable_pool)
trainable_merged = concatenate([input_ego, trainable_conv_net_out])
trainable_joint_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal',
trainable=True)(trainable_merged)
for _ in range(nb_hidden_fc_layers-1):
trainable_joint_net = Dense(nb_hidden_neurons, activation=activation, kernel_initializer='glorot_normal',
trainable=True)(trainable_joint_net)
trainable_out_wo_dueling = Dense(nb_actions + 1, activation='linear', name='trainable_out_wo_dueling',
trainable=True)(trainable_joint_net)
if dueling_type == 'avg':
trainable_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.mean(a[:, 1:], keepdims=True),
output_shape=(nb_actions,), name='trainable_out')(trainable_out_wo_dueling)
elif dueling_type == 'max':
trainable_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - K.max(a[:, 1:], keepdims=True),
output_shape=(nb_actions,), name='trainable_out')(trainable_out_wo_dueling)
elif dueling_type == 'naive':
trainable_out = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:],
output_shape=(nb_actions,), name='trainable_out')(trainable_out_wo_dueling)
else:
assert False, "dueling_type must be one of {'avg','max','naive'}"
add_output = add([trainable_out, prior_scale], name='final_output')
self.model = Model(inputs=net_input, outputs=add_output)
| 66.796774
| 120
| 0.631236
| 2,572
| 20,707
| 4.74028
| 0.061042
| 0.040026
| 0.03937
| 0.03248
| 0.920686
| 0.901657
| 0.876394
| 0.842356
| 0.831037
| 0.804462
| 0
| 0.0079
| 0.26648
| 20,707
| 309
| 121
| 67.012945
| 0.794786
| 0.099097
| 0
| 0.613445
| 0
| 0
| 0.059447
| 0.010538
| 0
| 0
| 0
| 0
| 0.02521
| 1
| 0.037815
| false
| 0
| 0.012605
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7fc96fdbbeba39e3dc3674c16474510f2cec35d7
| 36
|
py
|
Python
|
src/aiocleverbot/__init__.py
|
johan-naizu/aiocleverbot
|
ca82dbc2eafdee87435903c9b1bace481927996d
|
[
"MIT"
] | 3
|
2021-05-04T14:21:52.000Z
|
2021-06-23T15:41:35.000Z
|
src/aiocleverbot/__init__.py
|
johan-naizu/aiocleverbot
|
ca82dbc2eafdee87435903c9b1bace481927996d
|
[
"MIT"
] | 1
|
2021-05-06T12:17:13.000Z
|
2022-03-04T10:28:18.000Z
|
src/aiocleverbot/__init__.py
|
johan-naizu/aiocleverbot
|
ca82dbc2eafdee87435903c9b1bace481927996d
|
[
"MIT"
] | null | null | null |
from .aiocleverbot import cleverbot
| 18
| 35
| 0.861111
| 4
| 36
| 7.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.96875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7fce90d6f92015e289905a4d20814ef61f94859e
| 24
|
py
|
Python
|
gdbplotlib/__init__.py
|
X-Neon/gdbplotlib
|
80c4d59826f3f91275bb223c77ecabc49ae90578
|
[
"MIT"
] | 20
|
2020-03-14T00:52:39.000Z
|
2022-01-18T22:33:19.000Z
|
gdbplotlib/__init__.py
|
X-Neon/gdbplotlib
|
80c4d59826f3f91275bb223c77ecabc49ae90578
|
[
"MIT"
] | 1
|
2021-11-18T08:21:38.000Z
|
2021-11-20T21:24:58.000Z
|
gdbplotlib/__init__.py
|
X-Neon/gdbplotlib
|
80c4d59826f3f91275bb223c77ecabc49ae90578
|
[
"MIT"
] | 1
|
2021-02-23T00:04:43.000Z
|
2021-02-23T00:04:43.000Z
|
from . import plot, save
| 24
| 24
| 0.75
| 4
| 24
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d14cc836ef8377ba4048d1c88819aa8f9e1d94d
| 2,647
|
py
|
Python
|
OCRLibrary/utils/imagereading/text_locating.py
|
bendurston/robotframework-ocrlibrary
|
da310346a60260165118cba112995e58ebb3a103
|
[
"Apache-2.0"
] | 5
|
2021-05-19T07:09:43.000Z
|
2022-02-10T16:33:05.000Z
|
OCRLibrary/utils/imagereading/text_locating.py
|
bendurston/robotframework-ocrlibrary
|
da310346a60260165118cba112995e58ebb3a103
|
[
"Apache-2.0"
] | 3
|
2021-05-19T07:15:28.000Z
|
2021-05-23T20:12:41.000Z
|
OCRLibrary/utils/imagereading/text_locating.py
|
bendurston/robotframework-ocrlibrary
|
da310346a60260165118cba112995e58ebb3a103
|
[
"Apache-2.0"
] | 1
|
2021-05-19T07:10:03.000Z
|
2021-05-19T07:10:03.000Z
|
"""
Text locating module.
"""
from pytesseract import image_to_data, Output
def return_text_coordinates(img, text, pyt_conf, lang):
"""
This keyword is find the coordinates of text in an image.
"""
data = image_to_data(img, output_type=Output.DICT, config=pyt_conf, lang=lang)
boxes = len(data['level'])
for i in range(boxes):
text_from_image = data['text'][i]
if text_from_image == text:
box_bounds = (int(data['left'][i]), int(data['top'][i]), int(data['width'][i]), int(data['height'][i]))
x = box_bounds[0] + box_bounds[2]/2
y = box_bounds[1] + box_bounds[3]/2
return x, y
return None
def return_multiple_text_coordinates(img, text, pyt_conf, lang):
    """Return a list of (x, y) centres for every occurrence of ``text`` in ``img``.

    Use this instead of ``return_text_coordinates`` when the same text may
    appear several times. Returns ``None`` when there is no occurrence.
    """
    data = image_to_data(img, output_type=Output.DICT, config=pyt_conf, lang=lang)
    centres = []
    for index in range(len(data['level'])):
        if data['text'][index] != text:
            continue
        left = int(data['left'][index])
        top = int(data['top'][index])
        width = int(data['width'][index])
        height = int(data['height'][index])
        centres.append((left + width / 2, top + height / 2))
    # Preserve the original contract: an empty result is reported as None.
    return centres or None
def return_text_bounds(img, text, pyt_conf, lang):
    """Return the (left, top, width, height) box of the first occurrence of ``text``.

    Returns ``None`` when the text is not present in the image.
    """
    data = image_to_data(img, output_type=Output.DICT, config=pyt_conf, lang=lang)
    for index in range(len(data['level'])):
        if data['text'][index] == text:
            return (int(data['left'][index]), int(data['top'][index]),
                    int(data['width'][index]), int(data['height'][index]))
    return None
def return_multiple_text_bounds(img, text, pyt_conf, lang):
    """Return a list of (left, top, width, height) boxes, one per occurrence of ``text``.

    Use this instead of ``return_text_bounds`` when the same text may appear
    several times. Returns ``None`` when there is no occurrence.
    """
    data = image_to_data(img, output_type=Output.DICT, config=pyt_conf, lang=lang)
    boxes = [
        (int(data['left'][i]), int(data['top'][i]),
         int(data['width'][i]), int(data['height'][i]))
        for i in range(len(data['level']))
        if data['text'][i] == text
    ]
    # Preserve the original contract: an empty result is reported as None.
    return boxes if boxes else None
| 38.362319
| 115
| 0.622592
| 398
| 2,647
| 3.934673
| 0.155779
| 0.103448
| 0.061303
| 0.03576
| 0.836526
| 0.808429
| 0.773946
| 0.742018
| 0.742018
| 0.742018
| 0
| 0.00592
| 0.234227
| 2,647
| 68
| 116
| 38.926471
| 0.76665
| 0.113714
| 0
| 0.666667
| 0
| 0
| 0.04741
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.020833
| 0
| 0.270833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3d46c4af15c615220255c7edeb7f8324b1f6dce6
| 37
|
py
|
Python
|
vase/__init__.py
|
gantzgraf/vape
|
f939cb527d72d852cb0919a57332110c15c5fd4a
|
[
"MIT"
] | 4
|
2020-03-25T06:09:39.000Z
|
2021-03-23T11:22:00.000Z
|
vase/__init__.py
|
gantzgraf/vape
|
f939cb527d72d852cb0919a57332110c15c5fd4a
|
[
"MIT"
] | 1
|
2020-10-02T14:50:30.000Z
|
2020-10-12T15:24:24.000Z
|
vase/__init__.py
|
gantzgraf/vape
|
f939cb527d72d852cb0919a57332110c15c5fd4a
|
[
"MIT"
] | 1
|
2021-02-20T11:32:34.000Z
|
2021-02-20T11:32:34.000Z
|
from vase.version import __version__
| 18.5
| 36
| 0.864865
| 5
| 37
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.848485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3d5db949fe9eba516f73d8617a97cf0aeaa0d4a9
| 108
|
py
|
Python
|
popcorn_gallery/notifications/context_processors.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | 15
|
2015-03-23T02:55:20.000Z
|
2021-01-12T12:42:30.000Z
|
popcorn_gallery/notifications/context_processors.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | null | null | null |
popcorn_gallery/notifications/context_processors.py
|
Koenkk/popcorn_maker
|
0978b9f98dacd4e8eb753404b24eb584f410aa11
|
[
"BSD-3-Clause"
] | 16
|
2015-02-18T21:43:31.000Z
|
2021-11-09T22:50:03.000Z
|
from .models import Notice
def notifications(request):
    """Template context processor: expose up to five live notices as ``notice_list``."""
    latest_notices = Notice.live.all()[:5]
    return {'notice_list': latest_notices}
| 18
| 50
| 0.703704
| 14
| 108
| 5.357143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01087
| 0.148148
| 108
| 5
| 51
| 21.6
| 0.804348
| 0
| 0
| 0
| 0
| 0
| 0.101852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
e9f2f22fd79eef346224db15cb32a1f63571d160
| 40
|
py
|
Python
|
tools/bianjie.py
|
bianjie0618/marked_SiamMask
|
51d2aa832a74b76a5cd63aae274d1dea1dc16a2c
|
[
"MIT"
] | null | null | null |
tools/bianjie.py
|
bianjie0618/marked_SiamMask
|
51d2aa832a74b76a5cd63aae274d1dea1dc16a2c
|
[
"MIT"
] | null | null | null |
tools/bianjie.py
|
bianjie0618/marked_SiamMask
|
51d2aa832a74b76a5cd63aae274d1dea1dc16a2c
|
[
"MIT"
] | null | null | null |
import torch
# print(torch.__version__)
| 13.333333
| 26
| 0.8
| 5
| 40
| 5.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 3
| 26
| 13.333333
| 0.777778
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
18598366367edfcefb01654f69c2bd013d7ae463
| 3,892
|
py
|
Python
|
tests/test_functions.py
|
theblackfly/protoflow
|
02a77e59f6afc8d462a738874d06eca810911166
|
[
"MIT"
] | 3
|
2020-10-07T05:04:05.000Z
|
2021-02-10T15:04:55.000Z
|
tests/test_functions.py
|
theblackfly/protoflow
|
02a77e59f6afc8d462a738874d06eca810911166
|
[
"MIT"
] | 5
|
2020-04-09T13:36:15.000Z
|
2020-12-17T16:30:50.000Z
|
tests/test_functions.py
|
theblackfly/protoflow
|
02a77e59f6afc8d462a738874d06eca810911166
|
[
"MIT"
] | 2
|
2020-10-01T21:48:16.000Z
|
2021-04-10T18:20:25.000Z
|
"""ProtoFlow functions test suite."""
import unittest
import numpy as np
from protoflow.functions import distances
class TestDistances(unittest.TestCase):
    """Tests for the distance functions in ``protoflow.functions.distances``."""

    def setUp(self):
        pass

    def _assert_almost_equal(self, actual, desired):
        # np.testing.assert_array_almost_equal raises AssertionError on
        # mismatch and always returns None on success, so the original
        # self.assertIsNone(...) wrapper added no checking — call it directly.
        np.testing.assert_array_almost_equal(actual, desired, decimal=4)

    def test_lpnorm_p2_1d(self):
        """Euclidean (p=2) distance between two single 2-d points."""
        x = np.array([[0, 0]], dtype='float32')
        w = np.array([[1, 1]], dtype='float32')
        actual = distances.lpnorm_distance(x, w, p=2)
        desired = np.array([[1.4142]])
        self._assert_almost_equal(actual, desired)

    def test_lpnorm_p2_2d(self):
        """Pairwise p=2 distances: 2 samples x 3 prototypes."""
        x = np.array([[0, 0],
                      [1, 1]], dtype='float32')
        w = np.array([[1, 1],
                      [1, 1],
                      [1, 1]], dtype='float32')
        actual = distances.lpnorm_distance(x, w, p=2)
        desired = np.array([[1.4142, 1.4142, 1.4142],
                            [0.0000, 0.0000, 0.0000]])
        self._assert_almost_equal(actual, desired)

    def test_omega(self):
        """omega_distance with an identity omega equals squared Euclidean distance."""
        x = np.array([[0, 0],
                      [1, 1]], dtype='float32')
        w = np.array([[0, 0],
                      [0, 1],
                      [1, 0],
                      [1, 1]], dtype='float32')
        omega = np.eye(w.shape[1], dtype='float32')
        actual = distances.omega_distance(x, w, omega)
        desired = np.array([[0.0000, 1.0000, 1.0000, 1.4142],
                            [1.4142, 1.0000, 1.0000, 0.0000]]) ** 2
        self._assert_almost_equal(actual, desired)

    def test_lomega_eye_omegas(self):
        """lomega_distance with one identity omega per prototype."""
        x = np.array([[0, 0],
                      [1, 1]], dtype='float32')
        w = np.array([[0, 0],
                      [0, 1],
                      [1, 0],
                      [1, 1]], dtype='float32')
        omegas = np.stack([np.eye(w.shape[1], dtype='float32')] * w.shape[0])
        actual = distances.lomega_distance(x, w, omegas)
        desired = np.array([[0.0000, 1.0000, 1.0000, 1.4142],
                            [1.4142, 1.0000, 1.0000, 0.0000]]) ** 2
        self._assert_almost_equal(actual, desired)

    def test_lomega_zeros_omegas(self):
        """All-zero omegas collapse every distance to zero."""
        x = np.array([[0, 0],
                      [1, 1]], dtype='float32')
        w = np.array([[0, 0],
                      [0, 1],
                      [1, 0],
                      [1, 1]], dtype='float32')
        omegas = np.stack([np.zeros(
            (w.shape[1], w.shape[1]), dtype='float32')] * w.shape[0])
        actual = distances.lomega_distance(x, w, omegas)
        desired = np.zeros((2, 4))
        self._assert_almost_equal(actual, desired)

    def test_lomega_ones_omegas(self):
        """All-ones omegas: distances measured along the all-ones direction."""
        x = np.array([[0, 0],
                      [1, 1]], dtype='float32')
        w = np.array([[0, 0],
                      [0, 1],
                      [1, 0],
                      [1, 1]], dtype='float32')
        omegas = np.stack([np.ones(
            (w.shape[1], w.shape[1]), dtype='float32')] * w.shape[0])
        actual = distances.lomega_distance(x, w, omegas)
        desired = np.array([[0.000000, 1.4142135, 1.4142135, 2.828427],
                            [2.828427, 1.4142135, 1.4142135, 0.000000]]) ** 2
        self._assert_almost_equal(actual, desired)
if __name__ == '__main__':
    # Run the full suite when this file is executed directly.
    unittest.main()
| 36.373832
| 77
| 0.487153
| 476
| 3,892
| 3.882353
| 0.12605
| 0.020563
| 0.105519
| 0.083333
| 0.829545
| 0.8171
| 0.8171
| 0.790584
| 0.777056
| 0.777056
| 0
| 0.139044
| 0.355087
| 3,892
| 106
| 78
| 36.716981
| 0.597211
| 0.049846
| 0
| 0.625
| 0
| 0
| 0.0326
| 0
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.0875
| false
| 0.0125
| 0.0375
| 0
| 0.1375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
185bde9e31af09145e63b665a66f4c0b53a750fe
| 26
|
py
|
Python
|
pyopt_tools/__init__.py
|
Fus3n/pyopt-tools
|
ec30de59c885fc1f03b1256d931131b22e1cf5b7
|
[
"MIT"
] | 2
|
2022-01-08T21:09:37.000Z
|
2022-01-12T16:09:04.000Z
|
pyopt_tools/__init__.py
|
Fus3n/pyopt-tools
|
ec30de59c885fc1f03b1256d931131b22e1cf5b7
|
[
"MIT"
] | null | null | null |
pyopt_tools/__init__.py
|
Fus3n/pyopt-tools
|
ec30de59c885fc1f03b1256d931131b22e1cf5b7
|
[
"MIT"
] | null | null | null |
from pyopt_tools import *
| 13
| 25
| 0.807692
| 4
| 26
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 26
| 1
| 26
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a14d4177b56d1da2699f6d71ae1b86091568fbe8
| 136
|
py
|
Python
|
myML/myml.py
|
bobbyisot/MachineLearning
|
522f2a832d457ea6d14f8f8c72fca44bc15f59db
|
[
"MIT"
] | null | null | null |
myML/myml.py
|
bobbyisot/MachineLearning
|
522f2a832d457ea6d14f8f8c72fca44bc15f59db
|
[
"MIT"
] | null | null | null |
myML/myml.py
|
bobbyisot/MachineLearning
|
522f2a832d457ea6d14f8f8c72fca44bc15f59db
|
[
"MIT"
] | null | null | null |
class Test_2020(object):
    """Minimal example class holding an integer counter ``a``.

    The counter starts at 1 and is incremented via :meth:`add_a`.
    """

    def __init__(self):
        self.a = 1  # counter value
        # Plain string literal: the original used an f-string with no
        # placeholders, which is a pointless f-prefix.
        print('success')

    def add_a(self):
        """Increment the counter by one."""
        self.a += 1
| 17
| 25
| 0.529412
| 20
| 136
| 3.3
| 0.65
| 0.242424
| 0.272727
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0.338235
| 136
| 7
| 26
| 19.428571
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.051471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.5
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a1aec7d801aea5ebf861295cb9900e84026ee22b
| 32
|
py
|
Python
|
cit-api/pipeline/tests/views/__init__.py
|
bcgov/CIT
|
b9db4f169b52e9a6293b3ee1e61935888074215a
|
[
"Apache-2.0"
] | 10
|
2020-11-12T15:13:40.000Z
|
2022-03-05T22:33:08.000Z
|
cit-api/pipeline/tests/views/__init__.py
|
bcgov/CIT
|
b9db4f169b52e9a6293b3ee1e61935888074215a
|
[
"Apache-2.0"
] | 28
|
2020-07-17T16:33:55.000Z
|
2022-03-21T16:24:25.000Z
|
cit-api/pipeline/tests/views/__init__.py
|
bcgov/CIT
|
b9db4f169b52e9a6293b3ee1e61935888074215a
|
[
"Apache-2.0"
] | 5
|
2020-11-02T23:39:53.000Z
|
2022-03-01T19:09:45.000Z
|
from .test_opportunity import *
| 16
| 31
| 0.8125
| 4
| 32
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b80ed33f2221b2a3732b6390e63d6a5350d7dd13
| 43
|
py
|
Python
|
pextant/analysis/kmlsupport.py
|
norheim/pextant
|
f4235719279c0e6f178ae1e0f8b1ea3346533915
|
[
"MIT"
] | null | null | null |
pextant/analysis/kmlsupport.py
|
norheim/pextant
|
f4235719279c0e6f178ae1e0f8b1ea3346533915
|
[
"MIT"
] | 1
|
2019-12-03T03:52:41.000Z
|
2019-12-04T14:50:36.000Z
|
pextant/analysis/kmlsupport.py
|
norheim/pextant
|
f4235719279c0e6f178ae1e0f8b1ea3346533915
|
[
"MIT"
] | 1
|
2019-12-03T02:37:57.000Z
|
2019-12-03T02:37:57.000Z
|
from pykml.factory import KML_ElementMaker
| 21.5
| 42
| 0.883721
| 6
| 43
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 1
| 43
| 43
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
62ce75ed0bc80d716e869ec2ea08d0c3839e1ab4
| 354
|
py
|
Python
|
ensemble_boxes/__init__.py
|
Sergey-Zlobin/Weighted-Boxes-Fusion
|
773ed6f9513ade442c0f89885f3a36d95cf0629d
|
[
"MIT"
] | 2
|
2021-07-19T05:19:00.000Z
|
2022-03-06T03:48:02.000Z
|
ensemble_boxes/__init__.py
|
Sergey-Zlobin/Weighted-Boxes-Fusion
|
773ed6f9513ade442c0f89885f3a36d95cf0629d
|
[
"MIT"
] | null | null | null |
ensemble_boxes/__init__.py
|
Sergey-Zlobin/Weighted-Boxes-Fusion
|
773ed6f9513ade442c0f89885f3a36d95cf0629d
|
[
"MIT"
] | 1
|
2021-09-15T21:26:39.000Z
|
2021-09-15T21:26:39.000Z
|
# coding: utf-8
__author__ = 'ZFTurbo: https://kaggle.com/zfturbo'
from .ensemble_boxes_wbf import weighted_boxes_fusion
from .ensemble_boxes_nmw import non_maximum_weighted
from .ensemble_boxes_nms import nms_method
from .ensemble_boxes_nms import nms
from .ensemble_boxes_nms import soft_nms
from .ensemble_boxes_wbf_3d import weighted_boxes_fusion_3d
| 39.333333
| 59
| 0.861582
| 55
| 354
| 5.072727
| 0.4
| 0.258065
| 0.365591
| 0.215054
| 0.301075
| 0.207885
| 0
| 0
| 0
| 0
| 0
| 0.009288
| 0.087571
| 354
| 9
| 59
| 39.333333
| 0.854489
| 0.036723
| 0
| 0
| 0
| 0
| 0.102941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a7fd14f6753ca35ce9c471c3412feedb1e337095
| 16,068
|
py
|
Python
|
models/backbones/resnet.py
|
crutcher/stylelens
|
8df3704f56fe6a30395eadcb1aee2e11563dfabb
|
[
"MIT"
] | null | null | null |
models/backbones/resnet.py
|
crutcher/stylelens
|
8df3704f56fe6a30395eadcb1aee2e11563dfabb
|
[
"MIT"
] | null | null | null |
models/backbones/resnet.py
|
crutcher/stylelens
|
8df3704f56fe6a30395eadcb1aee2e11563dfabb
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------------
from collections import OrderedDict
import torch.nn as nn
from timm.models.resnet import BasicBlock, Bottleneck
from timm.models.resnet import ResNet as BaseResNet
from timm.models.resnet import default_cfgs, load_pretrained
from base import BaseBackboneWrapper
# ------------------------------------------------------------------------------
# ResNetBlock
# ------------------------------------------------------------------------------
class ResNetBasicBlock(nn.Module):
    """A timm ``BasicBlock`` preceded by a 1x1 projection shortcut.

    The projection (conv + batch norm) maps ``in_channels`` to
    ``out_channels`` so the residual addition inside the block is
    shape-compatible.
    """

    expansion = 1

    def __init__(self, in_channels, out_channels):
        super(ResNetBasicBlock, self).__init__()
        projection = nn.Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
                    ),
                    ("bn", nn.BatchNorm2d(out_channels)),
                ]
            )
        )
        self.block = BasicBlock(
            in_channels,
            int(out_channels / BasicBlock.expansion),
            downsample=projection,
        )

    def forward(self, x):
        return self.block(x)
class ResNetBottleneckBlock(nn.Module):
    """A timm ``Bottleneck`` preceded by a 1x1 projection shortcut.

    Mirrors :class:`ResNetBasicBlock` but with the 4x-expansion
    bottleneck block.
    """

    expansion = 4

    def __init__(self, in_channels, out_channels):
        super(ResNetBottleneckBlock, self).__init__()
        projection = nn.Sequential(
            OrderedDict(
                [
                    (
                        "conv",
                        nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
                    ),
                    ("bn", nn.BatchNorm2d(out_channels)),
                ]
            )
        )
        self.block = Bottleneck(
            in_channels,
            int(out_channels / Bottleneck.expansion),
            downsample=projection,
        )

    def forward(self, x):
        return self.block(x)
# ------------------------------------------------------------------------------
# ResNet
# ------------------------------------------------------------------------------
class ResNet(BaseResNet, BaseBackboneWrapper):
    """timm ResNet wrapped as a backbone that returns every stage's feature map.

    frozen_stages: highest stage index whose parameters are frozen by
        ``_freeze_stages`` (-1 disables freezing, 0 freezes only the stem).
    norm_eval: stored flag; presumably consumed by ``BaseBackboneWrapper``
        to keep BatchNorm layers in eval mode — TODO confirm against base class.
    """
    def __init__(self, block, layers, frozen_stages=-1, norm_eval=False, **kargs):
        super(ResNet, self).__init__(block=block, layers=layers, **kargs)
        self.frozen_stages = frozen_stages
        self.norm_eval = norm_eval
    def forward(self, input):
        """Return the five intermediate feature maps (stem output + 4 stages)."""
        # Stem
        x1 = self.conv1(input)
        x1 = self.bn1(x1)
        x1 = self.relu(x1)
        # Stage1
        x2 = self.maxpool(x1)
        x2 = self.layer1(x2)
        # Stage2
        x3 = self.layer2(x2)
        # Stage3
        x4 = self.layer3(x3)
        # Stage4
        x5 = self.layer4(x4)
        # Output
        return x1, x2, x3, x4, x5
    def init_from_imagenet(self, archname):
        """Load ImageNet weights for ``archname`` from timm's ``default_cfgs``."""
        load_pretrained(self, default_cfgs[archname], self.num_classes)
    def _freeze_stages(self):
        # Freeze stem
        if self.frozen_stages >= 0:
            # eval() also stops BatchNorm running-stat updates for the stem.
            self.bn1.eval()
            for module in [self.conv1, self.bn1]:
                for param in module.parameters():
                    param.requires_grad = False
        # Chosen subsequent blocks are also frozen
        for stage_idx in range(1, self.frozen_stages + 1):
            for module in getattr(self, "layer%d" % (stage_idx)):
                module.eval()
                for param in module.parameters():
                    param.requires_grad = False
# ------------------------------------------------------------------------------
# Versions of ResNet
# ------------------------------------------------------------------------------
def resnet18(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-18 model."""
    cfg = default_cfgs["resnet18"]
    net = ResNet(
        BasicBlock, [2, 2, 2, 2], num_classes=num_classes, in_chans=in_chans, **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnet34(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-34 model."""
    cfg = default_cfgs["resnet34"]
    net = ResNet(
        BasicBlock, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnet26(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-26 model."""
    cfg = default_cfgs["resnet26"]
    net = ResNet(
        Bottleneck, [2, 2, 2, 2], num_classes=num_classes, in_chans=in_chans, **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnet26d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-26 v1d model.

    Technically a 28-layer ResNet; the 'd' modifier (deep stem and
    average-pool downsampling) follows Gluon's naming for now.
    """
    cfg = default_cfgs["resnet26d"]
    net = ResNet(
        Bottleneck,
        [2, 2, 2, 2],
        stem_width=32,
        deep_stem=True,
        avg_down=True,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnet50(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-50 model."""
    cfg = default_cfgs["resnet50"]
    net = ResNet(
        Bottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnet101(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-101 model."""
    cfg = default_cfgs["resnet101"]
    net = ResNet(
        Bottleneck, [3, 4, 23, 3], num_classes=num_classes, in_chans=in_chans, **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnet152(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-152 model."""
    cfg = default_cfgs["resnet152"]
    net = ResNet(
        Bottleneck, [3, 8, 36, 3], num_classes=num_classes, in_chans=in_chans, **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def tv_resnet34(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-34 model with original Torchvision weights."""
    net = ResNet(
        BasicBlock, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, **kwargs
    )
    net.default_cfg = default_cfgs["tv_resnet34"]
    if pretrained:
        load_pretrained(net, net.default_cfg, num_classes, in_chans)
    return net
def tv_resnet50(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNet-50 model with original Torchvision weights."""
    net = ResNet(
        Bottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, **kwargs
    )
    net.default_cfg = default_cfgs["tv_resnet50"]
    if pretrained:
        load_pretrained(net, net.default_cfg, num_classes, in_chans)
    return net
def wide_resnet50_2(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a Wide ResNet-50-2 model.

    Identical to ResNet-50 except the bottleneck channel count is doubled
    in every block (base_width=128); the outer 1x1 convolutions keep the
    same width, e.g. the last block has 2048-1024-2048 channels instead of
    2048-512-2048.
    """
    net = ResNet(
        Bottleneck,
        [3, 4, 6, 3],
        base_width=128,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    net.default_cfg = default_cfgs["wide_resnet50_2"]
    if pretrained:
        load_pretrained(net, net.default_cfg, num_classes, in_chans)
    return net
def wide_resnet101_2(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a Wide ResNet-101-2 model.

    Identical to ResNet-101 except the bottleneck channel count is doubled
    in every block (base_width=128); the outer 1x1 convolutions keep the
    same width.
    """
    net = ResNet(
        Bottleneck,
        [3, 4, 23, 3],
        base_width=128,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    net.default_cfg = default_cfgs["wide_resnet101_2"]
    if pretrained:
        load_pretrained(net, net.default_cfg, num_classes, in_chans)
    return net
def resnext50_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt50-32x4d model."""
    cfg = default_cfgs["resnext50_32x4d"]
    net = ResNet(
        Bottleneck,
        [3, 4, 6, 3],
        cardinality=32,
        base_width=4,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnext50d_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt50d-32x4d model.

    ResNeXt50 with a deep stem and average-pool downsampling.
    """
    cfg = default_cfgs["resnext50d_32x4d"]
    net = ResNet(
        Bottleneck,
        [3, 4, 6, 3],
        cardinality=32,
        base_width=4,
        stem_width=32,
        deep_stem=True,
        avg_down=True,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnext101_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt-101 32x4d model."""
    cfg = default_cfgs["resnext101_32x4d"]
    net = ResNet(
        Bottleneck,
        [3, 4, 23, 3],
        cardinality=32,
        base_width=4,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnext101_32x8d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt-101 32x8d model."""
    cfg = default_cfgs["resnext101_32x8d"]
    net = ResNet(
        Bottleneck,
        [3, 4, 23, 3],
        cardinality=32,
        base_width=8,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def resnext101_64x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt101-64x4d model."""
    # BUG FIX: this previously looked up default_cfgs["resnext101_32x4d"],
    # a copy-paste from the 32x4d factory, so the attached default_cfg (and
    # any pretrained weights loaded from it) described the wrong architecture.
    default_cfg = default_cfgs["resnext101_64x4d"]
    model = ResNet(
        Bottleneck,
        [3, 4, 23, 3],
        cardinality=64,
        base_width=4,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def tv_resnext50_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt50-32x4d model with original Torchvision weights."""
    cfg = default_cfgs["tv_resnext50_32x4d"]
    net = ResNet(
        Bottleneck,
        [3, 4, 6, 3],
        cardinality=32,
        base_width=4,
        num_classes=num_classes,
        in_chans=in_chans,
        **kwargs
    )
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def ig_resnext101_32x8d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
    and finetuned on ImageNet, from Figure 5 of
    `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
    Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

    Args:
        pretrained (bool): load pretrained weights
        num_classes (int): number of classes for classifier (default: 1000 for pretrained)
        in_chans (int): number of input planes (default: 3 for pretrained / color)
    """
    cfg = default_cfgs["ig_resnext101_32x8d"]
    net = ResNet(Bottleneck, [3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def ig_resnext101_32x16d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data
    and finetuned on ImageNet, from Figure 5 of
    `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
    Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

    Args:
        pretrained (bool): load pretrained weights
        num_classes (int): number of classes for classifier (default: 1000 for pretrained)
        in_chans (int): number of input planes (default: 3 for pretrained / color)
    """
    cfg = default_cfgs["ig_resnext101_32x16d"]
    net = ResNet(Bottleneck, [3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def ig_resnext101_32x32d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data
    and finetuned on ImageNet, from Figure 5 of
    `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
    Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

    Args:
        pretrained (bool): load pretrained weights
        num_classes (int): number of classes for classifier (default: 1000 for pretrained)
        in_chans (int): number of input planes (default: 3 for pretrained / color)
    """
    cfg = default_cfgs["ig_resnext101_32x32d"]
    net = ResNet(Bottleneck, [3, 4, 23, 3], cardinality=32, base_width=32, **kwargs)
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def ig_resnext101_32x48d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data
    and finetuned on ImageNet, from Figure 5 of
    `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
    Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/

    Args:
        pretrained (bool): load pretrained weights
        num_classes (int): number of classes for classifier (default: 1000 for pretrained)
        in_chans (int): number of input planes (default: 3 for pretrained / color)
    """
    cfg = default_cfgs["ig_resnext101_32x48d"]
    net = ResNet(Bottleneck, [3, 4, 23, 3], cardinality=32, base_width=48, **kwargs)
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
| 35.236842
| 97
| 0.627521
| 1,953
| 16,068
| 4.952381
| 0.111111
| 0.083747
| 0.080645
| 0.066791
| 0.837883
| 0.80428
| 0.798904
| 0.780294
| 0.772643
| 0.762717
| 0
| 0.052735
| 0.238798
| 16,068
| 455
| 98
| 35.314286
| 0.738043
| 0.255539
| 0
| 0.616613
| 0
| 0
| 0.026334
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092652
| false
| 0
| 0.019169
| 0
| 0.204473
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c51382a38a7e8c406e1db5aa246ad6bdb7e3cd0b
| 45,129
|
py
|
Python
|
tests/test_filtering.py
|
SimonHurst/py-pdf-parser
|
4186115b64115e9916475d4a034542a64f57457b
|
[
"MIT"
] | null | null | null |
tests/test_filtering.py
|
SimonHurst/py-pdf-parser
|
4186115b64115e9916475d4a034542a64f57457b
|
[
"MIT"
] | null | null | null |
tests/test_filtering.py
|
SimonHurst/py-pdf-parser
|
4186115b64115e9916475d4a034542a64f57457b
|
[
"MIT"
] | null | null | null |
import re
from mock import patch, call
from py_pdf_parser.components import PDFDocument, PDFElement
from py_pdf_parser.common import BoundingBox
from py_pdf_parser.exceptions import NoElementFoundError, MultipleElementsFoundError
from py_pdf_parser.filtering import ElementList
from py_pdf_parser.loaders import Page
from .base import BaseTestCase
from .utils import FakePDFMinerTextElement, create_pdf_document
class TestFiltering(BaseTestCase):
def setUp(self):
self.elem1 = FakePDFMinerTextElement()
self.elem2 = FakePDFMinerTextElement()
self.elem3 = FakePDFMinerTextElement()
self.elem4 = FakePDFMinerTextElement()
self.elem5 = FakePDFMinerTextElement()
self.elem6 = FakePDFMinerTextElement()
self.doc = create_pdf_document(
[self.elem1, self.elem2, self.elem3, self.elem4, self.elem5, self.elem6]
)
self.elem_list = self.doc.elements
def test_add_tag_to_elements(self):
self.elem_list.add_tag_to_elements("foo")
for elem in self.elem_list:
self.assertIn("foo", elem.tags)
def test_ignored_elements_are_excluded(self):
self.assertEqual(len(self.doc.elements), len(self.elem_list))
self.elem_list[0].ignore()
self.assertEqual(len(self.doc.elements), len(self.elem_list) - 1)
self.assertNotIn(self.elem_list[0], self.doc.elements)
def test_filter_by_tag(self):
self.assertEqual(len(self.elem_list.filter_by_tag("foo")), 0)
self.elem_list[0].add_tag("foo")
self.assertEqual(len(self.elem_list.filter_by_tag("foo")), 1)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_tag("foo"))
self.elem_list[1].add_tag("bar")
self.assertEqual(len(self.elem_list.filter_by_tag("foo")), 1)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_tag("foo"))
self.elem_list[2].add_tag("foo")
self.assertEqual(len(self.elem_list.filter_by_tag("foo")), 2)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_tag("foo"))
self.assertIn(self.elem_list[2], self.elem_list.filter_by_tag("foo"))
def test_filter_by_tags(self):
self.assertEqual(len(self.elem_list.filter_by_tags("foo", "bar")), 0)
self.elem_list[0].add_tag("foo")
self.assertEqual(len(self.elem_list.filter_by_tags("foo", "bar")), 1)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_tags("foo", "bar"))
self.elem_list[1].add_tag("bar")
self.assertEqual(len(self.elem_list.filter_by_tags("foo", "bar")), 2)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_tags("foo", "bar"))
self.assertIn(self.elem_list[1], self.elem_list.filter_by_tags("foo", "bar"))
self.elem_list[2].add_tag("foo")
self.assertEqual(len(self.elem_list.filter_by_tags("foo", "bar")), 3)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_tags("foo", "bar"))
self.assertIn(self.elem_list[1], self.elem_list.filter_by_tags("foo", "bar"))
self.assertIn(self.elem_list[2], self.elem_list.filter_by_tags("foo", "bar"))
self.elem_list[3].add_tag("baz")
self.assertEqual(len(self.elem_list.filter_by_tags("foo", "bar")), 3)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_tags("foo", "bar"))
self.assertIn(self.elem_list[1], self.elem_list.filter_by_tags("foo", "bar"))
self.assertIn(self.elem_list[2], self.elem_list.filter_by_tags("foo", "bar"))
def test_filter_by_text_equal(self):
elem1 = FakePDFMinerTextElement(text="foo")
elem2 = FakePDFMinerTextElement(text="bar")
elem3 = FakePDFMinerTextElement(text="foobar")
elem4 = FakePDFMinerTextElement(text="baz")
doc = create_pdf_document([elem1, elem2, elem3, elem4])
self.assertEqual(len(doc.elements.filter_by_text_equal("hello")), 0)
self.assertEqual(len(doc.elements.filter_by_text_equal("baz")), 1)
self.assert_original_element_in(elem4, doc.elements.filter_by_text_equal("baz"))
self.assertEqual(len(doc.elements.filter_by_text_equal("foo")), 1)
self.assert_original_element_in(elem1, doc.elements.filter_by_text_equal("foo"))
def test_filter_by_text_contains(self):
elem1 = FakePDFMinerTextElement(text="foo")
elem2 = FakePDFMinerTextElement(text="bar")
elem3 = FakePDFMinerTextElement(text="foobar")
elem4 = FakePDFMinerTextElement(text="baz")
doc = create_pdf_document([elem1, elem2, elem3, elem4])
self.assertEqual(len(doc.elements.filter_by_text_contains("hello")), 0)
self.assertEqual(len(doc.elements.filter_by_text_contains("baz")), 1)
self.assert_original_element_in(
elem4, doc.elements.filter_by_text_contains("baz")
)
self.assertEqual(len(doc.elements.filter_by_text_contains("foo")), 2)
self.assert_original_element_in(
elem1, doc.elements.filter_by_text_contains("foo")
)
self.assert_original_element_in(
elem3, doc.elements.filter_by_text_contains("foo")
)
def test_filter_by_regex(self):
elem1 = FakePDFMinerTextElement(text="foo 1")
elem2 = FakePDFMinerTextElement(text="foo")
elem3 = FakePDFMinerTextElement(text="foo 987 ")
elem4 = FakePDFMinerTextElement(text=" Foo 100")
doc = create_pdf_document([elem1, elem2, elem3, elem4])
self.assertEqual(len(doc.elements.filter_by_regex(r"^\d+$")), 0)
filter_result = doc.elements.filter_by_regex(r"^foo \d+$")
self.assertEqual(len(filter_result), 2)
self.assert_original_element_in(elem1, filter_result)
self.assert_original_element_in(elem3, filter_result)
# Test with a regex flag to ignore the case
filter_result = doc.elements.filter_by_regex(
r"^foo \d+$", regex_flags=re.IGNORECASE
)
self.assertEqual(len(filter_result), 3)
self.assert_original_element_in(elem1, filter_result)
self.assert_original_element_in(elem3, filter_result)
self.assert_original_element_in(elem4, filter_result)
# Test with non stripped text
filter_result = doc.elements.filter_by_regex(r"^foo \d+$", stripped=False)
self.assertEqual(len(filter_result), 1)
self.assert_original_element_in(elem1, filter_result)
# Test with a regex flag to ignore the case and non stripped text, while giving
# a regex with an empty space
filter_result = doc.elements.filter_by_regex(
r"^ foo \d+$", regex_flags=re.IGNORECASE, stripped=False
)
self.assertEqual(len(filter_result), 1)
self.assert_original_element_in(elem4, filter_result)
def test_filter_by_font(self):
elem1 = FakePDFMinerTextElement(font_name="foo", font_size=2)
elem2 = FakePDFMinerTextElement(font_name="bar", font_size=3)
doc = create_pdf_document([elem1, elem2])
self.assertEqual(len(doc.elements.filter_by_font("hello,1")), 0)
self.assertEqual(len(doc.elements.filter_by_font("foo,2")), 1)
# Check if "foo,2" has been added to cache
self.assertEqual(doc._element_indexes_by_font, {"foo,2": set([0])})
self.assert_original_element_in(elem1, doc.elements.filter_by_font("foo,2"))
# Check we can still filter for another font which is not in cache
self.assertEqual(len(doc.elements.filter_by_font("bar,3")), 1)
self.assertEqual(
doc._element_indexes_by_font, {"foo,2": set([0]), "bar,3": set([1])}
)
self.assert_original_element_in(elem2, doc.elements.filter_by_font("bar,3"))
doc = create_pdf_document([elem1, elem2], font_mapping={"foo,2": "font_a"})
self.assertEqual(len(doc.elements.filter_by_font("hello,1")), 0)
self.assertEqual(len(doc.elements.filter_by_font("foo,2")), 0)
self.assertEqual(len(doc.elements.filter_by_font("font_a")), 1)
# Check if "font_a" has been added to cache
self.assertEqual(doc._element_indexes_by_font, {"font_a": set([0])})
self.assert_original_element_in(elem1, doc.elements.filter_by_font("font_a"))
def test_filter_by_fonts(self):
elem1 = FakePDFMinerTextElement(font_name="foo", font_size=2)
elem2 = FakePDFMinerTextElement(font_name="bar", font_size=3)
elem3 = FakePDFMinerTextElement(font_name="baz", font_size=3)
doc = create_pdf_document([elem1, elem2, elem3])
self.assertEqual(len(doc.elements.filter_by_fonts("hello,1")), 0)
self.assertEqual(len(doc.elements.filter_by_fonts("foo,2", "bar,3")), 2)
# Check if "foo,2" and "bar,3" have been added to cache
self.assertEqual(
doc._element_indexes_by_font, {"foo,2": set([0]), "bar,3": set([1])}
)
self.assert_original_element_in(
elem1, doc.elements.filter_by_fonts("foo,2", "bar,3")
)
self.assert_original_element_in(
elem2, doc.elements.filter_by_fonts("foo,2", "bar,3")
)
doc = create_pdf_document(
[elem1, elem2, elem3],
font_mapping={"foo,2": "font_a", "bar,3": "font_b", "baz,3": "font_c"},
)
self.assertEqual(len(doc.elements.filter_by_font("hello,1")), 0)
self.assertEqual(len(doc.elements.filter_by_fonts("foo,2", "bar,3")), 0)
self.assertEqual(len(doc.elements.filter_by_fonts("font_a", "font_b")), 2)
# Check if "font_a" and "font_b" have been added to cache
self.assertEqual(
doc._element_indexes_by_font, {"font_a": set([0]), "font_b": set([1])}
)
self.assert_original_element_in(
elem1, doc.elements.filter_by_fonts("font_a", "font_b")
)
self.assert_original_element_in(
elem2, doc.elements.filter_by_fonts("font_a", "font_b")
)
# Check we can still filter for another font which is not in cache
self.assertEqual(len(doc.elements.filter_by_fonts("font_b", "font_c")), 2)
self.assert_original_element_in(
elem2, doc.elements.filter_by_fonts("font_b", "font_c")
)
self.assert_original_element_in(
elem3, doc.elements.filter_by_fonts("font_b", "font_c")
)
self.assertEqual(
doc._element_indexes_by_font,
{"font_a": set([0]), "font_b": set([1]), "font_c": set([2])},
)
def test_filter_by_page(self):
elem1 = FakePDFMinerTextElement()
elem2 = FakePDFMinerTextElement()
elem3 = FakePDFMinerTextElement()
page1 = Page(width=100, height=100, elements=[elem1, elem2])
page2 = Page(width=100, height=100, elements=[elem3])
doc = PDFDocument({1: page1, 2: page2})
self.assertEqual(len(doc.elements.filter_by_page(1)), 2)
self.assert_original_element_in(elem1, doc.elements.filter_by_page(1))
self.assert_original_element_in(elem2, doc.elements.filter_by_page(1))
def test_filter_by_pages(self):
elem1 = FakePDFMinerTextElement()
elem2 = FakePDFMinerTextElement()
elem3 = FakePDFMinerTextElement()
elem4 = FakePDFMinerTextElement()
page1 = Page(width=100, height=100, elements=[elem1, elem2])
page2 = Page(width=100, height=100, elements=[elem3])
page3 = Page(width=100, height=100, elements=[elem4])
doc = PDFDocument({1: page1, 2: page2, 3: page3})
self.assertEqual(len(doc.elements.filter_by_pages(1, 2)), 3)
self.assert_original_element_in(elem1, doc.elements.filter_by_pages(1, 2))
self.assert_original_element_in(elem2, doc.elements.filter_by_pages(1, 2))
self.assert_original_element_in(elem3, doc.elements.filter_by_pages(1, 2))
def test_filter_by_section_name(self):
self.doc.sectioning.create_section("foo", self.elem_list[0], self.elem_list[1])
self.assertEqual(len(self.elem_list.filter_by_section_name("foo")), 2)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_section_name("foo"))
self.assertIn(self.elem_list[1], self.elem_list.filter_by_section_name("foo"))
self.doc.sectioning.create_section("foo", self.elem_list[3], self.elem_list[5])
self.assertEqual(len(self.elem_list.filter_by_section_name("foo")), 5)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_section_name("foo"))
self.assertIn(self.elem_list[1], self.elem_list.filter_by_section_name("foo"))
self.assertIn(self.elem_list[3], self.elem_list.filter_by_section_name("foo"))
self.assertIn(self.elem_list[4], self.elem_list.filter_by_section_name("foo"))
self.assertIn(self.elem_list[5], self.elem_list.filter_by_section_name("foo"))
def test_filter_by_section_names(self):
self.doc.sectioning.create_section("foo", self.elem_list[0], self.elem_list[1])
self.doc.sectioning.create_section("bar", self.elem_list[3], self.elem_list[5])
self.doc.sectioning.create_section("foo", self.elem_list[3], self.elem_list[5])
self.assertEqual(len(self.elem_list.filter_by_section_names("foo", "bar")), 5)
self.assertIn(
self.elem_list[0], self.elem_list.filter_by_section_names("foo", "bar")
)
self.assertIn(
self.elem_list[1], self.elem_list.filter_by_section_names("foo", "bar")
)
self.assertIn(
self.elem_list[3], self.elem_list.filter_by_section_names("foo", "bar")
)
self.assertIn(
self.elem_list[4], self.elem_list.filter_by_section_names("foo", "bar")
)
self.assertIn(
self.elem_list[5], self.elem_list.filter_by_section_names("foo", "bar")
)
def test_filter_by_section(self):
self.doc.sectioning.create_section("foo", self.elem_list[0], self.elem_list[1])
self.doc.sectioning.create_section("foo", self.elem_list[3], self.elem_list[5])
self.assertEqual(len(self.elem_list.filter_by_section("foo_0")), 2)
self.assertIn(self.elem_list[0], self.elem_list.filter_by_section("foo_0"))
self.assertIn(self.elem_list[1], self.elem_list.filter_by_section("foo_0"))
# Filtering for non-existent section should return empty ElementList
self.assertEqual(len(self.elem_list.filter_by_section("bar")), 0)
def test_filter_by_sections(self):
self.doc.sectioning.create_section("foo", self.elem_list[0], self.elem_list[1])
self.doc.sectioning.create_section("foo", self.elem_list[3], self.elem_list[5])
self.assertEqual(len(self.elem_list.filter_by_sections("foo_0", "foo_1")), 5)
self.assertIn(
self.elem_list[0], self.elem_list.filter_by_sections("foo_0", "foo_1")
)
self.assertIn(
self.elem_list[1], self.elem_list.filter_by_sections("foo_0", "foo_1")
)
self.assertIn(
self.elem_list[3], self.elem_list.filter_by_sections("foo_0", "foo_1")
)
self.assertIn(
self.elem_list[4], self.elem_list.filter_by_sections("foo_0", "foo_1")
)
self.assertIn(
self.elem_list[5], self.elem_list.filter_by_sections("foo_0", "foo_1")
)
def test_ignore_elements(self):
self.elem_list.ignore_elements()
self.assertTrue(self.elem_list[0].ignored)
self.assertTrue(self.elem_list[1].ignored)
self.assertTrue(self.elem_list[2].ignored)
self.assertTrue(self.elem_list[3].ignored)
self.assertTrue(self.elem_list[4].ignored)
self.assertTrue(self.elem_list[5].ignored)
self.assertEqual(0, len(self.doc.elements))
self.assertEqual(self.doc._ignored_indexes, set([0, 1, 2, 3, 4, 5]))
    @patch.object(PDFElement, "partially_within", autospec=True)
    def test_to_the_right_of(self, partially_within_mock):
        """to_the_right_of queries a box from the element's right edge to the
        right edge of the page, restricted to elements on the same page.
        """
        # The mock treats an element as inside any box iff its text is "within".
        partially_within_mock.side_effect = (
            lambda self, bounding_box: self.text() == "within"
        )
        elem1 = FakePDFMinerTextElement(
            text="within", bounding_box=BoundingBox(50, 51, 50, 51)
        )
        elem2 = FakePDFMinerTextElement(text="within")
        elem3 = FakePDFMinerTextElement()
        elem4 = FakePDFMinerTextElement(text="within")
        # elem5/elem6 live on page 2; elem6's text is "within" yet it never
        # appears in the results, exercising the same-page restriction.
        elem5 = FakePDFMinerTextElement()
        elem6 = FakePDFMinerTextElement(text="within")
        page1 = Page(elements=[elem1, elem2, elem3, elem4], width=100, height=100)
        page2 = Page(elements=[elem5, elem6], width=100, height=100)
        doc = PDFDocument(pages={1: page1, 2: page2})
        elem_list = doc.elements
        pdf_elem1 = self.extract_element_from_list(elem1, elem_list)
        pdf_elem2 = self.extract_element_from_list(elem2, elem_list)
        pdf_elem3 = self.extract_element_from_list(elem3, elem_list)
        pdf_elem4 = self.extract_element_from_list(elem4, elem_list)
        result = elem_list.to_the_right_of(pdf_elem1)
        # expected_bbox is from the right edge of elem1 to the right edge of the page
        expected_bbox = BoundingBox(51, 100, 50, 51)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
        # Non-inclusive: the reference element itself is excluded.
        self.assertEqual(len(result), 2)
        self.assertIn(pdf_elem2, result)
        self.assertIn(pdf_elem4, result)
        # Also test with inclusive=True: the reference element is now included.
        partially_within_mock.reset_mock()
        result = elem_list.to_the_right_of(pdf_elem1, inclusive=True)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 3)
        self.assertIn(pdf_elem1, result)
        self.assertIn(pdf_elem2, result)
        self.assertIn(pdf_elem4, result)
        # Test specifying tolerance
        # Tolerance shrinks the box's vertical extent by 0.1 at each end.
        expected_bbox = BoundingBox(51, 100, 50.1, 50.9)
        partially_within_mock.reset_mock()
        result = elem_list.to_the_right_of(pdf_elem1, tolerance=0.1)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
    @patch.object(PDFElement, "partially_within", autospec=True)
    def test_to_the_left_of(self, partially_within_mock):
        """to_the_left_of queries a box from the left edge of the page to the
        element's left edge, restricted to elements on the same page.
        """
        # The mock treats an element as inside any box iff its text is "within".
        partially_within_mock.side_effect = (
            lambda self, bounding_box: self.text() == "within"
        )
        elem1 = FakePDFMinerTextElement(
            text="within", bounding_box=BoundingBox(50, 51, 50, 51)
        )
        elem2 = FakePDFMinerTextElement(text="within")
        elem3 = FakePDFMinerTextElement()
        elem4 = FakePDFMinerTextElement(text="within")
        # elem5/elem6 live on page 2; elem6's text is "within" yet it never
        # appears in the results, exercising the same-page restriction.
        elem5 = FakePDFMinerTextElement()
        elem6 = FakePDFMinerTextElement(text="within")
        page1 = Page(elements=[elem1, elem2, elem3, elem4], width=100, height=100)
        page2 = Page(elements=[elem5, elem6], width=100, height=100)
        doc = PDFDocument(pages={1: page1, 2: page2})
        elem_list = doc.elements
        pdf_elem1 = self.extract_element_from_list(elem1, elem_list)
        pdf_elem2 = self.extract_element_from_list(elem2, elem_list)
        pdf_elem3 = self.extract_element_from_list(elem3, elem_list)
        pdf_elem4 = self.extract_element_from_list(elem4, elem_list)
        result = elem_list.to_the_left_of(pdf_elem1)
        # expected_bbox is from the left edge of elem1 to the left edge of the page
        expected_bbox = BoundingBox(0, 50, 50, 51)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
        # Non-inclusive: the reference element itself is excluded.
        self.assertEqual(len(result), 2)
        self.assertIn(pdf_elem2, result)
        self.assertIn(pdf_elem4, result)
        # Also test with inclusive=True: the reference element is now included.
        partially_within_mock.reset_mock()
        result = elem_list.to_the_left_of(pdf_elem1, inclusive=True)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 3)
        self.assertIn(pdf_elem1, result)
        self.assertIn(pdf_elem2, result)
        self.assertIn(pdf_elem4, result)
        # Test specifying tolerance
        # Tolerance shrinks the box's vertical extent by 0.1 at each end.
        expected_bbox = BoundingBox(0, 50, 50.1, 50.9)
        partially_within_mock.reset_mock()
        result = elem_list.to_the_left_of(pdf_elem1, tolerance=0.1)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
    @patch.object(PDFElement, "partially_within", autospec=True)
    def test_below(self, partially_within_mock):
        """below queries a box under the element (same x extent, down to the
        bottom of the page), plus whole later pages when all_pages=True.
        """
        # The mock treats an element as inside any box iff its text is "within".
        partially_within_mock.side_effect = (
            lambda self, bounding_box: self.text() == "within"
        )
        elem1 = FakePDFMinerTextElement(text="within")
        elem2 = FakePDFMinerTextElement()
        elem3 = FakePDFMinerTextElement(
            text="within", bounding_box=BoundingBox(50, 51, 50, 51)
        )
        elem4 = FakePDFMinerTextElement(text="within")
        elem5 = FakePDFMinerTextElement()
        elem6 = FakePDFMinerTextElement(text="within")
        elem7 = FakePDFMinerTextElement()
        elem8 = FakePDFMinerTextElement(text="within")
        page1 = Page(elements=[elem1, elem2], width=100, height=100)
        page2 = Page(elements=[elem3, elem4, elem5, elem6], width=100, height=100)
        page3 = Page(elements=[elem7, elem8], width=100, height=100)
        doc = PDFDocument(pages={1: page1, 2: page2, 3: page3})
        elem_list = doc.elements
        pdf_elem3 = self.extract_element_from_list(elem3, elem_list)
        pdf_elem4 = self.extract_element_from_list(elem4, elem_list)
        pdf_elem5 = self.extract_element_from_list(elem5, elem_list)
        pdf_elem6 = self.extract_element_from_list(elem6, elem_list)
        pdf_elem7 = self.extract_element_from_list(elem7, elem_list)
        pdf_elem8 = self.extract_element_from_list(elem8, elem_list)
        result = elem_list.below(pdf_elem3)
        # expected_bbox spans elem3's x extent (50-51), from the bottom of the
        # page (0) up to elem3's bottom edge (50).
        expected_bbox = BoundingBox(50, 51, 0, 50)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
        # Non-inclusive: the reference element itself is excluded.
        self.assertEqual(len(result), 2)
        self.assertIn(pdf_elem4, result)
        self.assertIn(pdf_elem6, result)
        # Also test with inclusive=True: the reference element is now included.
        partially_within_mock.reset_mock()
        result = elem_list.below(pdf_elem3, inclusive=True)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 3)
        self.assertIn(pdf_elem3, result)
        self.assertIn(pdf_elem4, result)
        self.assertIn(pdf_elem6, result)
        # Also test with all_pages=True: page 3 is checked against a box
        # covering the full page height (0-100) at elem3's x extent.
        partially_within_mock.reset_mock()
        result = elem_list.below(pdf_elem3, all_pages=True)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
                call(pdf_elem7, BoundingBox(50, 51, 0, 100)),
                call(pdf_elem8, BoundingBox(50, 51, 0, 100)),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 3)
        self.assertIn(pdf_elem4, result)
        self.assertIn(pdf_elem6, result)
        self.assertIn(pdf_elem8, result)
        # Test specifying tolerance
        # Tolerance shrinks the box's horizontal extent by 0.1 at each end.
        expected_bbox = BoundingBox(50.1, 50.9, 0, 50)
        partially_within_mock.reset_mock()
        result = elem_list.below(pdf_elem3, tolerance=0.1)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
    @patch.object(PDFElement, "partially_within", autospec=True)
    def test_above(self, partially_within_mock):
        """above queries a box over the element (same x extent, up to the top
        of the page), plus whole earlier pages when all_pages=True.
        """
        # The mock treats an element as inside any box iff its text is "within".
        partially_within_mock.side_effect = (
            lambda self, bounding_box: self.text() == "within"
        )
        elem1 = FakePDFMinerTextElement(text="within")
        elem2 = FakePDFMinerTextElement()
        elem3 = FakePDFMinerTextElement(
            text="within", bounding_box=BoundingBox(50, 51, 50, 51)
        )
        elem4 = FakePDFMinerTextElement(text="within")
        elem5 = FakePDFMinerTextElement()
        elem6 = FakePDFMinerTextElement(text="within")
        elem7 = FakePDFMinerTextElement()
        elem8 = FakePDFMinerTextElement(text="within")
        page1 = Page(elements=[elem1, elem2], width=100, height=100)
        page2 = Page(elements=[elem3, elem4, elem5, elem6], width=100, height=100)
        page3 = Page(elements=[elem7, elem8], width=100, height=100)
        doc = PDFDocument(pages={1: page1, 2: page2, 3: page3})
        elem_list = doc.elements
        pdf_elem1 = self.extract_element_from_list(elem1, elem_list)
        pdf_elem2 = self.extract_element_from_list(elem2, elem_list)
        pdf_elem3 = self.extract_element_from_list(elem3, elem_list)
        pdf_elem4 = self.extract_element_from_list(elem4, elem_list)
        pdf_elem5 = self.extract_element_from_list(elem5, elem_list)
        pdf_elem6 = self.extract_element_from_list(elem6, elem_list)
        result = elem_list.above(pdf_elem3)
        # expected_bbox spans elem3's x extent (50-51), from elem3's top edge
        # (51) up to the top of the page (100).
        expected_bbox = BoundingBox(50, 51, 51, 100)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
        # Non-inclusive: the reference element itself is excluded.
        self.assertEqual(len(result), 2)
        self.assertIn(pdf_elem4, result)
        self.assertIn(pdf_elem6, result)
        # Also test with inclusive=True: the reference element is now included.
        partially_within_mock.reset_mock()
        result = elem_list.above(pdf_elem3, inclusive=True)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 3)
        self.assertIn(pdf_elem3, result)
        self.assertIn(pdf_elem4, result)
        self.assertIn(pdf_elem6, result)
        # Also test with all_pages=True: page 1 is checked against a box
        # covering the full page height (0-100) at elem3's x extent.
        partially_within_mock.reset_mock()
        result = elem_list.above(pdf_elem3, all_pages=True)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, BoundingBox(50, 51, 0, 100)),
                call(pdf_elem2, BoundingBox(50, 51, 0, 100)),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 3)
        self.assertIn(pdf_elem1, result)
        self.assertIn(pdf_elem4, result)
        self.assertIn(pdf_elem6, result)
        # Test specifying tolerance
        # Tolerance shrinks the box's horizontal extent by 0.1 at each end.
        expected_bbox = BoundingBox(50.1, 50.9, 51, 100)
        partially_within_mock.reset_mock()
        result = elem_list.above(pdf_elem3, tolerance=0.1)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
    @patch.object(PDFElement, "partially_within", autospec=True)
    def test_vertically_in_line_with(self, partially_within_mock):
        """vertically_in_line_with queries a full-page-height box at the
        element's x extent, across all pages when all_pages=True.
        """
        # The mock treats an element as inside any box iff its text is "within".
        partially_within_mock.side_effect = (
            lambda self, bounding_box: self.text() == "within"
        )
        elem1 = FakePDFMinerTextElement(text="within")
        elem2 = FakePDFMinerTextElement()
        elem3 = FakePDFMinerTextElement(
            text="within", bounding_box=BoundingBox(50, 51, 50, 51)
        )
        elem4 = FakePDFMinerTextElement(text="within")
        elem5 = FakePDFMinerTextElement()
        elem6 = FakePDFMinerTextElement(text="within")
        elem7 = FakePDFMinerTextElement()
        elem8 = FakePDFMinerTextElement(text="within")
        page1 = Page(elements=[elem1, elem2], width=100, height=100)
        page2 = Page(elements=[elem3, elem4, elem5, elem6], width=100, height=100)
        page3 = Page(elements=[elem7, elem8], width=100, height=100)
        doc = PDFDocument(pages={1: page1, 2: page2, 3: page3})
        elem_list = doc.elements
        pdf_elem1 = self.extract_element_from_list(elem1, elem_list)
        pdf_elem2 = self.extract_element_from_list(elem2, elem_list)
        pdf_elem3 = self.extract_element_from_list(elem3, elem_list)
        pdf_elem4 = self.extract_element_from_list(elem4, elem_list)
        pdf_elem5 = self.extract_element_from_list(elem5, elem_list)
        pdf_elem6 = self.extract_element_from_list(elem6, elem_list)
        pdf_elem7 = self.extract_element_from_list(elem7, elem_list)
        pdf_elem8 = self.extract_element_from_list(elem8, elem_list)
        result = elem_list.vertically_in_line_with(pdf_elem3)
        # expected_bbox spans elem3's x extent (50-51) over the full page
        # height (0-100).
        expected_bbox = BoundingBox(50, 51, 0, 100)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
        # Non-inclusive: the reference element itself is excluded.
        self.assertEqual(len(result), 2)
        self.assertIn(pdf_elem4, result)
        self.assertIn(pdf_elem6, result)
        # Also test with inclusive=True: the reference element is now included.
        partially_within_mock.reset_mock()
        result = elem_list.vertically_in_line_with(pdf_elem3, inclusive=True)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 3)
        self.assertIn(pdf_elem3, result)
        self.assertIn(pdf_elem4, result)
        self.assertIn(pdf_elem6, result)
        # Also test with all_pages=True: every page's elements are checked
        # against the same full-height box.
        partially_within_mock.reset_mock()
        result = elem_list.vertically_in_line_with(pdf_elem3, all_pages=True)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
                call(pdf_elem7, expected_bbox),
                call(pdf_elem8, expected_bbox),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 4)
        self.assertIn(pdf_elem1, result)
        self.assertIn(pdf_elem4, result)
        self.assertIn(pdf_elem6, result)
        self.assertIn(pdf_elem8, result)
        # Test specifying tolerance
        # Tolerance shrinks the box's horizontal extent by 0.1 at each end.
        expected_bbox = BoundingBox(50.1, 50.9, 0, 100)
        partially_within_mock.reset_mock()
        result = elem_list.vertically_in_line_with(pdf_elem3, tolerance=0.1)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
                call(pdf_elem5, expected_bbox),
                call(pdf_elem6, expected_bbox),
            ],
            any_order=True,
        )
    @patch.object(PDFElement, "partially_within", autospec=True)
    def test_horizontally_in_line_with(self, partially_within_mock):
        """horizontally_in_line_with queries a full-page-width box at the
        element's y extent, restricted to elements on the same page.
        """
        # The mock treats an element as inside any box iff its text is "within".
        partially_within_mock.side_effect = (
            lambda self, bounding_box: self.text() == "within"
        )
        elem1 = FakePDFMinerTextElement(
            text="within", bounding_box=BoundingBox(50, 51, 50, 51)
        )
        elem2 = FakePDFMinerTextElement(text="within")
        elem3 = FakePDFMinerTextElement()
        elem4 = FakePDFMinerTextElement(text="within")
        # elem5/elem6 live on page 2; elem6's text is "within" yet it never
        # appears in the results, exercising the same-page restriction.
        elem5 = FakePDFMinerTextElement()
        elem6 = FakePDFMinerTextElement(text="within")
        page1 = Page(elements=[elem1, elem2, elem3, elem4], width=100, height=100)
        page2 = Page(elements=[elem5, elem6], width=100, height=100)
        doc = PDFDocument(pages={1: page1, 2: page2})
        elem_list = doc.elements
        pdf_elem1 = self.extract_element_from_list(elem1, elem_list)
        pdf_elem2 = self.extract_element_from_list(elem2, elem_list)
        pdf_elem3 = self.extract_element_from_list(elem3, elem_list)
        pdf_elem4 = self.extract_element_from_list(elem4, elem_list)
        result = elem_list.horizontally_in_line_with(pdf_elem1)
        # expected_bbox spans the full page width (0-100) over elem1's y
        # extent (50-51).
        expected_bbox = BoundingBox(0, 100, 50, 51)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
        # Non-inclusive: the reference element itself is excluded.
        self.assertEqual(len(result), 2)
        self.assertIn(pdf_elem2, result)
        self.assertIn(pdf_elem4, result)
        # Also test with inclusive=True: the reference element is now included.
        partially_within_mock.reset_mock()
        result = elem_list.horizontally_in_line_with(pdf_elem1, inclusive=True)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 3)
        self.assertIn(pdf_elem1, result)
        self.assertIn(pdf_elem2, result)
        self.assertIn(pdf_elem4, result)
        # Test specifying tolerance
        # Tolerance shrinks the box's vertical extent by 0.1 at each end.
        expected_bbox = BoundingBox(0, 100, 50.1, 50.9)
        partially_within_mock.reset_mock()
        result = elem_list.horizontally_in_line_with(pdf_elem1, tolerance=0.1)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
    @patch.object(PDFElement, "partially_within", autospec=True)
    def test_filter_partially_within_bounding_box(self, partially_within_mock):
        """filter_partially_within_bounding_box keeps elements that report
        being partially within the given box.
        """
        # The mock treats an element as inside any box iff its text is "within".
        partially_within_mock.side_effect = (
            lambda self, bounding_box: self.text() == "within"
        )
        elem1 = FakePDFMinerTextElement(text="within")
        elem2 = FakePDFMinerTextElement(text="within")
        elem3 = FakePDFMinerTextElement()
        elem4 = FakePDFMinerTextElement(text="within")
        # elem5/elem6 live on page 2; elem6's text is "within" yet it never
        # appears in the results.
        elem5 = FakePDFMinerTextElement()
        elem6 = FakePDFMinerTextElement(text="within")
        page1 = Page(elements=[elem1, elem2, elem3, elem4], width=100, height=100)
        page2 = Page(elements=[elem5, elem6], width=100, height=100)
        doc = PDFDocument(pages={1: page1, 2: page2})
        elem_list = doc.elements
        pdf_elem1 = self.extract_element_from_list(elem1, elem_list)
        pdf_elem2 = self.extract_element_from_list(elem2, elem_list)
        pdf_elem3 = self.extract_element_from_list(elem3, elem_list)
        pdf_elem4 = self.extract_element_from_list(elem4, elem_list)
        # Second argument presumably selects page 1 — only page 1's elements
        # appear in the checked calls below. TODO(review): confirm it is a
        # page number.
        result = elem_list.filter_partially_within_bounding_box(
            BoundingBox(0, 1, 0, 1), 1
        )
        # Each element of page 1 should be tested against the given box.
        expected_bbox = BoundingBox(0, 1, 0, 1)
        partially_within_mock.assert_has_calls(
            [
                call(pdf_elem1, expected_bbox),
                call(pdf_elem2, expected_bbox),
                call(pdf_elem3, expected_bbox),
                call(pdf_elem4, expected_bbox),
            ],
            any_order=True,
        )
        self.assertEqual(len(result), 3)
        self.assertIn(pdf_elem1, result)
        self.assertIn(pdf_elem2, result)
        self.assertIn(pdf_elem4, result)
def test_before(self):
result = self.elem_list.before(self.elem_list[2])
self.assertEqual(len(result), 2)
self.assertIn(self.elem_list[0], result)
self.assertIn(self.elem_list[1], result)
result = self.elem_list.before(self.elem_list[2], inclusive=True)
self.assertEqual(len(result), 3)
self.assertIn(self.elem_list[0], result)
self.assertIn(self.elem_list[1], result)
self.assertIn(self.elem_list[2], result)
def test_after(self):
result = self.elem_list.after(self.elem_list[3])
self.assertEqual(len(result), 2)
self.assertIn(self.elem_list[4], result)
self.assertIn(self.elem_list[5], result)
result = self.elem_list.after(self.elem_list[3], inclusive=True)
self.assertEqual(len(result), 3)
self.assertIn(self.elem_list[3], result)
self.assertIn(self.elem_list[4], result)
self.assertIn(self.elem_list[5], result)
def test_between(self):
result = self.elem_list.between(self.elem_list[2], self.elem_list[5])
self.assertEqual(len(result), 2)
self.assertIn(self.elem_list[3], result)
self.assertIn(self.elem_list[4], result)
result = self.elem_list.between(
self.elem_list[2], self.elem_list[5], inclusive=True
)
self.assertEqual(len(result), 4)
self.assertIn(self.elem_list[2], result)
self.assertIn(self.elem_list[3], result)
self.assertIn(self.elem_list[4], result)
self.assertIn(self.elem_list[5], result)
def test_extract_single_element(self):
with self.assertRaises(MultipleElementsFoundError):
self.elem_list.extract_single_element()
with self.assertRaises(NoElementFoundError):
self.elem_list.filter_by_tag("non_existent_tag").extract_single_element()
elem1 = FakePDFMinerTextElement()
page = Page(elements=[elem1], width=100, height=100)
doc = PDFDocument(pages={1: page})
pdf_elem_1 = self.extract_element_from_list(elem1, doc.elements)
result = doc.elements.extract_single_element()
self.assertEqual(result, pdf_elem_1)
def test_add_element(self):
empty_elem_list = self.elem_list.filter_by_tag("non_existent_tag")
result = empty_elem_list.add_element(self.elem_list[0])
self.assertEqual(len(result), 1)
self.assertIn(self.elem_list[0], result)
result = result.add_element(self.elem_list[0])
self.assertEqual(len(result), 1)
self.assertIn(self.elem_list[0], result)
result = result.add_element(self.elem_list[4])
self.assertEqual(len(result), 2)
self.assertIn(self.elem_list[0], result)
self.assertIn(self.elem_list[4], result)
def test_add_elements(self):
empty_elem_list = self.elem_list.filter_by_tag("non_existent_tag")
result = empty_elem_list.add_elements(self.elem_list[0], self.elem_list[1])
self.assertEqual(len(result), 2)
self.assertIn(self.elem_list[0], result)
self.assertIn(self.elem_list[1], result)
def test_remove_element(self):
original_length = len(self.elem_list)
result = self.elem_list.remove_element(self.elem_list[0])
self.assertEqual(len(result), original_length - 1)
self.assertNotIn(self.elem_list[0], result)
result = result.remove_element(self.elem_list[0])
self.assertEqual(len(result), original_length - 1)
self.assertNotIn(self.elem_list[0], result)
result = result.remove_element(self.elem_list[4])
self.assertEqual(len(result), original_length - 2)
self.assertNotIn(self.elem_list[0], result)
self.assertNotIn(self.elem_list[4], result)
def test_remove_elements(self):
original_length = len(self.elem_list)
result = self.elem_list.remove_elements(self.elem_list[0], self.elem_list[1])
self.assertEqual(len(result), original_length - 2)
self.assertNotIn(self.elem_list[0], result)
self.assertNotIn(self.elem_list[1], result)
def test_repr(self):
self.assertEqual(repr(self.elem_list), "<ElementList of 6 elements>")
def test_getitem(self):
self.assert_original_element_equal(self.elem1, self.elem_list[0])
self.assertIsInstance(self.elem_list[1:3], ElementList)
self.assertEqual(len(self.elem_list[1:3]), 2)
self.assertIn(self.elem_list[1], self.elem_list[1:3])
self.assertIn(self.elem_list[2], self.elem_list[1:3])
def test_eq(self):
    """Equality requires both the same indexes and the same underlying document.

    Also checks that comparing against a non-ElementList raises
    NotImplementedError (the implementation apparently raises rather than
    returning NotImplemented — NOTE(review): confirm this is intentional).
    """
    with self.assertRaises(NotImplementedError):
        self.elem_list == "foo"
    # Same document, same indexes -> equal.
    second_elem_list = ElementList(self.doc, set([0, 1, 2, 3, 4, 5]))
    self.assertTrue(self.elem_list == second_elem_list)
    # Test with different indexes
    second_elem_list = ElementList(self.doc, set([0, 1, 2, 3, 4]))
    self.assertFalse(self.elem_list == second_elem_list)
    # Test with different document
    # A structurally identical but distinct document must compare unequal.
    doc = PDFDocument(
        pages={
            1: Page(
                elements=[
                    FakePDFMinerTextElement(),
                    FakePDFMinerTextElement(),
                    FakePDFMinerTextElement(),
                    FakePDFMinerTextElement(),
                    FakePDFMinerTextElement(),
                    FakePDFMinerTextElement(),
                ],
                width=100,
                height=100,
            )
        }
    )
    second_elem_list = ElementList(doc, set([0, 1, 2, 3, 4, 5]))
    self.assertFalse(self.elem_list == second_elem_list)
def test_len(self):
    """The fixture list contains exactly six elements."""
    self.assertEqual(6, len(self.elem_list))
def test_sub(self):
    """Subtraction removes the right-hand list's indexes from the left."""
    minuend = ElementList(self.doc, {0, 1, 2, 3, 4, 5})
    subtrahend = ElementList(self.doc, {0, 2})
    difference = minuend - subtrahend
    self.assertEqual(difference, ElementList(self.doc, {1, 3, 4, 5}))
def test_or(self):
    """The | operator returns the union of the two lists' indexes."""
    left = ElementList(self.doc, {0, 2})
    right = ElementList(self.doc, {2, 3, 4})
    union = left | right
    self.assertEqual(union, ElementList(self.doc, {0, 2, 3, 4}))
def test_xor(self):
    """The ^ operator returns the symmetric difference of the indexes."""
    left = ElementList(self.doc, {0, 2})
    right = ElementList(self.doc, {2, 3, 4})
    sym_diff = left ^ right
    self.assertEqual(sym_diff, ElementList(self.doc, {0, 3, 4}))
def test_and(self):
    """The & operator returns the intersection of the two lists' indexes."""
    left = ElementList(self.doc, {0, 2})
    right = ElementList(self.doc, {2, 3, 4})
    common = left & right
    self.assertEqual(common, ElementList(self.doc, {2}))
| 40.29375
| 88
| 0.638348
| 5,642
| 45,129
| 4.833392
| 0.033676
| 0.078034
| 0.084048
| 0.049468
| 0.918959
| 0.893106
| 0.876824
| 0.853025
| 0.839347
| 0.805574
| 0
| 0.035491
| 0.249529
| 45,129
| 1,119
| 89
| 40.329759
| 0.769694
| 0.034745
| 0
| 0.600227
| 0
| 0
| 0.026445
| 0
| 0
| 0
| 0
| 0
| 0.291053
| 1
| 0.0453
| false
| 0
| 0.010193
| 0
| 0.056625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3d6fa83aaa7061e3a55d9c3e540e727756dc2c22
| 187
|
py
|
Python
|
venv/Lib/site-packages/dpyConsole/errors.py
|
oOperaho/Cruzeiro
|
57730376e8ac347531984ef49fc1349e084c2b5a
|
[
"MIT"
] | 13
|
2020-10-07T04:21:24.000Z
|
2022-01-31T20:36:55.000Z
|
venv/Lib/site-packages/dpyConsole/errors.py
|
oOperaho/Cruzeiro
|
57730376e8ac347531984ef49fc1349e084c2b5a
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/dpyConsole/errors.py
|
oOperaho/Cruzeiro
|
57730376e8ac347531984ef49fc1349e084c2b5a
|
[
"MIT"
] | 1
|
2021-06-17T00:35:41.000Z
|
2021-06-17T00:35:41.000Z
|
class CommandNotFound(Exception):
    """Raised when a console command cannot be resolved by name.

    Attributes:
        name: The command name that was looked up and not found.
    """

    def __init__(self, command_name):
        self.name = command_name
        # Pass the message to Exception so repr(e)/e.args carry context too
        # (the original left Exception.args empty, making tracebacks and
        # logging of the bare exception uninformative).
        super().__init__(f"Command with name {command_name} not found")

    def __str__(self):
        return f"Command with name {self.name} not found"
| 26.714286
| 57
| 0.684492
| 24
| 187
| 4.916667
| 0.583333
| 0.186441
| 0.20339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224599
| 187
| 6
| 58
| 31.166667
| 0.813793
| 0
| 0
| 0
| 0
| 0
| 0.208556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
3d7b80b28194fbc97331a5b7daf2ad20906fbcac
| 78
|
py
|
Python
|
openmc/stats/__init__.py
|
janmalec/openmc
|
4a4ac4c351d41fe153ca3341820cc507e484ce50
|
[
"MIT"
] | 262
|
2018-08-09T21:27:03.000Z
|
2022-03-24T05:02:10.000Z
|
openmc/stats/__init__.py
|
janmalec/openmc
|
4a4ac4c351d41fe153ca3341820cc507e484ce50
|
[
"MIT"
] | 753
|
2018-08-03T15:26:57.000Z
|
2022-03-29T23:54:48.000Z
|
openmc/stats/__init__.py
|
janmalec/openmc
|
4a4ac4c351d41fe153ca3341820cc507e484ce50
|
[
"MIT"
] | 196
|
2018-08-06T13:41:14.000Z
|
2022-03-29T20:47:12.000Z
|
from openmc.stats.univariate import *
from openmc.stats.multivariate import *
| 26
| 39
| 0.820513
| 10
| 78
| 6.4
| 0.6
| 0.3125
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 78
| 2
| 40
| 39
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3da4f12db67ec8b91adf11ccf7fcf30560ac096b
| 48
|
py
|
Python
|
automix/transition/__init__.py
|
MZehren/Automix
|
dfaa00a9e7c5c0938c0a9d275c07f3a3e5f87e43
|
[
"MIT"
] | 18
|
2020-07-20T01:51:40.000Z
|
2022-02-25T07:32:11.000Z
|
automix/transition/__init__.py
|
MZehren/Automix
|
dfaa00a9e7c5c0938c0a9d275c07f3a3e5f87e43
|
[
"MIT"
] | 2
|
2021-03-23T03:26:02.000Z
|
2021-07-19T12:51:25.000Z
|
automix/transition/__init__.py
|
MZehren/Automix
|
dfaa00a9e7c5c0938c0a9d275c07f3a3e5f87e43
|
[
"MIT"
] | 5
|
2021-01-03T15:34:28.000Z
|
2022-02-22T06:07:06.000Z
|
"""
Package which generates the transitions.
"""
| 16
| 40
| 0.729167
| 5
| 48
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 48
| 3
| 41
| 16
| 0.833333
| 0.833333
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3da92e496b52c4b7d556be4b0b8d8845da5e22bd
| 34,473
|
py
|
Python
|
train_helper.py
|
ItGirls/event_extraction
|
b8b3bd9c5c34e7a1be086a7fcd608ef89335e4c1
|
[
"MIT"
] | 203
|
2020-05-18T02:26:24.000Z
|
2022-03-30T09:49:42.000Z
|
train_helper.py
|
ItGirls/event_extraction
|
b8b3bd9c5c34e7a1be086a7fcd608ef89335e4c1
|
[
"MIT"
] | 14
|
2020-05-19T02:34:16.000Z
|
2021-09-10T07:52:31.000Z
|
train_helper.py
|
djj-djj/event_extraction
|
dfecdbb7d0a905495065cc7a0a4afd10e6dd1130
|
[
"MIT"
] | 34
|
2020-05-19T02:25:26.000Z
|
2022-03-30T09:49:34.000Z
|
import os
import numpy as np
import time
import logging
from common_utils import set_logger
import tensorflow as tf
from sklearn.metrics import f1_score
from models.bert_mrc import bert_mrc_model_fn_builder
from models.bert_event_type_classification import bert_classification_model_fn_builder
from data_processing.data_utils import *
from data_processing.event_prepare_data import EventRolePrepareMRC, EventTypeClassificationPrepare
# from data_processing.event_prepare_data import EventRoleClassificationPrepare
from data_processing.event_prepare_data import event_input_bert_mrc_mul_fn, event_index_class_input_bert_fn
from data_processing.event_prepare_data import event_binclass_input_bert_fn
from models.bert_event_type_classification import bert_binaryclassification_model_fn_builder
from data_processing.event_prepare_data import event_input_verfify_mrc_fn
from models.event_verify_av import event_verify_mrc_model_fn_builder
from configs.event_config import event_config
# import horovod.tensorflow as hvd
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
logger = set_logger("[run training]")
# logger = logging.getLogger('train')
# logger.setLevel(logging.INFO)
# os.environ['TF_ENABLE_AUTO_MIXED_PRECISION']='1'
def serving_input_receiver_fn():
    """Serving input_fn that builds features from placeholders.

    Returns
    -------
    tf.estimator.export.ServingInputReceiver
    """
    # Features and receiver tensors are the same mapping for this model.
    tensors = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
        'words_seq': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words_seq'),
    }
    return tf.estimator.export.ServingInputReceiver(dict(tensors), tensors)
def bert_serving_input_receiver_fn():
    """Serving input_fn that builds features from placeholders.

    Returns
    -------
    tf.estimator.export.ServingInputReceiver
    """
    # Features and receiver tensors are the same mapping for this model.
    tensors = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
    }
    return tf.estimator.export.ServingInputReceiver(dict(tensors), tensors)
def bert_event_type_serving_input_receiver_fn():
    """Serving input_fn for the event-type classifier.

    The type_index_in_ids_list placeholder is fixed at width 65 — matching
    the label count used elsewhere in this file (class_weight reshape).

    Returns
    -------
    tf.estimator.export.ServingInputReceiver
    """
    tensors = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
        'token_type_ids': tf.placeholder(dtype=tf.int32, shape=[None, None], name="token_type_ids"),
        'type_index_in_ids_list': tf.placeholder(dtype=tf.int32, shape=[None, 65], name="type_index_in_ids_list"),
    }
    return tf.estimator.export.ServingInputReceiver(dict(tensors), tensors)
def bert_event_bin_serving_input_receiver_fn():
    """Serving input_fn for the binary (answerable / not-answerable) classifier.

    Returns
    -------
    tf.estimator.export.ServingInputReceiver
    """
    tensors = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
        'token_type_ids': tf.placeholder(dtype=tf.int32, shape=[None, None], name="token_type_ids"),
    }
    return tf.estimator.export.ServingInputReceiver(dict(tensors), tensors)
def bert_mrc_serving_input_receiver_fn():
    """Serving input_fn for the BERT-MRC role-extraction model.

    Placeholders mirror the training features:
    words, text_length, query_length, token_type_ids.

    Returns
    -------
    tf.estimator.export.ServingInputReceiver
    """
    tensors = {
        'words': tf.placeholder(dtype=tf.int32, shape=[None, None], name='words'),
        'text_length': tf.placeholder(dtype=tf.int32, shape=[None], name='text_length'),
        'query_length': tf.placeholder(dtype=tf.int32, shape=[None], name="query_length"),
        'token_type_ids': tf.placeholder(dtype=tf.int32, shape=[None, None], name="token_type_ids"),
    }
    return tf.estimator.export.ServingInputReceiver(dict(tensors), tensors)
def run_event_role_mrc(args):
    """
    Baseline: event role extraction framed as MRC (machine reading comprehension).
    :param args: parsed CLI args; reads model_checkpoint_dir, model_pb_dir,
        fold_index, train_batch_size, epochs, decay_epoch, dropout_prob,
        rnn_units, num_layers, hidden_units, do_train.
    :return: None — trains/evaluates via tf.estimator and exports a SavedModel.
    """
    # Per-fold checkpoint and exported-model directories.
    model_base_dir = event_config.get(args.model_checkpoint_dir).format(args.fold_index)
    pb_model_dir = event_config.get(args.model_pb_dir).format(args.fold_index)
    vocab_file_path = os.path.join(event_config.get("bert_pretrained_model_path"), event_config.get("vocab_file"))
    bert_config_file = os.path.join(event_config.get("bert_pretrained_model_path"),
                                    event_config.get("bert_config_path"))
    slot_file = os.path.join(event_config.get("slot_list_root_path"),
                             event_config.get("bert_slot_complete_file_name_role"))
    schema_file = os.path.join(event_config.get("data_dir"), event_config.get("event_schema"))
    query_map_file = os.path.join(event_config.get("slot_list_root_path"), event_config.get("query_map_file"))
    # 512 is presumably the max sequence length — TODO confirm against
    # EventRolePrepareMRC's signature.
    data_loader = EventRolePrepareMRC(vocab_file_path, 512, slot_file, schema_file, query_map_file)
    # train_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_train"))
    # eval_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_eval"))
    # data_list,label_start_list,label_end_list,query_len_list,token_type_id_list
    # train_datas, train_labels_start,train_labels_end,train_query_lens,train_token_type_id_list,dev_datas, dev_labels_start,dev_labels_end,dev_query_lens,dev_token_type_id_list = data_loader._read_json_file(train_file,eval_file,True)
    # dev_datas, dev_labels_start,dev_labels_end,dev_query_lens,dev_token_type_id_list = data_loader._read_json_file(eval_file,None,False)
    # train_datas, train_labels_start,train_labels_end,train_query_lens,train_token_type_id_list,dev_datas, dev_labels_start,dev_labels_end,dev_query_lens,dev_token_type_id_list = data_loader._merge_ee_and_re_datas(train_file,eval_file,"relation_extraction/data/train_data.json","relation_extraction/data/dev_data.json")
    # Pre-tokenized per-fold arrays; allow_pickle because they are object arrays.
    train_datas = np.load("data/neg_fold_data_{}/token_ids_train.npy".format(args.fold_index), allow_pickle=True)
    train_labels = np.load("data/neg_fold_data_{}/multi_labels_train.npy".format(args.fold_index), allow_pickle=True)
    train_query_lens = np.load("data/neg_fold_data_{}/query_lens_train.npy".format(args.fold_index), allow_pickle=True)
    train_token_type_id_list = np.load("data/neg_fold_data_{}/token_type_ids_train.npy".format(args.fold_index),
                                       allow_pickle=True)
    dev_datas = np.load("data/neg_fold_data_{}/token_ids_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_labels = np.load("data/neg_fold_data_{}/multi_labels_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_query_lens = np.load("data/neg_fold_data_{}/query_lens_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_token_type_id_list = np.load("data/neg_fold_data_{}/token_type_ids_dev.npy".format(args.fold_index),
                                     allow_pickle=True)
    train_samples_nums = len(train_datas)
    dev_samples_nums = len(dev_datas)
    # Steps per epoch, rounding up for a partial final batch.
    if train_samples_nums % args.train_batch_size != 0:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size) + 1
    else:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size)
    # each_epoch_steps = int(data_loader.train_samples_nums/args.train_batch_size)+1
    logger.info('*****train_set sample nums:{}'.format(train_samples_nums))
    logger.info('*****dev_set sample nums:{}'.format(dev_samples_nums))
    logger.info('*****train each epoch steps:{}'.format(each_epoch_steps))
    train_steps_nums = each_epoch_steps * args.epochs
    # train_steps_nums = each_epoch_steps * args.epochs // hvd.size()
    logger.info('*****train_total_steps:{}'.format(train_steps_nums))
    decay_steps = args.decay_epoch * each_epoch_steps
    logger.info('*****train decay steps:{}'.format(decay_steps))
    # dropout_prob is the dropout (drop) probability.
    # Warmup is 10% of the total training steps.
    params = {"dropout_prob": args.dropout_prob, "num_labels": data_loader.labels_map_len,
              "rnn_size": args.rnn_units, "num_layers": args.num_layers, "hidden_units": args.hidden_units,
              "decay_steps": decay_steps, "train_steps": train_steps_nums,
              "num_warmup_steps": int(train_steps_nums * 0.1)}
    # dist_strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=args.gpu_nums)
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True
    # Checkpoint/summarize once per epoch; keep only the 3 newest checkpoints.
    run_config = tf.estimator.RunConfig(
        model_dir=model_base_dir,
        save_summary_steps=each_epoch_steps,
        save_checkpoints_steps=each_epoch_steps,
        session_config=config_tf,
        keep_checkpoint_max=3,
        # train_distribute=dist_strategy
    )
    bert_init_checkpoints = os.path.join(event_config.get("bert_pretrained_model_path"),
                                         event_config.get("bert_init_checkpoints"))
    # init_checkpoints = "output/model/merge_usingtype_roberta_traindev_event_role_bert_mrc_model_desmodified_lowercase/checkpoint/model.ckpt-1218868"
    model_fn = bert_mrc_model_fn_builder(bert_config_file, bert_init_checkpoints, args)
    estimator = tf.estimator.Estimator(
        model_fn,
        params=params,
        config=run_config)
    if args.do_train:
        train_input_fn = lambda: event_input_bert_mrc_mul_fn(
            train_datas, train_labels, train_token_type_id_list, train_query_lens,
            is_training=True, is_testing=False, args=args)
        eval_input_fn = lambda: event_input_bert_mrc_mul_fn(
            dev_datas, dev_labels, dev_token_type_id_list, dev_query_lens,
            is_training=False, is_testing=False, args=args)
        train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=train_steps_nums
                                            )
        # Keep only the single best eval checkpoint for serving.
        exporter = tf.estimator.BestExporter(exports_to_keep=1,
                                             serving_input_receiver_fn=bert_mrc_serving_input_receiver_fn)
        eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, exporters=[exporter], throttle_secs=0)
        # for _ in range(args.epochs):
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    # "bert_ce_model_pb"
    # Export the final model regardless of do_train.
    estimator.export_saved_model(pb_model_dir, bert_mrc_serving_input_receiver_fn)
def run_event_classification(args):
    """
    Event-type detection as multi-label binary classification, borrowing the
    NL2SQL approach of predicting per-column (here: per-type) membership.
    :param args: parsed CLI args; reads model_checkpoint_dir, model_pb_dir,
        fold_index, train_batch_size, epochs, decay_epoch, dropout_prob,
        rnn_units, num_layers, hidden_units, do_train.
    :return: None — trains/evaluates via tf.estimator and exports a SavedModel.
    """
    # Per-fold checkpoint and exported-model directories.
    model_base_dir = event_config.get(args.model_checkpoint_dir).format(args.fold_index)
    pb_model_dir = event_config.get(args.model_pb_dir).format(args.fold_index)
    print(model_base_dir)
    print(pb_model_dir)
    vocab_file_path = os.path.join(event_config.get("bert_pretrained_model_path"), event_config.get("vocab_file"))
    bert_config_file = os.path.join(event_config.get("bert_pretrained_model_path"),
                                    event_config.get("bert_config_path"))
    event_type_file = os.path.join(event_config.get("slot_list_root_path"), event_config.get("event_type_file"))
    data_loader = EventTypeClassificationPrepare(vocab_file_path, 512, event_type_file)
    train_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_train"))
    eval_file = os.path.join(event_config.get("data_dir"), event_config.get("event_data_file_eval"))
    # train_data_list,train_label_list,train_token_type_id_list,dev_data_list,dev_label_list,dev_token_type_id_list = data_loader._read_json_file(train_file,eval_file,is_train=True)
    # Pre-tokenized per-fold arrays; allow_pickle because they are object arrays.
    train_data_list = np.load("data/index_type_fold_data_{}/token_ids_train.npy".format(args.fold_index),
                              allow_pickle=True)
    train_label_list = np.load("data/index_type_fold_data_{}/labels_train.npy".format(args.fold_index),
                               allow_pickle=True)
    train_token_type_id_list = np.load("data/index_type_fold_data_{}/token_type_ids_train.npy".format(args.fold_index),
                                       allow_pickle=True)
    train_type_index_ids_list = np.load(
        "data/index_type_fold_data_{}/type_index_in_token_ids_train.npy".format(args.fold_index), allow_pickle=True)
    dev_data_list = np.load("data/index_type_fold_data_{}/token_ids_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_label_list = np.load("data/index_type_fold_data_{}/labels_dev.npy".format(args.fold_index), allow_pickle=True)
    dev_token_type_id_list = np.load("data/index_type_fold_data_{}/token_type_ids_dev.npy".format(args.fold_index),
                                     allow_pickle=True)
    dev_type_index_ids_list = np.load(
        "data/index_type_fold_data_{}/type_index_in_token_ids_dev.npy".format(args.fold_index), allow_pickle=True)
    train_labels = np.array(train_label_list)
    # print(train_labels.shape)
    print(train_labels.shape)
    # Inverse-frequency class weights: each label is weighted by
    # max_label_count / label_count to counter class imbalance.
    # The (1, 65) reshape hard-codes 65 event types.
    a = np.sum(train_labels, axis=0)
    a = [max(a) / ele for ele in a]
    class_weight = np.array(a)
    class_weight = np.reshape(class_weight, (1, 65))
    print(class_weight)
    # dev_datas,dev_token_type_ids,dev_labels = data_loader._read_json_file(eval_file)
    train_samples_nums = len(train_data_list)
    dev_samples_nums = len(dev_data_list)
    # Steps per epoch, rounding up for a partial final batch.
    if train_samples_nums % args.train_batch_size != 0:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size) + 1
    else:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size)
    # each_epoch_steps = int(data_loader.train_samples_nums/args.train_batch_size)+1
    logger.info('*****train_set sample nums:{}'.format(train_samples_nums))
    logger.info('*****train each epoch steps:{}'.format(each_epoch_steps))
    train_steps_nums = each_epoch_steps * args.epochs
    # train_steps_nums = each_epoch_steps * args.epochs // hvd.size()
    logger.info('*****train_total_steps:{}'.format(train_steps_nums))
    decay_steps = args.decay_epoch * each_epoch_steps
    logger.info('*****train decay steps:{}'.format(decay_steps))
    # dropout_prob is the dropout (drop) probability.
    params = {"dropout_prob": args.dropout_prob, "num_labels": data_loader.labels_map_len,
              "rnn_size": args.rnn_units, "num_layers": args.num_layers, "hidden_units": args.hidden_units,
              "decay_steps": decay_steps, "class_weight": class_weight}
    # dist_strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=args.gpu_nums)
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True
    # "bert_ce_model_dir"
    # mirrored_strategy = tf.distribute.MirroredStrategy()
    # config_tf.gpu_options.visible_device_list = str(hvd.local_rank())
    # checkpoint_path = os.path.join(bert_config.get(args.model_checkpoint_dir), str(hvd.rank()))
    # save_summary_steps > total steps effectively disables summaries;
    # keep only the newest checkpoint.
    run_config = tf.estimator.RunConfig(
        model_dir=model_base_dir,
        save_summary_steps=train_steps_nums + 10,
        save_checkpoints_steps=each_epoch_steps,
        session_config=config_tf,
        keep_checkpoint_max=1,
        # train_distribute=dist_strategy
    )
    bert_init_checkpoints = os.path.join(event_config.get("bert_pretrained_model_path"),
                                         event_config.get("bert_init_checkpoints"))
    model_fn = bert_classification_model_fn_builder(bert_config_file, bert_init_checkpoints, args)
    estimator = tf.estimator.Estimator(
        model_fn,
        params=params,
        config=run_config)
    if args.do_train:
        # train_input_fn = lambda: data_loader.create_dataset(is_training=True,is_testing=False, args=args)
        # eval_input_fn = lambda: data_loader.create_dataset(is_training=False,is_testing=False,args=args)
        # train_X,train_Y = np.load(data_loader.train_X_path,allow_pickle=True),np.load(data_loader.train_Y_path,allow_pickle=True)
        # train_input_fn = lambda :event_class_input_bert_fn(train_data_list,token_type_ids=train_token_type_id_list,label_map_len=data_loader.labels_map_len,
        #                                              is_training=True,is_testing=False,args=args,input_Ys=train_label_list)
        train_input_fn = lambda: event_index_class_input_bert_fn(train_data_list,
                                                                 token_type_ids=train_token_type_id_list,
                                                                 type_index_ids_list=train_type_index_ids_list,
                                                                 label_map_len=data_loader.labels_map_len,
                                                                 is_training=True, is_testing=False, args=args,
                                                                 input_Ys=train_label_list)
        # eval_X,eval_Y = np.load(data_loader.valid_X_path,allow_pickle=True),np.load(data_loader.valid_Y_path,allow_pickle=True)
        # eval_input_fn = lambda: event_class_input_bert_fn(dev_data_list,token_type_ids=dev_token_type_id_list,label_map_len=data_loader.labels_map_len,
        #                                              is_training=False,is_testing=False,args=args,input_Ys=dev_label_list)
        eval_input_fn = lambda: event_index_class_input_bert_fn(dev_data_list, token_type_ids=dev_token_type_id_list,
                                                                type_index_ids_list=dev_type_index_ids_list,
                                                                label_map_len=data_loader.labels_map_len,
                                                                is_training=False, is_testing=False, args=args,
                                                                input_Ys=dev_label_list)
        train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=train_steps_nums
                                            )
        # Keep only the single best eval checkpoint for serving.
        exporter = tf.estimator.BestExporter(exports_to_keep=1,
                                             serving_input_receiver_fn=bert_event_type_serving_input_receiver_fn)
        eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, throttle_secs=0, exporters=[exporter])
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    # "bert_ce_model_pb"
    # Export the final model regardless of do_train.
    estimator.export_saved_model(pb_model_dir, bert_event_type_serving_input_receiver_fn)
def run_event_binclassification(args):
    """
    The EAV (answer-verification) module of Retro-Reader: a first-pass reader
    that predicts whether a question is answerable at all.
    :param args: parsed CLI args; reads model_checkpoint_dir, model_pb_dir,
        fold_index, train_batch_size, epochs, decay_epoch, dropout_prob,
        rnn_units, num_layers, hidden_units, do_train.
    :return: None — trains/evaluates via tf.estimator and exports a SavedModel.
    """
    # Per-fold checkpoint and exported-model directories.
    model_base_dir = event_config.get(args.model_checkpoint_dir).format(args.fold_index)
    pb_model_dir = event_config.get(args.model_pb_dir).format(args.fold_index)
    print(model_base_dir)
    print(pb_model_dir)
    vocab_file_path = os.path.join(event_config.get("bert_pretrained_model_path"), event_config.get("vocab_file"))
    bert_config_file = os.path.join(event_config.get("bert_pretrained_model_path"),
                                    event_config.get("bert_config_path"))
    event_type_file = os.path.join(event_config.get("slot_list_root_path"), event_config.get("event_type_file"))
    # data_loader =EventTypeClassificationPrepare(vocab_file_path,512,event_type_file)
    # train_file = os.path.join(event_config.get("data_dir"),event_config.get("event_data_file_train"))
    # eval_file = os.path.join(event_config.get("data_dir"),event_config.get("event_data_file_eval"))
    # train_data_list,train_label_list,train_token_type_id_list,dev_data_list,dev_label_list,dev_token_type_id_list = data_loader._read_json_file(train_file,eval_file,is_train=True)
    # Pre-tokenized per-fold arrays; allow_pickle because they are object arrays.
    train_data_list = np.load("data/verify_neg_fold_data_{}/token_ids_train.npy".format(args.fold_index),
                              allow_pickle=True)
    # train_label_list = np.load("data/verify_neg_fold_data_{}/has_answer_train.npy".format(args.fold_index),allow_pickle=True)
    train_label_list = []
    train_start_labels = np.load("data/verify_neg_fold_data_{}/labels_start_train.npy".format(args.fold_index),
                                 allow_pickle=True)
    dev_start_labels = np.load("data/verify_neg_fold_data_{}/labels_start_dev.npy".format(args.fold_index),
                               allow_pickle=True)
    train_token_type_id_list = np.load("data/verify_neg_fold_data_{}/token_type_ids_train.npy".format(args.fold_index),
                                       allow_pickle=True)
    dev_data_list = np.load("data/verify_neg_fold_data_{}/token_ids_dev.npy".format(args.fold_index), allow_pickle=True)
    # dev_label_list = np.load("data/verify_neg_fold_data_{}/has_answer_dev.npy".format(args.fold_index),allow_pickle=True)
    dev_label_list = []
    dev_token_type_id_list = np.load("data/verify_neg_fold_data_{}/token_type_ids_dev.npy".format(args.fold_index),
                                     allow_pickle=True)
    # dev_datas,dev_token_type_ids,dev_labels = data_loader._read_json_file(eval_file)
    train_samples_nums = len(train_data_list)
    # Derive the binary "answerable" label from the span-start labels:
    # no start position anywhere (sum == 0) means unanswerable -> 0, else 1.
    for i in range(train_samples_nums):
        if sum(train_start_labels[i]) == 0:
            train_label_list.append(0)
        else:
            train_label_list.append(1)
    train_label_list = np.array(train_label_list).reshape((train_samples_nums, 1))
    dev_samples_nums = len(dev_data_list)
    for i in range(dev_samples_nums):
        if sum(dev_start_labels[i]) == 0:
            dev_label_list.append(0)
        else:
            dev_label_list.append(1)
    dev_label_list = np.array(dev_label_list).reshape((dev_samples_nums, 1))
    # Steps per epoch, rounding up for a partial final batch.
    if train_samples_nums % args.train_batch_size != 0:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size) + 1
    else:
        each_epoch_steps = int(train_samples_nums / args.train_batch_size)
    # each_epoch_steps = int(data_loader.train_samples_nums/args.train_batch_size)+1
    logger.info('*****train_set sample nums:{}'.format(train_samples_nums))
    logger.info('*****train each epoch steps:{}'.format(each_epoch_steps))
    train_steps_nums = each_epoch_steps * args.epochs
    # train_steps_nums = each_epoch_steps * args.epochs // hvd.size()
    logger.info('*****train_total_steps:{}'.format(train_steps_nums))
    decay_steps = args.decay_epoch * each_epoch_steps
    logger.info('*****train decay steps:{}'.format(decay_steps))
    # dropout_prob is the dropout (drop) probability.
    # Binary task: single output label, uniform class weight.
    params = {"dropout_prob": args.dropout_prob, "num_labels": 1,
              "rnn_size": args.rnn_units, "num_layers": args.num_layers, "hidden_units": args.hidden_units,
              "decay_steps": decay_steps, "class_weight": 1}
    # dist_strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=args.gpu_nums)
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True
    # "bert_ce_model_dir"
    # mirrored_strategy = tf.distribute.MirroredStrategy()
    # config_tf.gpu_options.visible_device_list = str(hvd.local_rank())
    # checkpoint_path = os.path.join(bert_config.get(args.model_checkpoint_dir), str(hvd.rank()))
    # save_summary_steps > total steps effectively disables summaries;
    # keep only the newest checkpoint.
    run_config = tf.estimator.RunConfig(
        model_dir=model_base_dir,
        save_summary_steps=train_steps_nums + 10,
        save_checkpoints_steps=each_epoch_steps,
        session_config=config_tf,
        keep_checkpoint_max=1,
        # train_distribute=dist_strategy
    )
    bert_init_checkpoints = os.path.join(event_config.get("bert_pretrained_model_path"),
                                         event_config.get("bert_init_checkpoints"))
    model_fn = bert_binaryclassification_model_fn_builder(bert_config_file, bert_init_checkpoints, args)
    estimator = tf.estimator.Estimator(
        model_fn,
        params=params,
        config=run_config)
    if args.do_train:
        # train_input_fn = lambda: data_loader.create_dataset(is_training=True,is_testing=False, args=args)
        # eval_input_fn = lambda: data_loader.create_dataset(is_training=False,is_testing=False,args=args)
        # train_X,train_Y = np.load(data_loader.train_X_path,allow_pickle=True),np.load(data_loader.train_Y_path,allow_pickle=True)
        # train_input_fn = lambda :event_class_input_bert_fn(train_data_list,token_type_ids=train_token_type_id_list,label_map_len=data_loader.labels_map_len,
        #                                              is_training=True,is_testing=False,args=args,input_Ys=train_label_list)
        train_input_fn = lambda: event_binclass_input_bert_fn(train_data_list, token_type_ids=train_token_type_id_list,
                                                              label_map_len=1,
                                                              is_training=True, is_testing=False, args=args,
                                                              input_Ys=train_label_list)
        # eval_X,eval_Y = np.load(data_loader.valid_X_path,allow_pickle=True),np.load(data_loader.valid_Y_path,allow_pickle=True)
        # eval_input_fn = lambda: event_class_input_bert_fn(dev_data_list,token_type_ids=dev_token_type_id_list,label_map_len=data_loader.labels_map_len,
        #                                              is_training=False,is_testing=False,args=args,input_Ys=dev_label_list)
        eval_input_fn = lambda: event_binclass_input_bert_fn(dev_data_list, token_type_ids=dev_token_type_id_list,
                                                             label_map_len=1,
                                                             is_training=False, is_testing=False, args=args,
                                                             input_Ys=dev_label_list)
        train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=train_steps_nums
                                            )
        # Keep only the single best eval checkpoint for serving.
        exporter = tf.estimator.BestExporter(exports_to_keep=1,
                                             serving_input_receiver_fn=bert_event_bin_serving_input_receiver_fn)
        eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, throttle_secs=0, exporters=[exporter])
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    # "bert_ce_model_pb"
    # Export the final model regardless of do_train.
    estimator.export_saved_model(pb_model_dir, bert_event_bin_serving_input_receiver_fn)
def run_event_verify_role_mrc(args):
    """Train the retro-reader second-stage verification model, which jointly
    learns role (argument-span) extraction and question answerability.

    :param args: parsed CLI namespace; reads fold_index, model dir keys,
        train_batch_size, epochs, decay_epoch, dropout/rnn/layer
        hyper-parameters and the do_train flag.
    :return: None — writes checkpoints under the model dir and exports a
        SavedModel to the pb dir.
    """
    model_base_dir = event_config.get(args.model_checkpoint_dir).format(args.fold_index)
    pb_model_dir = event_config.get(args.model_pb_dir).format(args.fold_index)
    vocab_file_path = os.path.join(event_config.get("bert_pretrained_model_path"), event_config.get("vocab_file"))
    bert_config_file = os.path.join(event_config.get("bert_pretrained_model_path"),
                                    event_config.get("bert_config_path"))
    slot_file = os.path.join(event_config.get("slot_list_root_path"),
                             event_config.get("bert_slot_complete_file_name_role"))
    schema_file = os.path.join(event_config.get("data_dir"), event_config.get("event_schema"))
    query_map_file = os.path.join(event_config.get("slot_list_root_path"), event_config.get("query_map_file"))
    # NOTE(review): every direct use of the loader below is pre-computed into
    # .npy arrays; the instance is kept only in case its constructor side
    # effects (vocab/slot/schema parsing) matter — confirm before removing.
    data_loader = EventRolePrepareMRC(vocab_file_path, 512, slot_file, schema_file, query_map_file)

    def _load(name):
        # All fold-specific arrays live under the same directory; a single
        # path template replaces eight copies of the same format string.
        path = "data/verify_neg_fold_data_{}/{}.npy".format(args.fold_index, name)
        return np.load(path, allow_pickle=True)

    train_datas = _load("token_ids_train")
    train_token_type_id_list = _load("token_type_ids_train")
    dev_datas = _load("token_ids_dev")
    dev_token_type_id_list = _load("token_type_ids_dev")
    train_query_lens = _load("query_lens_train")
    dev_query_lens = _load("query_lens_dev")
    train_start_labels = _load("labels_start_train")
    dev_start_labels = _load("labels_start_dev")
    train_end_labels = _load("labels_end_train")
    dev_end_labels = _load("labels_end_dev")
    train_samples_nums = len(train_datas)
    dev_samples_nums = len(dev_datas)
    # A question is "answerable" (label 1) iff its start-label vector marks
    # at least one position.
    train_has_answer_label_list = np.array(
        [0 if sum(labels) == 0 else 1 for labels in train_start_labels]
    ).reshape((train_samples_nums, 1))
    dev_has_answer_label_list = np.array(
        [0 if sum(labels) == 0 else 1 for labels in dev_start_labels]
    ).reshape((dev_samples_nums, 1))
    # Ceiling division: a partial final batch still counts as a step.
    each_epoch_steps = (train_samples_nums + args.train_batch_size - 1) // args.train_batch_size
    logger.info('*****train_set sample nums:{}'.format(train_samples_nums))
    logger.info('*****dev_set sample nums:{}'.format(dev_samples_nums))
    logger.info('*****train each epoch steps:{}'.format(each_epoch_steps))
    train_steps_nums = each_epoch_steps * args.epochs
    logger.info('*****train_total_steps:{}'.format(train_steps_nums))
    decay_steps = args.decay_epoch * each_epoch_steps
    logger.info('*****train decay steps:{}'.format(decay_steps))
    params = {"dropout_prob": args.dropout_prob, "num_labels": 2,
              "rnn_size": args.rnn_units, "num_layers": args.num_layers, "hidden_units": args.hidden_units,
              "decay_steps": decay_steps, "train_steps": train_steps_nums,
              # warm up over the first 10% of training steps
              "num_warmup_steps": int(train_steps_nums * 0.1)}
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True  # claim GPU memory on demand
    run_config = tf.estimator.RunConfig(
        model_dir=model_base_dir,
        save_summary_steps=each_epoch_steps,
        save_checkpoints_steps=each_epoch_steps,  # one checkpoint per epoch
        session_config=config_tf,
        keep_checkpoint_max=3,
    )
    bert_init_checkpoints = os.path.join(event_config.get("bert_pretrained_model_path"),
                                         event_config.get("bert_init_checkpoints"))
    model_fn = event_verify_mrc_model_fn_builder(bert_config_file, bert_init_checkpoints, args)
    estimator = tf.estimator.Estimator(
        model_fn,
        params=params,
        config=run_config)
    if args.do_train:
        train_input_fn = lambda: event_input_verfify_mrc_fn(
            train_datas, train_start_labels, train_end_labels, train_token_type_id_list, train_query_lens,
            train_has_answer_label_list,
            is_training=True, is_testing=False, args=args)
        eval_input_fn = lambda: event_input_verfify_mrc_fn(
            dev_datas, dev_start_labels, dev_end_labels, dev_token_type_id_list, dev_query_lens,
            dev_has_answer_label_list,
            is_training=False, is_testing=False, args=args)
        train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=train_steps_nums)
        # Keep only the single best eval checkpoint for serving.
        exporter = tf.estimator.BestExporter(exports_to_keep=1,
                                             serving_input_receiver_fn=bert_mrc_serving_input_receiver_fn)
        eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, exporters=[exporter], throttle_secs=0)
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
        estimator.export_saved_model(pb_model_dir, bert_mrc_serving_input_receiver_fn)
| 63.603321
| 320
| 0.711542
| 4,807
| 34,473
| 4.641564
| 0.052216
| 0.029446
| 0.040158
| 0.037469
| 0.953164
| 0.937926
| 0.919102
| 0.904401
| 0.891583
| 0.871818
| 0
| 0.00432
| 0.18748
| 34,473
| 541
| 321
| 63.720887
| 0.792253
| 0.229281
| 0
| 0.657068
| 0
| 0
| 0.141133
| 0.082843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02356
| false
| 0
| 0.044503
| 0
| 0.081152
| 0.015707
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3daed6f58209c91edf2eaa9ed7eb396be2af00ff
| 4,451
|
py
|
Python
|
tests/test_process_listing.py
|
ProzorroUKR/prozorro_chronograph
|
f8a560322259b5bb07035b133f545a614130de73
|
[
"Apache-2.0"
] | null | null | null |
tests/test_process_listing.py
|
ProzorroUKR/prozorro_chronograph
|
f8a560322259b5bb07035b133f545a614130de73
|
[
"Apache-2.0"
] | null | null | null |
tests/test_process_listing.py
|
ProzorroUKR/prozorro_chronograph
|
f8a560322259b5bb07035b133f545a614130de73
|
[
"Apache-2.0"
] | null | null | null |
from uuid import uuid4
from datetime import timedelta
from unittest.mock import patch, Mock
from freezegun import freeze_time
from prozorro_chronograph.utils import get_now
from prozorro_chronograph.scheduler import process_listing, push
from prozorro_chronograph.settings import TZ
from .base import BaseTenderTest
class TestTenderProcessListing(BaseTenderTest):
    """Tests for scheduler.process_listing: which follow-up job ('resync'
    vs 'recheck') gets scheduled for a tender, and what is logged.

    Patch decorators apply bottom-up, so mock parameters arrive in the
    order add_job, sleep, then the remaining patched names; ``caplog`` is
    the pytest log-capture fixture.
    """
    @freeze_time("2012-01-14")
    @patch("prozorro_chronograph.scheduler.check_auction")
    @patch("prozorro_chronograph.scheduler.randint", return_value=2)
    @patch("prozorro_chronograph.scheduler.asyncio.sleep")
    @patch("prozorro_chronograph.scheduler.scheduler.add_job")
    async def test_process_listing_without_next_check(self, mock_add_job, mock_sleep, _, __, caplog):
        """A tender with an auctionPeriod but no next_check gets a one-off
        'resync' job scheduled randint(=2) seconds from the frozen now."""
        tender = {
            "id": uuid4().hex,
            "submissionMethodDetails": {"quick": "value"},
            "auctionPeriod": {
                "shouldStartAfter": (get_now() + timedelta(days=2)).isoformat(),
                "startDate": (get_now() + timedelta(days=1)).isoformat()
            }
        }
        server_id_cookie = "value"
        await process_listing(server_id_cookie, tender)
        # NOTE(review): unlike the recheck expectations below, no timezone=
        # kwarg is expected here — confirm process_listing really omits it.
        mock_add_job.assert_called_once_with(
            push,
            "date",
            run_date=get_now() + timedelta(seconds=2),
            id=f'resync_{tender["id"]}',
            name=f'Resync {tender["id"]}',
            misfire_grace_time=60 * 60,
            args=["resync", tender["id"], server_id_cookie],
            replace_existing=True,
        )
        assert f'Start processing tender: {tender["id"]}' in caplog.messages[0]
        assert f'Set resync job for tender {tender["id"]}' in caplog.messages[1]
        assert len(caplog.messages) == 2
        mock_sleep.assert_called_once_with(1)

    @freeze_time("2012-01-14")
    @patch("prozorro_chronograph.scheduler.check_auction")
    # randint is replaced with a plain Mock (passed as `new`), so no extra
    # mock parameter is injected for it — hence this test's shorter
    # signature compared to its siblings.
    @patch("prozorro_chronograph.scheduler.randint", Mock(return_value=2))
    @patch("prozorro_chronograph.scheduler.asyncio.sleep")
    @patch("prozorro_chronograph.scheduler.scheduler.add_job")
    async def test_process_listing_with_next_check(self, mock_add_job, mock_sleep, _, caplog):
        """A tender whose next_check is already in the past gets a 'recheck'
        job anchored on the (frozen) current time."""
        next_check = get_now() - timedelta(days=2)
        tenant_id = uuid4().hex
        tender = {
            "id": tenant_id,
            "next_check": next_check.isoformat(),
        }
        server_id_cookie = "value"
        await process_listing(server_id_cookie, tender)
        mock_add_job.assert_called_once()
        mock_add_job.assert_called_with(
            push,
            "date",
            run_date=get_now() + timedelta(seconds=2),
            timezone=TZ,
            id=f'recheck_{tender["id"]}',
            name=f'Recheck {tender["id"]}',
            misfire_grace_time=60 * 60,
            replace_existing=True,
            args=["recheck", tender["id"], server_id_cookie],
        )
        assert f'Start processing tender: {tender["id"]}' in caplog.messages[0]
        assert f"Tender {tenant_id} don't need to resync" in caplog.messages[1]
        assert len(caplog.messages) == 2
        mock_sleep.assert_called_once_with(1)

    @freeze_time("2012-01-14")
    @patch("prozorro_chronograph.scheduler.check_auction")
    @patch("prozorro_chronograph.scheduler.randint", return_value=2)
    @patch("prozorro_chronograph.scheduler.asyncio.sleep")
    @patch("prozorro_chronograph.scheduler.scheduler.add_job")
    async def test_process_listing_with_next_check_without_recheck_job(self, mock_add_job, mock_sleep, _, __, caplog):
        """A tender whose next_check lies in the future gets the 'recheck'
        job anchored on that future next_check instead of now."""
        next_check = get_now() + timedelta(days=2)
        tenant_id = uuid4().hex
        tender = {
            "id": tenant_id,
            "next_check": next_check.isoformat(),
        }
        server_id_cookie = "value"
        await process_listing(server_id_cookie, tender)
        mock_add_job.assert_called_once_with(
            push,
            "date",
            run_date=next_check + timedelta(seconds=2),
            timezone=TZ,
            id=f'recheck_{tender["id"]}',
            name=f'Recheck {tender["id"]}',
            misfire_grace_time=60 * 60,
            replace_existing=True,
            args=["recheck", tender["id"], server_id_cookie],
        )
        assert f'Start processing tender: {tender["id"]}' in caplog.messages[0]
        assert f"Tender {tenant_id} don't need to resync" in caplog.messages[1]
        assert len(caplog.messages) == 2
        mock_sleep.assert_called_once_with(1)
| 42.390476
| 118
| 0.6439
| 529
| 4,451
| 5.136106
| 0.175803
| 0.047111
| 0.133971
| 0.145749
| 0.802356
| 0.7788
| 0.773647
| 0.763342
| 0.763342
| 0.749356
| 0
| 0.018294
| 0.238598
| 4,451
| 104
| 119
| 42.798077
| 0.783417
| 0
| 0
| 0.653061
| 0
| 0
| 0.239721
| 0.137048
| 0
| 0
| 0
| 0
| 0.163265
| 1
| 0
| false
| 0
| 0.081633
| 0
| 0.091837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3db81911591cf7db62ea9a321497d44bb4cc2036
| 118
|
py
|
Python
|
examples/rietveld/rietveld_helper/views.py
|
NaN-tic/django-gae2django
|
ea3bc1e8aa9e072bf93b131816e0d20e1795c999
|
[
"Apache-2.0"
] | 3
|
2015-05-04T13:49:41.000Z
|
2017-04-25T06:27:39.000Z
|
examples/rietveld/rietveld_helper/views.py
|
NaN-tic/django-gae2django
|
ea3bc1e8aa9e072bf93b131816e0d20e1795c999
|
[
"Apache-2.0"
] | 1
|
2021-05-21T20:01:35.000Z
|
2021-05-21T20:01:35.000Z
|
examples/rietveld/rietveld_helper/views.py
|
NaN-tic/django-gae2django
|
ea3bc1e8aa9e072bf93b131816e0d20e1795c999
|
[
"Apache-2.0"
] | 2
|
2020-11-13T17:43:42.000Z
|
2021-05-21T20:01:32.000Z
|
from django.http import HttpResponseRedirect
def admin_redirect(request):
    """Redirect any incoming request to the Django admin index page."""
    admin_index = '/admin/'
    return HttpResponseRedirect(admin_index)
| 23.6
| 44
| 0.805085
| 12
| 118
| 7.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110169
| 118
| 4
| 45
| 29.5
| 0.895238
| 0
| 0
| 0
| 0
| 0
| 0.059322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
3ddbea75aa0c18cb6e05235979d67b9d5d5b30a2
| 81
|
py
|
Python
|
accounts/admin_inlines.py
|
biotech2021/uniTicket
|
8c441eac18e67a983e158326b1c4b82f00f1f1ef
|
[
"Apache-2.0"
] | 15
|
2019-09-06T06:47:08.000Z
|
2022-01-17T06:39:54.000Z
|
accounts/admin_inlines.py
|
biotech2021/uniTicket
|
8c441eac18e67a983e158326b1c4b82f00f1f1ef
|
[
"Apache-2.0"
] | 69
|
2019-09-06T12:03:19.000Z
|
2022-03-26T14:30:53.000Z
|
accounts/admin_inlines.py
|
biotech2021/uniTicket
|
8c441eac18e67a983e158326b1c4b82f00f1f1ef
|
[
"Apache-2.0"
] | 13
|
2019-09-11T10:54:20.000Z
|
2021-11-23T09:09:19.000Z
|
from django import forms
from django.contrib import admin
from .models import *
| 16.2
| 32
| 0.802469
| 12
| 81
| 5.416667
| 0.583333
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.160494
| 81
| 4
| 33
| 20.25
| 0.955882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9a780bff9e57996d58fffbbf3bd5e95b123ddaca
| 615
|
py
|
Python
|
biz/t9.py
|
relax-space/python-learning
|
22987e20a4b0a741e1c5ed8603a952a0fc8dd4bd
|
[
"Apache-2.0"
] | null | null | null |
biz/t9.py
|
relax-space/python-learning
|
22987e20a4b0a741e1c5ed8603a952a0fc8dd4bd
|
[
"Apache-2.0"
] | null | null | null |
biz/t9.py
|
relax-space/python-learning
|
22987e20a4b0a741e1c5ed8603a952a0fc8dd4bd
|
[
"Apache-2.0"
] | null | null | null |
import pytest
@pytest.fixture()
def a1(tmp_path_factory, worker_id):
    """Fixture yielding a fixed sentinel value; the injected pytest
    fixtures are accepted but currently unused."""
    sentinel = 11
    return sentinel
# @pytest.mark.parametrize('common_arg1', [{}])
class TestParametrized:
    """Playground for pytest.mark.parametrize with multiple argnames."""

    # Bugfix: the original passed ids=['12', '23'] for a SINGLE parameter
    # set, which makes pytest abort collection ("ids ... has different
    # length than argvalues"). One parameter set -> exactly one id.
    # NOTE(review): the lone argvalue is a dict; with two argnames pytest
    # unpacks it by iteration, i.e. a=0 and b=1 (its keys) — confirm that
    # is the intended data shape.
    @pytest.mark.parametrize(('a', 'b'), [{0: [1], 1: [22]}], ids=['12'])
    def test_1(self, a1, a, b):
        print(a1)
        pass
| 21.206897
| 73
| 0.560976
| 84
| 615
| 3.952381
| 0.416667
| 0.210843
| 0.253012
| 0.150602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073913
| 0.252033
| 615
| 28
| 74
| 21.964286
| 0.647826
| 0.489431
| 0
| 0
| 0
| 0
| 0.019868
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.111111
| 0.111111
| 0.111111
| 0.555556
| 0.111111
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
9aa0cd2ac436f5c595fac6dd4b232f01a919c0bb
| 26
|
py
|
Python
|
build/lib/ajaira/__init__.py
|
amitrm/ajaira
|
aca6b4970cc134480f0e0b9b313dccb86558b738
|
[
"MIT"
] | null | null | null |
build/lib/ajaira/__init__.py
|
amitrm/ajaira
|
aca6b4970cc134480f0e0b9b313dccb86558b738
|
[
"MIT"
] | null | null | null |
build/lib/ajaira/__init__.py
|
amitrm/ajaira
|
aca6b4970cc134480f0e0b9b313dccb86558b738
|
[
"MIT"
] | null | null | null |
from ajaira.jinis import *
| 26
| 26
| 0.807692
| 4
| 26
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9aa695b63f4986f611994222d09b78d25bf4ca16
| 114
|
py
|
Python
|
python/speak/goodbye.py
|
kyle-cook/templates
|
f1047a8c31a42507acbd7a27e66db0825be811a6
|
[
"MIT"
] | null | null | null |
python/speak/goodbye.py
|
kyle-cook/templates
|
f1047a8c31a42507acbd7a27e66db0825be811a6
|
[
"MIT"
] | null | null | null |
python/speak/goodbye.py
|
kyle-cook/templates
|
f1047a8c31a42507acbd7a27e66db0825be811a6
|
[
"MIT"
] | null | null | null |
def goodbyeworld():
    """Return the greeting string "Goodbye World!".

    Doc fix: the original docstring claimed the text is printed to the
    STDOUT screen buffer, but nothing is written to any stream — the
    string is simply returned to the caller.
    """
    return "Goodbye World!"
| 28.5
| 61
| 0.649123
| 13
| 114
| 5.692308
| 0.846154
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236842
| 114
| 4
| 62
| 28.5
| 0.850575
| 0.429825
| 0
| 0
| 0
| 0
| 0.241379
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
9ac2c5d0c20d5fbe9178b654996a021b76f88a41
| 13,612
|
py
|
Python
|
tests/test_priors.py
|
RobinAlgayres/beer
|
15ad0dad5a49f98e658e948724e05df347ffe3b8
|
[
"MIT"
] | 46
|
2018-02-27T18:15:08.000Z
|
2022-02-16T22:10:55.000Z
|
tests/test_priors.py
|
RobinAlgayres/beer
|
15ad0dad5a49f98e658e948724e05df347ffe3b8
|
[
"MIT"
] | 16
|
2018-01-26T14:18:51.000Z
|
2021-02-05T09:34:00.000Z
|
tests/test_priors.py
|
RobinAlgayres/beer
|
15ad0dad5a49f98e658e948724e05df347ffe3b8
|
[
"MIT"
] | 26
|
2018-03-12T14:03:26.000Z
|
2021-05-24T21:15:01.000Z
|
'Test the priors package.'
import sys
sys.path.insert(0, './')
sys.path.insert(0, './tests')
import unittest
import torch
import beer
from basetest import BaseTest
class BaseTestPrior(BaseTest):
    """Shared check for every prior: the analytic expected sufficient
    statistics must equal the gradient of the log-normalizer with respect
    to the natural parameters."""

    def test_exp_sufficient_statistics(self):
        analytic = self.prior.expected_sufficient_statistics()
        nat_params = torch.tensor(self.prior.natural_parameters,
                                  requires_grad=True)
        # autograd gives the same statistics as d(log Z)/d(eta)
        torch.autograd.backward(self.prior.log_norm(nat_params))
        numeric = nat_params.grad
        self.assertArraysAlmostEqual(analytic.numpy(), numeric.numpy())
########################################################################
# Dirichlet.
########################################################################
class TestDirichletPrior(BaseTestPrior):
    """Round-trip tests between standard and natural parameters of the
    Dirichlet prior."""

    def setUp(self):
        concentration_dim = 10
        self.std_parameters = 2 * torch.ones(concentration_dim)
        self.prior = beer.priors.DirichletPrior(self.std_parameters)

    def test_natural2std(self):
        recovered = self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(recovered.numpy(),
                                     self.std_parameters.numpy())

    def test_std2natural(self):
        std = self.prior.to_std_parameters(self.prior.natural_parameters)
        natural = self.prior.to_natural_parameters(std)
        self.assertArraysAlmostEqual(natural.numpy(),
                                     self.prior.natural_parameters.numpy())
########################################################################
# Gamma.
########################################################################
class TestGammaPrior(BaseTestPrior):
    """Round-trip tests for the Gamma prior (scalar shape and rate).

    Cleanup: removed an unused ``dim = 10`` local left over from
    copy-pasting the Dirichlet test's setUp.
    """

    def setUp(self):
        self.shape = torch.tensor(2).type(self.type)
        self.rate = torch.tensor(.5).type(self.type)
        self.prior = beer.priors.GammaPrior(self.shape, self.rate)

    def test_natural2std(self):
        shape, rate = self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(shape.numpy(), self.shape.numpy())
        self.assertArraysAlmostEqual(rate.numpy(), self.rate.numpy())

    def test_std2natural(self):
        shape, rate = self.prior.to_std_parameters(self.prior.natural_parameters)
        nparams = self.prior.to_natural_parameters(shape, rate)
        self.assertArraysAlmostEqual(nparams.numpy(),
                                     self.prior.natural_parameters.numpy())
########################################################################
# Wishart.
########################################################################
class TestWishartPrior(BaseTestPrior):
    """Round-trip tests for the Wishart prior (scale matrix and degrees
    of freedom)."""

    def setUp(self):
        dim = 10
        self.dof = torch.tensor(dim + 2).type(self.type)
        self.scale = torch.eye(dim).type(self.type)
        self.prior = beer.priors.WishartPrior(self.scale, self.dof)

    def test_natural2std(self):
        recovered_scale, recovered_dof = self.prior.to_std_parameters(
            self.prior.natural_parameters)
        self.assertArraysAlmostEqual(recovered_scale.numpy(), self.scale.numpy())
        self.assertArraysAlmostEqual(recovered_dof.numpy(), self.dof.numpy())

    def test_std2natural(self):
        scale, dof = self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(
            self.prior.to_natural_parameters(scale, dof).numpy(),
            self.prior.natural_parameters.numpy())
########################################################################
# Normal Full covariance.
########################################################################
class TestNormalFullCovariancePrior(BaseTestPrior):
    """Round-trip tests for the full-covariance Normal prior, whose
    precision matrix carries its own Wishart hyper-prior."""

    def setUp(self):
        dim = 10
        # Fix: the original stored the Wishart scale matrix in `self.scale`
        # and immediately clobbered it with the scalar Normal scale below.
        # A distinct local name removes the confusing aliasing; behavior of
        # the assertions (which use the scalar) is unchanged.
        precision_scale = torch.eye(dim).type(self.type)
        self.dof = torch.tensor(dim + 2).type(self.type)
        self.prior_precision = beer.priors.WishartPrior(precision_scale, self.dof)
        self.mean = 3 * torch.ones(dim).type(self.type)
        self.scale = torch.tensor(1.5).type(self.type)
        self.prior = beer.priors.NormalFullCovariancePrior(self.mean, self.scale,
                                                           self.prior_precision)

    def test_natural2std(self):
        mean, scale = self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(mean.numpy(), self.mean.numpy())
        self.assertArraysAlmostEqual(scale.numpy(), self.scale.numpy())

    def test_std2natural(self):
        mean, scale = self.prior.to_std_parameters(self.prior.natural_parameters)
        nparams = self.prior.to_natural_parameters(mean, scale)
        self.assertArraysAlmostEqual(nparams.numpy(),
                                     self.prior.natural_parameters.numpy())
########################################################################
# Normal Wishart.
########################################################################
class TestNormalWishartPrior(BaseTestPrior):
    """Round-trip tests for the Normal-Wishart prior."""

    def setUp(self):
        dim = 10
        self.mean = 3 + torch.zeros(dim).type(self.type)
        self.scale = torch.tensor(2.5).type(self.type)
        self.mean_precision = torch.eye(dim).type(self.type) \
            + torch.ger(self.mean, self.mean)
        self.dof = torch.tensor(dim + 2).type(self.type)
        self.prior = beer.priors.NormalWishartPrior(self.mean, self.scale,
                                                    self.mean_precision, self.dof)

    def test_natural2std(self):
        mean, scale, mean_precision, dof = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(mean.numpy(), self.mean.numpy())
        self.assertArraysAlmostEqual(scale.numpy(), self.scale.numpy())
        self.assertArraysAlmostEqual(mean_precision.numpy(), self.mean_precision.numpy())
        self.assertArraysAlmostEqual(dof.numpy(), self.dof.numpy())

    def test_std2natural(self):
        # Consistency fix: pass the natural parameters explicitly, as every
        # other prior test in this file does; the original called
        # to_std_parameters() with no argument.
        mean, scale, mean_precision, dof = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        nparams = self.prior.to_natural_parameters(mean, scale, mean_precision, dof)
        self.assertArraysAlmostEqual(nparams.numpy(),
                                     self.prior.natural_parameters.numpy())
########################################################################
# Normal Gamma.
########################################################################
class TestNormalGammaPrior(BaseTestPrior):
    """Round-trip tests for the Normal-Gamma prior (per-dimension rates)."""

    def setUp(self):
        dim = 10
        self.mean = 3 + torch.zeros(dim).type(self.type)
        self.scale = torch.tensor(2.5).type(self.type)
        self.shape = torch.tensor(3).type(self.type)
        self.rates = 2 * torch.ones(dim).type(self.type)
        self.prior = beer.priors.NormalGammaPrior(self.mean, self.scale,
                                                  self.shape, self.rates)

    def test_natural2std(self):
        mean, scale, shape, rates = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(mean.numpy(), self.mean.numpy())
        self.assertArraysAlmostEqual(scale.numpy(), self.scale.numpy())
        self.assertArraysAlmostEqual(shape.numpy(), self.shape.numpy())
        self.assertArraysAlmostEqual(rates.numpy(), self.rates.numpy())

    def test_std2natural(self):
        # Consistency fix: pass the natural parameters explicitly (the
        # original relied on a no-argument call, unlike the other tests).
        mean, scale, shape, rates = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        nparams = self.prior.to_natural_parameters(mean, scale, shape, rates)
        self.assertArraysAlmostEqual(nparams.numpy(),
                                     self.prior.natural_parameters.numpy())
########################################################################
# Isotropic Normal Gamma.
########################################################################
class TestIsotropicNormalGammaPrior(BaseTestPrior):
    """Round-trip tests for the isotropic Normal-Gamma prior (scalar rate)."""

    def setUp(self):
        dim = 10
        self.mean = 3 + torch.zeros(dim).type(self.type)
        self.scale = torch.tensor(2.5).type(self.type)
        self.shape = torch.tensor(3).type(self.type)
        self.rate = torch.tensor(2).type(self.type)
        self.prior = beer.priors.IsotropicNormalGammaPrior(self.mean, self.scale,
                                                           self.shape, self.rate)

    def test_natural2std(self):
        mean, scale, shape, rate = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(mean.numpy(), self.mean.numpy())
        self.assertArraysAlmostEqual(scale.numpy(), self.scale.numpy())
        self.assertArraysAlmostEqual(shape.numpy(), self.shape.numpy())
        self.assertArraysAlmostEqual(rate.numpy(), self.rate.numpy())

    def test_std2natural(self):
        # Consistency fix: pass the natural parameters explicitly (the
        # original relied on a no-argument call, unlike the other tests).
        mean, scale, shape, rate = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        nparams = self.prior.to_natural_parameters(mean, scale, shape, rate)
        self.assertArraysAlmostEqual(nparams.numpy(),
                                     self.prior.natural_parameters.numpy())
########################################################################
# Joint Isotropic Normal Gamma.
########################################################################
class TestJointIsotropicNormalGammaPrior(BaseTestPrior):
    """Round-trip tests for the joint isotropic Normal-Gamma prior
    (k component means sharing one scalar Gamma)."""

    def setUp(self):
        dim = 10
        k = 3
        self.means = 3 + torch.zeros(k, dim).type(self.type)
        self.scales = 2.5 * torch.ones(k).type(self.type)
        self.shape = torch.tensor(3).type(self.type)
        self.rate = torch.tensor(2).type(self.type)
        self.prior = beer.priors.JointIsotropicNormalGammaPrior(self.means, self.scales,
                                                                self.shape, self.rate)

    def test_natural2std(self):
        means, scales, shape, rate = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(means.numpy(), self.means.numpy())
        self.assertArraysAlmostEqual(scales.numpy(), self.scales.numpy())
        self.assertArraysAlmostEqual(shape.numpy(), self.shape.numpy())
        self.assertArraysAlmostEqual(rate.numpy(), self.rate.numpy())

    def test_std2natural(self):
        # Consistency fix: pass the natural parameters explicitly (the
        # original relied on a no-argument call, unlike the other tests).
        means, scales, shape, rate = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        nparams = self.prior.to_natural_parameters(means, scales, shape, rate)
        self.assertArraysAlmostEqual(nparams.numpy(),
                                     self.prior.natural_parameters.numpy())
########################################################################
# Joint Normal Gamma.
########################################################################
class TestJointNormalGammaPrior(BaseTestPrior):
    """Round-trip tests for the joint Normal-Gamma prior
    (k component means sharing per-dimension Gamma rates)."""

    def setUp(self):
        dim = 10
        k = 3
        self.means = 3 + torch.zeros(k, dim).type(self.type)
        self.scales = 2.5 * torch.ones(k).type(self.type)
        self.shape = torch.tensor(3).type(self.type)
        self.rates = 2 * torch.ones(dim).type(self.type)
        self.prior = beer.priors.JointNormalGammaPrior(self.means, self.scales,
                                                       self.shape, self.rates)

    def test_natural2std(self):
        means, scales, shape, rates = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(means.numpy(), self.means.numpy())
        self.assertArraysAlmostEqual(scales.numpy(), self.scales.numpy())
        self.assertArraysAlmostEqual(shape.numpy(), self.shape.numpy())
        self.assertArraysAlmostEqual(rates.numpy(), self.rates.numpy())

    def test_std2natural(self):
        # Consistency fix: pass the natural parameters explicitly (the
        # original relied on a no-argument call, unlike the other tests).
        means, scales, shape, rates = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        nparams = self.prior.to_natural_parameters(means, scales, shape, rates)
        self.assertArraysAlmostEqual(nparams.numpy(),
                                     self.prior.natural_parameters.numpy())
########################################################################
# Joint Normal Wishart.
########################################################################
class TestJointNormalWishartPrior(BaseTestPrior):
    """Round-trip tests for the joint Normal-Wishart prior
    (k component means sharing one Wishart precision)."""

    def setUp(self):
        dim = 10
        k = 3
        self.means = 3 + torch.zeros(k, dim).type(self.type)
        self.scales = 2.5 * torch.ones(k).type(self.type)
        self.mean_precision = torch.eye(dim).type(self.type) \
            + .1 * torch.ger(self.means[0], self.means[0])
        self.dof = torch.tensor(dim + 2).type(self.type)
        self.prior = beer.priors.JointNormalWishartPrior(self.means, self.scales,
                                                         self.mean_precision, self.dof)

    def test_natural2std(self):
        means, scales, mean_precision, dof = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        self.assertArraysAlmostEqual(means.numpy(), self.means.numpy())
        self.assertArraysAlmostEqual(scales.numpy(), self.scales.numpy())
        self.assertArraysAlmostEqual(mean_precision.numpy(), self.mean_precision.numpy())
        self.assertArraysAlmostEqual(dof.numpy(), self.dof.numpy())

    def test_std2natural(self):
        # Consistency fix: pass the natural parameters explicitly (the
        # original relied on a no-argument call, unlike the other tests).
        means, scales, mean_precision, dof = \
            self.prior.to_std_parameters(self.prior.natural_parameters)
        nparams = self.prior.to_natural_parameters(means, scales, mean_precision, dof)
        self.assertArraysAlmostEqual(nparams.numpy(),
                                     self.prior.natural_parameters.numpy())
# Public test classes exported via `from test_priors import *`.
# NOTE(review): BaseTestPrior is omitted, presumably so the abstract base
# is not collected and run on its own — confirm this is intentional.
__all__ = [
    'TestDirichletPrior',
    'TestGammaPrior',
    'TestNormalFullCovariancePrior',
    'TestIsotropicNormalGammaPrior',
    'TestJointIsotropicNormalGammaPrior',
    'TestJointNormalGammaPrior',
    'TestJointNormalWishartPrior',
    'TestNormalGammaPrior',
    'TestNormalWishartPrior',
    'TestWishartPrior'
]
| 42.273292
| 89
| 0.580223
| 1,339
| 13,612
| 5.790889
| 0.071695
| 0.080088
| 0.049523
| 0.061904
| 0.813903
| 0.803585
| 0.798942
| 0.763348
| 0.727753
| 0.704282
| 0
| 0.00818
| 0.209668
| 13,612
| 321
| 90
| 42.404984
| 0.712586
| 0.01484
| 0
| 0.626667
| 0
| 0
| 0.022278
| 0.013851
| 0
| 0
| 0
| 0
| 0.186667
| 1
| 0.137778
| false
| 0
| 0.022222
| 0
| 0.208889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9ae0ad7d8e5c931eee5b2d6ddc09b58506666f70
| 173
|
py
|
Python
|
algorithm/__init__.py
|
zzhmark/insitu
|
6b0f51e9b92601ca80b94b4c1c6b82655fd1b68a
|
[
"MIT"
] | 1
|
2020-09-07T08:28:56.000Z
|
2020-09-07T08:28:56.000Z
|
algorithm/__init__.py
|
zzhmark/insitu
|
6b0f51e9b92601ca80b94b4c1c6b82655fd1b68a
|
[
"MIT"
] | null | null | null |
algorithm/__init__.py
|
zzhmark/insitu
|
6b0f51e9b92601ca80b94b4c1c6b82655fd1b68a
|
[
"MIT"
] | null | null | null |
from .batch import batch_apply
from .preprocessing import extract, register
from .GMM import global_gmm, local_gmm
from .scoring import global_gmm_compare, local_gmm_compare
| 43.25
| 58
| 0.855491
| 26
| 173
| 5.423077
| 0.461538
| 0.170213
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104046
| 173
| 4
| 58
| 43.25
| 0.909677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9ae6c715427b06f449d9a8409cc86e92e851bb7e
| 36
|
py
|
Python
|
src/linkstation/__init__.py
|
iKaew/linkstation
|
098e145a00c447bb4cf480b60645ae383136c3d9
|
[
"MIT"
] | null | null | null |
src/linkstation/__init__.py
|
iKaew/linkstation
|
098e145a00c447bb4cf480b60645ae383136c3d9
|
[
"MIT"
] | null | null | null |
src/linkstation/__init__.py
|
iKaew/linkstation
|
098e145a00c447bb4cf480b60645ae383136c3d9
|
[
"MIT"
] | null | null | null |
from .linkstation import LinkStation
| 36
| 36
| 0.888889
| 4
| 36
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9aed91d742e76d2c41e4a10555e49d79b6f2f884
| 10,800
|
py
|
Python
|
tests/test_core/test_core.py
|
TomVollerthun1337/logsmith
|
f2ecab4dea295d5493a9a3e77a2837b13fa139e5
|
[
"Apache-2.0"
] | 19
|
2020-01-18T00:25:43.000Z
|
2022-03-14T07:39:08.000Z
|
tests/test_core/test_core.py
|
TomVollerthun1337/logsmith
|
f2ecab4dea295d5493a9a3e77a2837b13fa139e5
|
[
"Apache-2.0"
] | 85
|
2020-01-21T12:13:56.000Z
|
2022-03-31T04:01:03.000Z
|
tests/test_core/test_core.py
|
TomVollerthun1337/logsmith
|
f2ecab4dea295d5493a9a3e77a2837b13fa139e5
|
[
"Apache-2.0"
] | 2
|
2020-06-25T06:15:19.000Z
|
2021-02-15T18:17:38.000Z
|
from unittest import TestCase, mock
from unittest.mock import call, Mock
from app.core.config import Config
from app.core.result import Result
from app.core.core import Core
from tests.test_data.test_accounts import get_test_accounts
from tests.test_data.test_results import get_success_result, get_error_result, get_failed_result
class TestCore(TestCase):
@mock.patch('app.core.core.Config.load_from_disk')
def setUp(self, mock_config):
self.core = Core()
self.config = Config()
self.config.set_accounts(get_test_accounts())
self.core.config = self.config
self.success_result = get_success_result()
self.fail_result = get_failed_result()
self.error_result = get_error_result()
@mock.patch('app.core.core.credentials')
def test_login__no_access_key(self, mock_credentials):
mock_credentials.check_access_key.return_value = self.error_result
result = self.core.login(self.config.get_group('development'), None)
expected = [call.check_access_key()]
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual(self.error_result, result)
@mock.patch('app.core.core.credentials')
def test_login__session_token_error(self, mock_credentials):
mock_credentials.check_access_key.return_value = self.success_result
mock_credentials.check_session.return_value = self.error_result
result = self.core.login(self.config.get_group('development'), None)
expected = [call.check_access_key(), call.check_session()]
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual(self.error_result, result)
@mock.patch('app.core.core.credentials')
def test_login__mfa_error(self, mock_credentials):
mock_credentials.check_access_key.return_value = self.success_result
mock_credentials.check_session.return_value = self.fail_result
self.core._renew_session = Mock()
self.core._renew_session.return_value = self.error_result
result = self.core.login(self.config.get_group('development'), None)
expected = [call.check_access_key(), call.check_session()]
self.assertEqual(expected, mock_credentials.mock_calls)
expected = [call(None)]
self.assertEqual(expected, self.core._renew_session.mock_calls)
self.assertEqual(self.error_result, result)
@mock.patch('app.core.core.files')
@mock.patch('app.core.core.credentials')
def test_login__successful_login(self, mock_credentials, _):
mock_credentials.check_access_key.return_value = self.success_result
mock_credentials.check_session.return_value = self.success_result
self.core._renew_session = Mock()
self.core._renew_session.return_value = self.success_result
mock_credentials.get_user_name.return_value = 'test-user'
mock_credentials.fetch_role_credentials.return_value = self.success_result
mock_credentials.write_profile_config.return_value = self.success_result
self.core._handle_support_files = Mock()
mock_mfa_callback = Mock()
profile_group = self.config.get_group('development')
result = self.core.login(profile_group, mock_mfa_callback)
expected = [call.check_access_key(),
call.check_session(),
call.get_user_name(),
call.fetch_role_credentials('test-user', profile_group),
call.write_profile_config(profile_group, 'us-east-1')]
self.assertEqual(expected, mock_credentials.mock_calls)
expected = [call(profile_group)]
self.assertEqual(expected, self.core._handle_support_files.mock_calls)
self.assertEqual(profile_group, self.core.active_profile_group)
self.assertEqual(None, self.core.region_override)
self.assertEqual(True, result.was_success)
self.assertEqual(False, result.was_error)
@mock.patch('app.core.core.credentials')
def test_login__logout(self, mock_credentials):
mock_credentials.fetch_role_credentials.return_value = self.success_result
mock_credentials.write_profile_config.return_value = self.success_result
result = self.core.logout()
expected = [call.fetch_role_credentials(user_name='none', profile_group=self.core.empty_profile_group),
call.write_profile_config(profile_group=self.core.empty_profile_group, region='')]
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual(True, result.was_success)
self.assertEqual(False, result.was_error)
@mock.patch('app.core.core.credentials')
def test_login__logout_error(self, mock_credentials):
mock_credentials.fetch_role_credentials.return_value = self.error_result
result = self.core.logout()
expected = [call.fetch_role_credentials(user_name='none', profile_group=self.core.empty_profile_group)]
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual(self.error_result, result)
@mock.patch('app.core.core.credentials')
def test_rotate_access_key__no_access_key(self, mock_credentials):
mock_credentials.check_access_key.return_value = self.error_result
result = self.core.rotate_access_key()
expected = [call.check_access_key()]
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual(self.error_result, result)
@mock.patch('app.core.core.iam')
@mock.patch('app.core.core.credentials')
def test_rotate_access_key__successful_rotate(self, mock_credentials, mock_iam):
mock_credentials.check_access_key.return_value = self.success_result
mock_credentials.check_session.return_value = self.success_result
mock_credentials.get_user_name.return_value = 'test-user'
mock_credentials.get_access_key_id.return_value = '12345'
access_key_result = Result()
access_key_result.add_payload({
'AccessKeyId': 12345,
'SecretAccessKey': 67890
})
access_key_result.set_success()
mock_iam.create_access_key.return_value = access_key_result
result = self.core.rotate_access_key()
expected = [call.check_access_key(),
call.check_session(),
call.get_user_name(),
call.get_access_key_id(),
call.set_access_key(key_id=12345, access_key=67890)]
self.assertEqual(expected, mock_credentials.mock_calls)
expected = [call.create_access_key('test-user'),
call.delete_iam_access_key('test-user', '12345')]
self.assertEqual(expected, mock_iam.mock_calls)
self.assertEqual(True, result.was_success)
self.assertEqual(False, result.was_error)
def test_get_region__not_logged_in(self):
region = self.core.get_region()
self.assertEqual(None, region)
def test_get_region__active_profile_group(self):
self.core.active_profile_group = self.config.get_group('development')
region = self.core.get_region()
self.assertEqual('us-east-1', region)
def test_get_region__region_overwrite(self):
self.core.active_profile_group = self.config.get_group('development')
self.core.region_override = 'eu-north-1'
region = self.core.get_region()
self.assertEqual('eu-north-1', region)
@mock.patch('app.core.core.mfa')
@mock.patch('app.core.core.credentials')
def test__renew_session__token_from_shell(self, mock_credentials, mock_mfa_shell):
mock_mfa_shell.fetch_mfa_token_from_shell.return_value = '12345'
mock_credentials.fetch_session_token.return_value = self.success_result
mock_mfa_callback = Mock()
result = self.core._renew_session(mock_mfa_callback)
self.assertEqual(0, mock_mfa_callback.call_count)
expected = [call.fetch_session_token('12345')]
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual(True, result.was_success)
self.assertEqual(False, result.was_error)
@mock.patch('app.core.core.mfa')
@mock.patch('app.core.core.credentials')
def test__renew_session__no_token_from_mfa_callback(self, mock_credentials, mock_mfa_shell):
mock_mfa_shell.fetch_mfa_token_from_shell.return_value = None
mock_credentials.fetch_session_token.return_value = self.success_result
mock_mfa_callback = Mock()
mock_mfa_callback.return_value = ''
result = self.core._renew_session(mock_mfa_callback)
self.assertEqual(1, mock_mfa_callback.call_count)
expected = []
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual(False, result.was_success)
self.assertEqual(True, result.was_error)
@mock.patch('app.core.core.mfa')
@mock.patch('app.core.core.credentials')
def test__renew_session__token_from_mfa_callback(self, mock_credentials, mock_mfa_shell):
mock_mfa_shell.fetch_mfa_token_from_shell.return_value = None
mock_credentials.fetch_session_token.return_value = self.success_result
mock_mfa_callback = Mock()
mock_mfa_callback.return_value = '12345'
result = self.core._renew_session(mock_mfa_callback)
self.assertEqual(1, mock_mfa_callback.call_count)
expected = [call.fetch_session_token('12345')]
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual(True, result.was_success)
self.assertEqual(False, result.was_error)
@mock.patch('app.core.core.credentials')
def test__set_region__not_logged_in(self, mock_credentials):
mock_credentials.write_profile_config.return_value = self.success_result
result = self.core.set_region('eu-north-1')
expected = []
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual('eu-north-1', self.core.region_override)
self.assertEqual(True, result.was_success)
self.assertEqual(False, result.was_error)
@mock.patch('app.core.core.credentials')
def test__set_region__logged_in(self, mock_credentials):
mock_credentials.write_profile_config.return_value = self.success_result
self.core.active_profile_group = self.config.get_group('development')
result = self.core.set_region('eu-north-1')
expected = [call.write_profile_config(self.config.get_group('development'), 'eu-north-1')]
self.assertEqual(expected, mock_credentials.mock_calls)
self.assertEqual('eu-north-1', self.core.region_override)
self.assertEqual(True, result.was_success)
self.assertEqual(False, result.was_error)
| 42.687747
| 111
| 0.718796
| 1,365
| 10,800
| 5.331136
| 0.065201
| 0.101003
| 0.067885
| 0.041775
| 0.84664
| 0.803078
| 0.791123
| 0.772021
| 0.760066
| 0.730521
| 0
| 0.007024
| 0.182685
| 10,800
| 252
| 112
| 42.857143
| 0.817379
| 0
| 0
| 0.587302
| 0
| 0
| 0.067778
| 0.033333
| 0
| 0
| 0
| 0
| 0.248677
| 1
| 0.089947
| false
| 0
| 0.037037
| 0
| 0.132275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b10c673c60ab16d18a47df85e8a0518a30dea1bb
| 11,931
|
py
|
Python
|
apis/nb/clients/identity_manager_client/V2ScalablegroupApi.py
|
CiscoDevNet/APIC-EM-Generic-Scripts-
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 45
|
2016-06-09T15:41:25.000Z
|
2019-08-06T17:13:11.000Z
|
apis/nb/clients/identity_manager_client/V2ScalablegroupApi.py
|
CiscoDevNet/APIC-EM-Generic-Scripts
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 36
|
2016-06-12T03:03:56.000Z
|
2017-03-13T18:20:11.000Z
|
apis/nb/clients/identity_manager_client/V2ScalablegroupApi.py
|
CiscoDevNet/APIC-EM-Generic-Scripts
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 15
|
2016-06-22T03:51:37.000Z
|
2019-07-10T10:06:02.000Z
|
#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
import sys
import os
import urllib.request, urllib.parse, urllib.error
from .models import *
class V2ScalablegroupApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getScalableGroupByFilters(self, **kwargs):
"""Retrieves scalable group based on a given filter
Args:
name, str: Retrieve policies for a given name (required)
offset, str: Starting index of the resources (1 based) (required)
limit, str: Number of resources to return (required)
Returns: ScalableGroupListResult
"""
allParams = ['name', 'offset', 'limit']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getScalableGroupByFilters" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/scalable-group'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('name' in params):
queryParams['name'] = self.apiClient.toPathValue(params['name'])
if ('offset' in params):
queryParams['offset'] = self.apiClient.toPathValue(params['offset'])
if ('limit' in params):
queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'ScalableGroupListResult')
return responseObject
def updateScalableGroups(self, **kwargs):
"""Updates existing scalable group.
Args:
scalableGroupDtos, list[ScalableGroupDTO]: scalableGroupDtos (required)
Returns: TaskIdResult
"""
allParams = ['scalableGroupDtos']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method updateScalableGroups" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/scalable-group'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('scalableGroupDtos' in params):
bodyParam = params['scalableGroupDtos']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def addScalableGroups(self, **kwargs):
"""Creates scalable group inheriting properties from an existing scalable group.
Args:
scalableGroupDtos, list[ScalableGroupDTO]: scalableGroupDtos (required)
Returns: TaskIdResult
"""
allParams = ['scalableGroupDtos']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method addScalableGroups" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/scalable-group'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('scalableGroupDtos' in params):
bodyParam = params['scalableGroupDtos']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def getCount(self, **kwargs):
"""getCount
Args:
Returns: CountResult
"""
allParams = []
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getCount" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/scalable-group/count'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'CountResult')
return responseObject
def getEndPointGroupbyId(self, **kwargs):
"""List scalable group based on id
Args:
id, str: id (required)
Returns: ScalableGroupResult
"""
allParams = ['id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getEndPointGroupbyId" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/scalable-group/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'ScalableGroupResult')
return responseObject
def deleteScalableGroup(self, **kwargs):
"""Delete a scalable group by its id.
Args:
id, str: id (required)
Returns: TaskIdResult
"""
allParams = ['id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteScalableGroup" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/scalable-group/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'TaskIdResult')
return responseObject
def getUserByScalableGroupId(self, **kwargs):
"""List scalable group based on id
Args:
id, str: id (required)
Returns: ApicEmUserListResult
"""
allParams = ['id']
params = locals()
for (key, val) in list(params['kwargs'].items()):
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getUserByScalableGroupId" % key)
params[key] = val
del params['kwargs']
resourcePath = '/v2/scalable-group/{id}/users'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
headerParams['Accept'] = 'application/json'
headerParams['Content-Type'] = 'application/json'
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.parse.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
if not response:
return None
responseObject = self.apiClient.deserialize(response, 'ApicEmUserListResult')
return responseObject
| 25.277542
| 116
| 0.526025
| 945
| 11,931
| 6.637037
| 0.142857
| 0.045599
| 0.049426
| 0.020089
| 0.781091
| 0.781091
| 0.781091
| 0.776626
| 0.776626
| 0.776626
| 0
| 0.001215
| 0.379013
| 11,931
| 471
| 117
| 25.33121
| 0.845323
| 0.100159
| 0
| 0.816425
| 0
| 0
| 0.144871
| 0.016738
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038647
| false
| 0
| 0.019324
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
49082ded2358d451f3a446c6629c4569ee6c21fc
| 33
|
py
|
Python
|
utils/parsers/__init__.py
|
jvsn19/tcc
|
8ca9434deb50903ef23e712a079ed159dd7c28b6
|
[
"MIT"
] | null | null | null |
utils/parsers/__init__.py
|
jvsn19/tcc
|
8ca9434deb50903ef23e712a079ed159dd7c28b6
|
[
"MIT"
] | 1
|
2021-02-25T22:37:49.000Z
|
2021-02-25T22:37:49.000Z
|
utils/parsers/__init__.py
|
jvsn19/tcc
|
8ca9434deb50903ef23e712a079ed159dd7c28b6
|
[
"MIT"
] | null | null | null |
from .TCCParser import TCCParser
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
49197d58f50419a076f1129a8ab1be883c2698d6
| 42
|
py
|
Python
|
runtime/musl-lkl/apps/python/helloworld.py
|
dme26/intravisor
|
9bf9c50aa14616bd9bd66eee47623e8b61514058
|
[
"MIT"
] | 11
|
2022-02-05T12:12:43.000Z
|
2022-03-08T08:09:08.000Z
|
runtime/musl-lkl/apps/python/helloworld.py
|
dme26/intravisor
|
9bf9c50aa14616bd9bd66eee47623e8b61514058
|
[
"MIT"
] | null | null | null |
runtime/musl-lkl/apps/python/helloworld.py
|
dme26/intravisor
|
9bf9c50aa14616bd9bd66eee47623e8b61514058
|
[
"MIT"
] | 1
|
2022-02-22T20:32:22.000Z
|
2022-02-22T20:32:22.000Z
|
print("Hello world from CHERI python\n");
| 21
| 41
| 0.738095
| 7
| 42
| 4.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.837838
| 0
| 0
| 0
| 0
| 0
| 0.738095
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
4953c7d4287407342e1c774902be0cc3504df9b9
| 26
|
py
|
Python
|
models/__init__.py
|
bolero2/vggnet-torch
|
912046be3f0581e0217c2cf5b596e6318aad241b
|
[
"Apache-2.0"
] | 2
|
2021-04-23T03:49:30.000Z
|
2021-04-23T03:49:33.000Z
|
models/__init__.py
|
bolero2/vggnet-torch
|
912046be3f0581e0217c2cf5b596e6318aad241b
|
[
"Apache-2.0"
] | null | null | null |
models/__init__.py
|
bolero2/vggnet-torch
|
912046be3f0581e0217c2cf5b596e6318aad241b
|
[
"Apache-2.0"
] | null | null | null |
from .models import VGGNet
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4967a3e2a09b3fe98ed0fc8d035d3e866c664dc5
| 70
|
py
|
Python
|
continual_learning/methods/task_incremental/multi_task/gg/super_mask_pruning/base/__init__.py
|
jaryP/ContinualAI
|
7d9b7614066d219ebd72049692da23ad6ec132b0
|
[
"MIT"
] | null | null | null |
continual_learning/methods/task_incremental/multi_task/gg/super_mask_pruning/base/__init__.py
|
jaryP/ContinualAI
|
7d9b7614066d219ebd72049692da23ad6ec132b0
|
[
"MIT"
] | null | null | null |
continual_learning/methods/task_incremental/multi_task/gg/super_mask_pruning/base/__init__.py
|
jaryP/ContinualAI
|
7d9b7614066d219ebd72049692da23ad6ec132b0
|
[
"MIT"
] | null | null | null |
from .distributions import *
from .layer import *
from .utils import *
| 23.333333
| 28
| 0.757143
| 9
| 70
| 5.888889
| 0.555556
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157143
| 70
| 3
| 29
| 23.333333
| 0.898305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
498d46e1852df10e7f414e05b90a9d8080e31f36
| 99
|
py
|
Python
|
override/__main__.py
|
davidhyman/override
|
e34bd3c8676233439de5c002367b3bff5c1b88d6
|
[
"MIT"
] | null | null | null |
override/__main__.py
|
davidhyman/override
|
e34bd3c8676233439de5c002367b3bff5c1b88d6
|
[
"MIT"
] | 1
|
2017-07-11T22:03:27.000Z
|
2017-07-11T22:03:27.000Z
|
override/__main__.py
|
davidhyman/override
|
e34bd3c8676233439de5c002367b3bff5c1b88d6
|
[
"MIT"
] | null | null | null |
from override.cli import init_from_command
if __name__ == '__main__':
init_from_command()
| 19.8
| 43
| 0.737374
| 13
| 99
| 4.692308
| 0.692308
| 0.262295
| 0.491803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 99
| 4
| 44
| 24.75
| 0.753086
| 0
| 0
| 0
| 0
| 0
| 0.084211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
77053c923f7a5dc49b4a3bfa5219f2cbffa44bce
| 178
|
py
|
Python
|
virl/generators/__init__.py
|
gve-vse-tim/virlutils
|
64687229ea8763509aca54b63144b61037e5228f
|
[
"MIT"
] | 12
|
2018-03-27T14:02:22.000Z
|
2018-06-07T16:19:38.000Z
|
virl/generators/__init__.py
|
gve-vse-tim/virlutils
|
64687229ea8763509aca54b63144b61037e5228f
|
[
"MIT"
] | 29
|
2017-12-14T16:38:12.000Z
|
2018-08-19T18:41:06.000Z
|
virl/generators/__init__.py
|
gve-vse-tim/virlutils
|
64687229ea8763509aca54b63144b61037e5228f
|
[
"MIT"
] | 7
|
2018-03-02T15:42:22.000Z
|
2020-04-20T11:25:32.000Z
|
from .pyats_testbed import pyats_testbed_generator # noqa
from .ansible_inventory import ansible_inventory_generator # noqa
from .nso_payload import nso_payload_generator # noqa
| 44.5
| 65
| 0.865169
| 24
| 178
| 6.041667
| 0.416667
| 0.268966
| 0.234483
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101124
| 178
| 3
| 66
| 59.333333
| 0.90625
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
773cfab9368b9d43a339a35fda07e8f99d190375
| 42,300
|
py
|
Python
|
pybind/slxos/v17r_2_00/isis_state/interface_detail/isis_intf/circ_metrics/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_2_00/isis_state/interface_detail/isis_intf/circ_metrics/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v17r_2_00/isis_state/interface_detail/isis_intf/circ_metrics/__init__.py
|
extremenetworks/pybind
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class circ_metrics(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-isis-operational - based on the path /isis-state/interface-detail/isis-intf/circ-metrics. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: ISIS circuit attributes
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__level','__auth_check','__auth_mode','__auth_key','__circ_metric','__ip6_circ_metric','__circ_priority','__hello_int','__hello_mult','__dis','__dis_ch','__next_hello','__active_adj',)
_yang_name = 'circ-metrics'
_rest_name = 'circ-metrics'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__hello_int = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-int", rest_name="hello-int", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__ip6_circ_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip6-circ-metric", rest_name="ip6-circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__auth_key = YANGDynClass(base=unicode, is_leaf=True, yang_name="auth-key", rest_name="auth-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
self.__circ_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-metric", rest_name="circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__dis_ch = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dis-ch", rest_name="dis-ch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__active_adj = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="active-adj", rest_name="active-adj", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__level = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'isis-level1-2': {'value': 0}, u'isis-level1': {'value': 1}, u'isis-level2': {'value': 2}},), is_leaf=True, yang_name="level", rest_name="level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-level', is_config=False)
self.__auth_mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 0}, u'cleartext': {'value': 1}, u'md5': {'value': 2}},), is_leaf=True, yang_name="auth-mode", rest_name="auth-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='auth-mode', is_config=False)
self.__circ_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="circ-priority", rest_name="circ-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint8', is_config=False)
self.__dis = YANGDynClass(base=unicode, is_leaf=True, yang_name="dis", rest_name="dis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
self.__next_hello = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hello", rest_name="next-hello", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__auth_check = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="auth-check", rest_name="auth-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
self.__hello_mult = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-mult", rest_name="hello-mult", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
    """Return this node's YANG schema path as a list of path components.

    A node attached to a parent contributes its own YANG name to the
    parent's path; a detached node yields the absolute default path of
    this container.
    """
    if not hasattr(self, "_parent"):
        return [u'isis-state', u'interface-detail', u'isis-intf', u'circ-metrics']
    return self._parent._path() + [self._yang_name]
def _rest_path(self):
    """Return this node's REST path as a list of path components.

    With a parent, the parent's REST path is extended by this node's
    rest name when one is set; an empty rest name contributes no extra
    component. Without a parent, the absolute default path is returned.
    """
    if not hasattr(self, "_parent"):
        return [u'isis-state', u'interface-detail', u'isis-intf', u'circ-metrics']
    base = self._parent._rest_path()
    if self._rest_name:
        return base + [self._rest_name]
    return base
def _get_level(self):
    """
    Getter method for level, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/level (isis-level)
    YANG Description: ISIS operation mode
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__level
def _set_level(self, v, load=False):
    """
    Setter method for level, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/level (isis-level)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_level is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_level() directly.
    YANG Description: ISIS operation mode
    """
    # 'level' is a list key (is_keyval=True): once this node is inside an
    # instantiated list, the key may only be changed via load (deserialisation).
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" +
            " within an instantiated list")
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as the generated restricted enum type; an
    # incompatible input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'isis-level1-2': {'value': 0}, u'isis-level1': {'value': 1}, u'isis-level2': {'value': 2}},), is_leaf=True, yang_name="level", rest_name="level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-level', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """level must be of a type compatible with isis-level""",
            'defined-type': "brocade-isis-operational:isis-level",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'isis-level1-2': {'value': 0}, u'isis-level1': {'value': 1}, u'isis-level2': {'value': 2}},), is_leaf=True, yang_name="level", rest_name="level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-level', is_config=False)""",
        })
    self.__level = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_level(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__level = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'isis-level1-2': {'value': 0}, u'isis-level1': {'value': 1}, u'isis-level2': {'value': 2}},), is_leaf=True, yang_name="level", rest_name="level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-level', is_config=False)
def _get_auth_check(self):
    """
    Getter method for auth_check, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/auth_check (isis-status)
    YANG Description: If authentication enabled on incoming IS-IS PDUs
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__auth_check
def _set_auth_check(self, v, load=False):
    """
    Setter method for auth_check, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/auth_check (isis-status)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_auth_check is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_auth_check() directly.
    YANG Description: If authentication enabled on incoming IS-IS PDUs
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as the generated isis-status enum; an incompatible
    # input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="auth-check", rest_name="auth-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """auth_check must be of a type compatible with isis-status""",
            'defined-type': "brocade-isis-operational:isis-status",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="auth-check", rest_name="auth-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)""",
        })
    self.__auth_check = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_auth_check(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__auth_check = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="auth-check", rest_name="auth-check", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)
def _get_auth_mode(self):
    """
    Getter method for auth_mode, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/auth_mode (auth-mode)
    YANG Description: IS-IS authentication mode
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__auth_mode
def _set_auth_mode(self, v, load=False):
    """
    Setter method for auth_mode, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/auth_mode (auth-mode)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_auth_mode is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_auth_mode() directly.
    YANG Description: IS-IS authentication mode
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as the generated auth-mode enum (none/cleartext/md5);
    # an incompatible input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 0}, u'cleartext': {'value': 1}, u'md5': {'value': 2}},), is_leaf=True, yang_name="auth-mode", rest_name="auth-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='auth-mode', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """auth_mode must be of a type compatible with auth-mode""",
            'defined-type': "brocade-isis-operational:auth-mode",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 0}, u'cleartext': {'value': 1}, u'md5': {'value': 2}},), is_leaf=True, yang_name="auth-mode", rest_name="auth-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='auth-mode', is_config=False)""",
        })
    self.__auth_mode = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_auth_mode(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__auth_mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'none': {'value': 0}, u'cleartext': {'value': 1}, u'md5': {'value': 2}},), is_leaf=True, yang_name="auth-mode", rest_name="auth-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='auth-mode', is_config=False)
def _get_auth_key(self):
    """
    Getter method for auth_key, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/auth_key (string)
    YANG Description: IS-IS authentication key
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__auth_key
def _set_auth_key(self, v, load=False):
    """
    Setter method for auth_key, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/auth_key (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_auth_key is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_auth_key() directly.
    YANG Description: IS-IS authentication key
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a unicode-backed YANG string leaf; an
    # incompatible input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="auth-key", rest_name="auth-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """auth_key must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="auth-key", rest_name="auth-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)""",
        })
    self.__auth_key = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_auth_key(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__auth_key = YANGDynClass(base=unicode, is_leaf=True, yang_name="auth-key", rest_name="auth-key", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
def _get_circ_metric(self):
    """
    Getter method for circ_metric, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/circ_metric (uint32)
    YANG Description: ISIS circuit Metric
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__circ_metric
def _set_circ_metric(self, v, load=False):
    """
    Setter method for circ_metric, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/circ_metric (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_circ_metric is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_circ_metric() directly.
    YANG Description: ISIS circuit Metric
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a range-restricted uint32; out-of-range or
    # non-numeric input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-metric", rest_name="circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """circ_metric must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-metric", rest_name="circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__circ_metric = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_circ_metric(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__circ_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="circ-metric", rest_name="circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_ip6_circ_metric(self):
    """
    Getter method for ip6_circ_metric, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/ip6_circ_metric (uint32)
    YANG Description: ISISv6 circuit Metric
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__ip6_circ_metric
def _set_ip6_circ_metric(self, v, load=False):
    """
    Setter method for ip6_circ_metric, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/ip6_circ_metric (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ip6_circ_metric is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ip6_circ_metric() directly.
    YANG Description: ISISv6 circuit Metric
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a range-restricted uint32; out-of-range or
    # non-numeric input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip6-circ-metric", rest_name="ip6-circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ip6_circ_metric must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip6-circ-metric", rest_name="ip6-circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__ip6_circ_metric = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_ip6_circ_metric(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__ip6_circ_metric = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ip6-circ-metric", rest_name="ip6-circ-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_circ_priority(self):
    """
    Getter method for circ_priority, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/circ_priority (uint8)
    YANG Description: Circuit Priority
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__circ_priority
def _set_circ_priority(self, v, load=False):
    """
    Setter method for circ_priority, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/circ_priority (uint8)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_circ_priority is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_circ_priority() directly.
    YANG Description: Circuit Priority
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a range-restricted uint8 (0..255); out-of-range
    # or non-numeric input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="circ-priority", rest_name="circ-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint8', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """circ_priority must be of a type compatible with uint8""",
            'defined-type': "uint8",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="circ-priority", rest_name="circ-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint8', is_config=False)""",
        })
    self.__circ_priority = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_circ_priority(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__circ_priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="circ-priority", rest_name="circ-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint8', is_config=False)
def _get_hello_int(self):
    """
    Getter method for hello_int, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/hello_int (uint32)
    YANG Description: Hello interval
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__hello_int
def _set_hello_int(self, v, load=False):
    """
    Setter method for hello_int, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/hello_int (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_hello_int is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_hello_int() directly.
    YANG Description: Hello interval
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a range-restricted uint32; out-of-range or
    # non-numeric input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-int", rest_name="hello-int", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """hello_int must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-int", rest_name="hello-int", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__hello_int = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_hello_int(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__hello_int = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-int", rest_name="hello-int", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_hello_mult(self):
    """
    Getter method for hello_mult, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/hello_mult (uint32)
    YANG Description: Hello multiplier
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__hello_mult
def _set_hello_mult(self, v, load=False):
    """
    Setter method for hello_mult, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/hello_mult (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_hello_mult is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_hello_mult() directly.
    YANG Description: Hello multiplier
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a range-restricted uint32; out-of-range or
    # non-numeric input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-mult", rest_name="hello-mult", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """hello_mult must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-mult", rest_name="hello-mult", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__hello_mult = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_hello_mult(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__hello_mult = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="hello-mult", rest_name="hello-mult", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_dis(self):
    """
    Getter method for dis, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/dis (string)
    YANG Description: Designated IS
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__dis
def _set_dis(self, v, load=False):
    """
    Setter method for dis, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/dis (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_dis is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_dis() directly.
    YANG Description: Designated IS
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a unicode-backed YANG string leaf; an
    # incompatible input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="dis", rest_name="dis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """dis must be of a type compatible with string""",
            'defined-type': "string",
            'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="dis", rest_name="dis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)""",
        })
    self.__dis = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_dis(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__dis = YANGDynClass(base=unicode, is_leaf=True, yang_name="dis", rest_name="dis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
def _get_dis_ch(self):
    """
    Getter method for dis_ch, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/dis_ch (uint32)
    YANG Description: DIS changes
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__dis_ch
def _set_dis_ch(self, v, load=False):
    """
    Setter method for dis_ch, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/dis_ch (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_dis_ch is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_dis_ch() directly.
    YANG Description: DIS changes
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a range-restricted uint32; out-of-range or
    # non-numeric input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dis-ch", rest_name="dis-ch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """dis_ch must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dis-ch", rest_name="dis-ch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__dis_ch = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_dis_ch(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__dis_ch = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dis-ch", rest_name="dis-ch", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_next_hello(self):
    """
    Getter method for next_hello, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/next_hello (uint32)
    YANG Description: Time remaining until next hello
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__next_hello
def _set_next_hello(self, v, load=False):
    """
    Setter method for next_hello, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/next_hello (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_next_hello is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_next_hello() directly.
    YANG Description: Time remaining until next hello
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a range-restricted uint32; out-of-range or
    # non-numeric input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hello", rest_name="next-hello", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """next_hello must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hello", rest_name="next-hello", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__next_hello = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_next_hello(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__next_hello = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="next-hello", rest_name="next-hello", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_active_adj(self):
    """
    Getter method for active_adj, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/active_adj (uint32)
    YANG Description: Number of active adjacencies
    """
    # Operational-state leaf (is_config=False); value is a YANGDynClass wrapper.
    return self.__active_adj
def _set_active_adj(self, v, load=False):
    """
    Setter method for active_adj, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_metrics/active_adj (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_active_adj is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_active_adj() directly.
    YANG Description: Number of active adjacencies
    """
    # A value arriving as another YANG typed object is first coerced back
    # through its underlying base type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Rebuild the value as a range-restricted uint32; out-of-range or
    # non-numeric input surfaces as the ValueError below.
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="active-adj", rest_name="active-adj", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """active_adj must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="active-adj", rest_name="active-adj", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
        })
    self.__active_adj = t
    # Call the optional _set hook after the leaf is updated.
    if hasattr(self, '_set'):
        self._set()
def _unset_active_adj(self):
    # Reset to a fresh, valueless instance of the generated type (unset state).
    self.__active_adj = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="active-adj", rest_name="active-adj", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
# All leaves in this container are operational state (config: false), so
# they are exposed as read-only properties: only the getter is wired up.
level = __builtin__.property(_get_level)
auth_check = __builtin__.property(_get_auth_check)
auth_mode = __builtin__.property(_get_auth_mode)
auth_key = __builtin__.property(_get_auth_key)
circ_metric = __builtin__.property(_get_circ_metric)
ip6_circ_metric = __builtin__.property(_get_ip6_circ_metric)
circ_priority = __builtin__.property(_get_circ_priority)
hello_int = __builtin__.property(_get_hello_int)
hello_mult = __builtin__.property(_get_hello_mult)
dis = __builtin__.property(_get_dis)
dis_ch = __builtin__.property(_get_dis_ch)
next_hello = __builtin__.property(_get_next_hello)
active_adj = __builtin__.property(_get_active_adj)
# Registry of this container's child elements; iterated by the pyangbind
# machinery (e.g. the copy-from-object path in __init__) to enumerate leaves.
_pyangbind_elements = {'level': level, 'auth_check': auth_check, 'auth_mode': auth_mode, 'auth_key': auth_key, 'circ_metric': circ_metric, 'ip6_circ_metric': ip6_circ_metric, 'circ_priority': circ_priority, 'hello_int': hello_int, 'hello_mult': hello_mult, 'dis': dis, 'dis_ch': dis_ch, 'next_hello': next_hello, 'active_adj': active_adj, }
| 70.265781
| 610
| 0.725839
| 5,692
| 42,300
| 5.123858
| 0.038651
| 0.040459
| 0.081468
| 0.050883
| 0.886096
| 0.860175
| 0.840665
| 0.83103
| 0.821944
| 0.816595
| 0
| 0.016329
| 0.14435
| 42,300
| 601
| 611
| 70.382696
| 0.789468
| 0.182057
| 0
| 0.48494
| 0
| 0.039157
| 0.360276
| 0.189733
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126506
| false
| 0
| 0.024096
| 0
| 0.262048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
774192f08d5bb7ebb989858494927682c42cfedf
| 3,802
|
py
|
Python
|
pkgs/sdk-pkg/src/genie/libs/sdk/triggers/ha/reload/iosxe/reload.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | null | null | null |
pkgs/sdk-pkg/src/genie/libs/sdk/triggers/ha/reload/iosxe/reload.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | null | null | null |
pkgs/sdk-pkg/src/genie/libs/sdk/triggers/ha/reload/iosxe/reload.py
|
miott/genielibs
|
6464642cdd67aa2367bdbb12561af4bb060e5e62
|
[
"Apache-2.0"
] | null | null | null |
'''IOSXE implementation for Reload triggers'''
# import ats
from ats import aetest
# Genie Libs
from genie.libs.sdk.triggers.ha.ha import \
TriggerReload as CommonReload, \
TriggerReloadLc
class TriggerReload(CommonReload):
    """Generic IOSXE device reload trigger (placeholder — not implemented)."""

    @aetest.setup
    def verify_prerequisite(self, uut, abstract, steps, timeout):
        '''Skip immediately: there is no generic iosxe HA reload support.

        Args:
            uut (`obj`): Device object.
            abstract (`obj`): Abstract object.
            steps (`step obj`): aetest step object
            timeout (`timeout obj`): Timeout Object

        Returns:
            None

        Raises:
            pyATS Results
        '''
        reason = 'No implementation for generic iosxe HA reload'
        self.skipped(reason, goto=['next_tc'])
class TriggerReloadActiveRP(CommonReload):
    """Active-RP reload trigger for IOSXE (placeholder — not implemented)."""

    @aetest.setup
    def verify_prerequisite(self, uut, abstract, steps, timeout):
        '''Setup step: always skips, since generic iosxe HA reload of the
        active RP is not implemented yet.

        Args:
            uut (`obj`): Device object.
            abstract (`obj`): Abstract object.
            steps (`step obj`): aetest step object
            timeout (`timeout obj`): Timeout Object

        Returns:
            None

        Raises:
            pyATS Results
        '''
        reason = 'No implementation for generic iosxe HA reload'
        self.skipped(reason, goto=['next_tc'])
class TriggerReloadStandbyRP(CommonReload):
    """Standby-RP reload trigger for IOSXE (placeholder — not implemented)."""

    @aetest.setup
    def verify_prerequisite(self, uut, abstract, steps, timeout):
        '''Skip to the next testcase: no generic iosxe HA reload support.

        Args:
            uut (`obj`): Device object.
            abstract (`obj`): Abstract object.
            steps (`step obj`): aetest step object
            timeout (`timeout obj`): Timeout Object

        Returns:
            None

        Raises:
            pyATS Results
        '''
        reason = 'No implementation for generic iosxe HA reload'
        self.skipped(reason, goto=['next_tc'])
class TriggerReloadMember(TriggerReloadLc):
    """Stack-member reload trigger for IOSXE (placeholder — not implemented)."""

    @aetest.setup
    def verify_prerequisite(self, uut, abstract, steps, timeout):
        '''Always skips this testcase; member reload on generic iosxe is
        not implemented.

        Args:
            uut (`obj`): Device object.
            abstract (`obj`): Abstract object.
            steps (`step obj`): aetest step object
            timeout (`timeout obj`): Timeout Object

        Returns:
            None

        Raises:
            pyATS Results
        '''
        reason = 'No implementation for generic iosxe HA reload'
        self.skipped(reason, goto=['next_tc'])
class TriggerReloadActiveFP(TriggerReloadLc):
    """Active-FP reload trigger for IOSXE (placeholder — not implemented)."""

    @aetest.setup
    def verify_prerequisite(self, uut, abstract, steps, timeout):
        '''Setup step that unconditionally skips: generic iosxe HA reload
        of the active FP has no implementation.

        Args:
            uut (`obj`): Device object.
            abstract (`obj`): Abstract object.
            steps (`step obj`): aetest step object
            timeout (`timeout obj`): Timeout Object

        Returns:
            None

        Raises:
            pyATS Results
        '''
        reason = 'No implementation for generic iosxe HA reload'
        self.skipped(reason, goto=['next_tc'])
| 28.162963
| 71
| 0.548659
| 370
| 3,802
| 5.610811
| 0.162162
| 0.072254
| 0.033719
| 0.04817
| 0.879576
| 0.879576
| 0.879576
| 0.879576
| 0.879576
| 0.879576
| 0
| 0
| 0.372173
| 3,802
| 135
| 72
| 28.162963
| 0.869711
| 0.449237
| 0
| 0.689655
| 0
| 0
| 0.184006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.068966
| 0
| 0.413793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
774d460a44b1fd2cd9d90a55b8b07a67072600b3
| 276
|
py
|
Python
|
data-mining/cluster-analysis/assignment/clustering-data/python/evaluation.py
|
4979/courses
|
dd9efa0a6b60cead833f36a6bfa518dd4fece17f
|
[
"Apache-2.0"
] | null | null | null |
data-mining/cluster-analysis/assignment/clustering-data/python/evaluation.py
|
4979/courses
|
dd9efa0a6b60cead833f36a6bfa518dd4fece17f
|
[
"Apache-2.0"
] | null | null | null |
data-mining/cluster-analysis/assignment/clustering-data/python/evaluation.py
|
4979/courses
|
dd9efa0a6b60cead833f36a6bfa518dd4fece17f
|
[
"Apache-2.0"
] | null | null | null |
from math import log, sqrt
def purity(groundtruthAssignment, algorithmAssignment):
    """Compute the purity of a clustering against ground-truth labels.

    Each algorithm cluster is credited with the count of its most frequent
    ground-truth label; purity is the total credit over the number of points.

    Args:
        groundtruthAssignment: ground-truth label per point.
        algorithmAssignment: cluster id per point (same length and order).

    Returns:
        float in [0, 1]; 0 for empty input.
    """
    n = len(algorithmAssignment)
    if n == 0:
        return 0
    # Build a ground-truth-label histogram for every algorithm cluster.
    clusters = {}
    for label, cluster in zip(groundtruthAssignment, algorithmAssignment):
        hist = clusters.setdefault(cluster, {})
        hist[label] = hist.get(label, 0) + 1
    # Majority label of each cluster contributes its count.
    return sum(max(hist.values()) for hist in clusters.values()) / n
def NMI(groundtruthAssignment, algorithmAssignment):
    """Compute the Normalized Mutual Information of a clustering.

    NMI = I(T; C) / sqrt(H(T) * H(C)) where T is the ground-truth labeling
    and C the algorithm's cluster assignment (natural log cancels out).

    Args:
        groundtruthAssignment: ground-truth label per point.
        algorithmAssignment: cluster id per point (same length and order).

    Returns:
        float in [0, 1]; 0 for empty input or when either partition has a
        single block (entropy zero makes NMI undefined).
    """
    n = len(algorithmAssignment)
    if n == 0:
        return 0
    # Joint and marginal count tables.
    joint = {}
    truth_counts = {}
    cluster_counts = {}
    for t, c in zip(groundtruthAssignment, algorithmAssignment):
        joint[(t, c)] = joint.get((t, c), 0) + 1
        truth_counts[t] = truth_counts.get(t, 0) + 1
        cluster_counts[c] = cluster_counts.get(c, 0) + 1
    h_t = -sum((cnt / n) * log(cnt / n) for cnt in truth_counts.values())
    h_c = -sum((cnt / n) * log(cnt / n) for cnt in cluster_counts.values())
    if h_t == 0 or h_c == 0:
        # Degenerate partition: zero entropy, NMI undefined — report 0.
        return 0
    mi = 0.0
    for (t, c), cnt in joint.items():
        p_tc = cnt / n
        # p_tc * log(p_tc / (p_t * p_c)), rearranged to avoid tiny divisions.
        mi += p_tc * log(p_tc * n * n / (truth_counts[t] * cluster_counts[c]))
    return mi / sqrt(h_t * h_c)
| 15.333333
| 56
| 0.67029
| 29
| 276
| 6.37931
| 0.517241
| 0.432432
| 0.12973
| 0.162162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0.275362
| 276
| 17
| 57
| 16.235294
| 0.915
| 0.17029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
775bc3bb5cf03f67a9509200927ca3defa6a2451
| 11,302
|
py
|
Python
|
leap/datasets/sim_dataset.py
|
weirayao/leap
|
8d10b8413d02d3be49d5c02a13a0aa60a741d8da
|
[
"MIT"
] | 7
|
2022-01-06T18:37:57.000Z
|
2022-03-20T17:11:30.000Z
|
leap/datasets/sim_dataset.py
|
weirayao/leap
|
8d10b8413d02d3be49d5c02a13a0aa60a741d8da
|
[
"MIT"
] | null | null | null |
leap/datasets/sim_dataset.py
|
weirayao/leap
|
8d10b8413d02d3be49d5c02a13a0aa60a741d8da
|
[
"MIT"
] | null | null | null |
import os
import glob
import torch
import random
import numpy as np
from torch.utils.data import Dataset
import ipdb as pdb
class SimulationDataset(Dataset):
    """Simulation dataset backed by a single ``data.npz`` archive.

    Yields per-item tensors: ``yt``/``xt`` (latent/observed sequences) and
    ``yt_``/``xt_`` (the next step, with a leading singleton dimension).
    """

    def __init__(self, directory, transition="linear_nongaussian"):
        super().__init__()
        # BUG FIX: "post_nonlinear_nongaussian" was listed twice; deduplicated.
        assert transition in ["linear_nongaussian", "post_nonlinear_gaussian",
                              "post_nonlinear_nongaussian"]
        self.path = os.path.join(directory, transition, "data.npz")
        self.npz = np.load(self.path)
        self.data = { }
        for key in ["yt", "xt", "yt_", "xt_"]:
            self.data[key] = self.npz[key]

    def __len__(self):
        # One sample per row of the latent sequence array.
        return len(self.data["yt"])

    def __getitem__(self, idx):
        yt = torch.from_numpy(self.data["yt"][idx].astype('float32'))
        xt = torch.from_numpy(self.data["xt"][idx].astype('float32'))
        # Next-step targets get a leading singleton (length-1) axis.
        yt_ = torch.from_numpy(self.data["yt_"][idx].astype('float32')).unsqueeze(0)
        xt_ = torch.from_numpy(np.expand_dims(self.data["xt_"][idx], axis=0).astype('float32'))
        sample = {"yt": yt, "yt_": yt_, "xt": xt, "xt_": xt_}
        return sample
class SimulationDatasetTwoSample(Dataset):
    """Simulation dataset returning the indexed sample plus a random second
    sample, with observations min-max normalized over the next-step data.

    BUG FIXES vs. the original:
      * ``DIR`` was an undefined global — the data root is now an explicit
        ``directory`` parameter (matching the sibling dataset classes).
      * ``xt``/``xtr`` were computed as ``x - min / (max - min)`` due to a
        misplaced parenthesis; now ``(x - min) / (max - min)`` like ``xt_``.
      * The duplicated "post_nonlinear_nongaussian" list entry was removed.
    """

    def __init__(self, directory, transition="linear_nongaussian"):
        super().__init__()
        assert transition in ["linear_nongaussian", "post_nonlinear_gaussian",
                              "post_nonlinear_nongaussian"]
        self.path = os.path.join(directory, transition, "data.npz")
        self.npz = np.load(self.path)
        self.data = { }
        for key in ["yt", "xt", "yt_", "xt_"]:
            self.data[key] = self.npz[key]
        # Per-feature min/max over the next-step observations, kept 2-D so
        # they broadcast against both sequence and single-step arrays.
        self.min = np.min(self.data["xt_"], axis=0).reshape(1, -1)
        self.max = np.max(self.data["xt_"], axis=0).reshape(1, -1)

    def __len__(self):
        return len(self.data["yt"])

    def __getitem__(self, idx):
        span = self.max - self.min
        yt = torch.from_numpy(self.data["yt"][idx])
        xt = torch.from_numpy((self.data["xt"][idx] - self.min) / span)
        yt_ = torch.from_numpy(self.data["yt_"][idx]).unsqueeze(0)
        xt_ = torch.from_numpy((np.expand_dims(self.data["xt_"][idx], axis=0) - self.min) / span)
        sample1 = {"yt": yt,
                   "yt_": yt_,
                   "xt": xt,
                   "xt_": xt_}
        # Second, independently drawn sample from the same dataset.
        idx_rnd = random.randint(0, len(self.data["yt"]) - 1)
        ytr = torch.from_numpy(self.data["yt"][idx_rnd])
        xtr = torch.from_numpy((self.data["xt"][idx_rnd] - self.min) / span)
        ytr_ = torch.from_numpy(self.data["yt_"][idx_rnd]).unsqueeze(0)
        xtr_ = torch.from_numpy((np.expand_dims(self.data["xt_"][idx_rnd], axis=0) - self.min) / span)
        sample2 = {"yt": ytr,
                   "yt_": ytr_,
                   "xt": xtr,
                   "xt_": xtr_}
        return sample1, sample2
class TupleDataset(torch.utils.data.Dataset):
    """Dataset of fixed-size tuples read from per-datum ``.npz`` files.

    A ``<split>.txt`` manifest lists one datum file per line; every datum
    contributes ``samples_per_datum`` samples addressed by a flat index.

    BUG FIX: the original referenced an undefined global ``DIR``; the
    manifest directory is now an explicit keyword parameter.
    """

    def __init__(self, split: str = "train", directory: str = "."):
        super().__init__()
        assert split in ("train", "val")
        with open(os.path.join(directory, "%s.txt" % split), 'r') as f:
            self.datum_names = [datum_name.rstrip() for datum_name in f.readlines()]
        # Normalize Windows-style path separators once at load time, instead
        # of rebuilding the whole list on every __getitem__ call.
        self.datum_names = [name.replace('\\', '/') for name in self.datum_names]
        self.samples_per_datum = 64

    def __len__(self):
        return len(self.datum_names) * self.samples_per_datum

    def __getitem__(self, idx):
        # Flat index -> (which datum file, which sample inside it).
        datum_idx = idx // self.samples_per_datum
        sample_idx = idx % self.samples_per_datum
        datum = np.load(self.datum_names[datum_idx])
        # latent factor
        # yt = y_t (batch_size, length, size)
        # yt_ = y_(t+1) (batch_size, 1, size)
        # observed variable
        # xt = x_t (batch_size, length, size)
        # xt_ = x_(t+1) (batch_size, 1, size)
        sample = {"yt": torch.from_numpy(datum["yt"][sample_idx]),
                  "yt_": torch.from_numpy(datum["yt_"][sample_idx]),
                  "xt": torch.from_numpy(datum["xt"][sample_idx]),
                  "xt_": torch.from_numpy(datum["xt_"][sample_idx])}
        return sample
class SimulationDatasetTS(Dataset):
    """Time-series simulation dataset backed by one ``data.npz`` archive.

    Each item is a dict with float32 tensors ``yt`` (latents) and ``xt``
    (observations) for a single sequence.
    """

    def __init__(self, directory, transition="linear_nongaussian_ts"):
        super().__init__()
        assert transition in ["linear_nongaussian_ts", "nonlinear_gaussian_ts",
                              "nonlinear_nongaussian_ts", "pnl_nongaussian_ts"]
        self.path = os.path.join(directory, transition, "data.npz")
        self.npz = np.load(self.path)
        self.data = {name: self.npz[name] for name in ("yt", "xt")}

    def __len__(self):
        return len(self.data["yt"])

    def __getitem__(self, idx):
        def as_f32(arr):
            return torch.from_numpy(arr.astype('float32'))
        return {"yt": as_f32(self.data["yt"][idx]),
                "xt": as_f32(self.data["xt"][idx])}
class SimulationDatasetTSTwoSample(Dataset):
    """Time-series dataset that pairs the indexed sample ``s1`` with an
    independently drawn random sample ``s2`` from the same archive."""

    def __init__(self, directory, transition="linear_nongaussian_ts"):
        super().__init__()
        assert transition in ["linear_nongaussian_ts", "nonlinear_gaussian_ts", "nonlinear_gaussian_sparse_ts",
                              "nonlinear_nongaussian_ts", "pnl_nongaussian_ts", "instan_temporal", "case1_dependency"]
        self.path = os.path.join(directory, transition, "data.npz")
        self.npz = np.load(self.path)
        self.data = {name: self.npz[name] for name in ("yt", "xt")}

    def __len__(self):
        return len(self.data["yt"])

    def __getitem__(self, idx):
        def fetch(i):
            # float32 tensors for one sequence.
            return {"yt": torch.from_numpy(self.data["yt"][i].astype('float32')),
                    "xt": torch.from_numpy(self.data["xt"][i].astype('float32'))}
        other = random.randint(0, len(self.data["yt"]) - 1)
        return {"s1": fetch(idx), "s2": fetch(other)}
class SimulationDatasetTSTwoSampleNS(Dataset):
    """Nonstationary variant of the two-sample dataset: every sample also
    carries a context array ``ct`` alongside ``yt`` and ``xt``."""

    def __init__(self, directory, transition="linear_nongaussian_ts"):
        super().__init__()
        self.path = os.path.join(directory, transition, "data.npz")
        self.npz = np.load(self.path)
        self.data = {name: self.npz[name] for name in ("yt", "xt", "ct")}

    def __len__(self):
        return len(self.data["yt"])

    def __getitem__(self, idx):
        keys = ("yt", "xt", "ct")

        def fetch(i):
            return {k: torch.from_numpy(self.data[k][i].astype('float32')) for k in keys}

        other = random.randint(0, len(self.data["yt"]) - 1)
        return {"s1": fetch(idx), "s2": fetch(other)}
class SimulationDatasetPCL(Dataset):
    """Dataset for permutation-based contrastive learning (PCL).

    Besides two independent samples ``s1``/``s2``, each item carries a
    positive pair (a window's current step with its own history) and a
    negative pair (the same current step with a random sample's history).
    """

    def __init__(self, directory, transition="linear_nongaussian_ts", lags=2):
        super().__init__()
        assert transition in ["linear_nongaussian_ts", "nonlinear_gaussian_ts", "nonlinear_gaussian_sparse_ts", "nonlinear_gau_cins",
                              "nonlinear_nongaussian_ts", "nonlinear_ns", "nonlinear_gau_ns", "nonlinear_gau_cins_sparse"]
        self.path = os.path.join(directory, transition, "data.npz")
        self.npz = np.load(self.path)
        # Number of lagged time steps used as "history" by seq_to_pairs.
        self.L = lags
        self.data = { }
        for key in ["yt", "xt"]:
            self.data[key] = self.npz[key]

    def __len__(self):
        # One item per stored sequence.
        return len(self.data["yt"])

    def __getitem__(self, idx):
        yt = torch.from_numpy(self.data["yt"][idx].astype('float32'))
        xt = torch.from_numpy(self.data["xt"][idx].astype('float32'))
        xt_cur, xt_his = self.seq_to_pairs(xt)
        # Independently drawn second sample for the contrastive negatives.
        idx_rnd = random.randint(0, len(self.data["yt"])-1)
        ytr = torch.from_numpy(self.data["yt"][idx_rnd].astype('float32'))
        xtr = torch.from_numpy(self.data["xt"][idx_rnd].astype('float32'))
        xtr_cur, xtr_his = self.seq_to_pairs(xtr)
        # pos: current step joined with its true history (label 1);
        # neg: SAME current step joined with the random sample's history
        # (label 0). NOTE(review): xtr_cur is computed but unused and the
        # neg pair reuses xt_cur — presumably intentional for PCL; confirm.
        xt_cat = torch.cat((xt_cur, xt_his), dim=1)
        xtr_cat = torch.cat((xt_cur, xtr_his), dim=1)
        sample = {"s1": {"yt": yt, "xt": xt},
                  "s2": {"yt": ytr, "xt": xtr},
                  "pos": {"x": xt_cat, "y": 1},
                  "neg": {"x": xtr_cat, "y": 0}
                  }
        return sample

    def seq_to_pairs(self, x):
        """Split a sequence into (current step, L-step history) sliding pairs.

        Returns ``xx`` (windows' last step, kept as length-1 slice) and
        ``yy`` (the preceding L steps) for every window of length L+1.
        """
        # Sliding windows of length L+1 along time, then move the window
        # content back onto axis 1 so indexing below is over time steps.
        x = x.unfold(dimension = 0, size = self.L+1, step = 1)
        x = torch.swapaxes(x, 1, 2)
        xx, yy = x[:,-1:], x[:,:-1]
        return xx, yy
class SimulationDatasetPCLNS(Dataset):
    """Nonstationary variant of the PCL dataset: identical construction of
    positive/negative window pairs, plus a context array ``ct`` per sample.
    """

    def __init__(self, directory, transition="linear_nongaussian_ts", lags=2):
        super().__init__()
        assert transition in ["linear_nongaussian_ts", "nonlinear_gaussian_ts", "nonlinear_gaussian_sparse_ts", "nonlinear_gau_cins",
                              "nonlinear_nongaussian_ts", "nonlinear_ns", "nonlinear_gau_ns", "nonlinear_gau_cins_sparse"]
        self.path = os.path.join(directory, transition, "data.npz")
        self.npz = np.load(self.path)
        # Number of lagged time steps treated as "history" by seq_to_pairs.
        self.L = lags
        self.data = { }
        for key in ["yt", "xt", "ct"]:
            self.data[key] = self.npz[key]

    def __len__(self):
        # One item per stored sequence.
        return len(self.data["yt"])

    def __getitem__(self, idx):
        yt = torch.from_numpy(self.data["yt"][idx].astype('float32'))
        xt = torch.from_numpy(self.data["xt"][idx].astype('float32'))
        ct = torch.from_numpy(self.data["ct"][idx].astype('float32'))
        xt_cur, xt_his = self.seq_to_pairs(xt)
        # Independently drawn second sample for the contrastive negatives.
        idx_rnd = random.randint(0, len(self.data["yt"])-1)
        ytr = torch.from_numpy(self.data["yt"][idx_rnd].astype('float32'))
        xtr = torch.from_numpy(self.data["xt"][idx_rnd].astype('float32'))
        ctr = torch.from_numpy(self.data["ct"][idx_rnd].astype('float32'))
        xtr_cur, xtr_his = self.seq_to_pairs(xtr)
        # pos: current step with its own history (label 1); neg: the SAME
        # current step with the random sample's history (label 0).
        # NOTE(review): xtr_cur is unused, mirroring SimulationDatasetPCL —
        # presumably intentional; confirm.
        xt_cat = torch.cat((xt_cur, xt_his), dim=1)
        xtr_cat = torch.cat((xt_cur, xtr_his), dim=1)
        sample = {"s1": {"yt": yt, "xt": xt, "ct": ct},
                  "s2": {"yt": ytr, "xt": xtr, "ct": ctr},
                  "pos": {"x": xt_cat, "y": 1},
                  "neg": {"x": xtr_cat, "y": 0}
                  }
        return sample

    def seq_to_pairs(self, x):
        """Split a sequence into (current step, L-step history) sliding pairs."""
        # Sliding windows of length L+1 along time; swapaxes restores the
        # (window, time, feature) layout before slicing off current/history.
        x = x.unfold(dimension = 0, size = self.L+1, step = 1)
        x = torch.swapaxes(x, 1, 2)
        xx, yy = x[:,-1:], x[:,:-1]
        return xx, yy
class DANS(Dataset):
    """Dataset for domain-adaptation experiments backed by ``data.npz``.

    Items are float32 tensors: ``y`` (latents), ``x`` (observations) and
    ``c`` (a per-item scalar kept as a length-1 tensor).
    """

    def __init__(self, directory, dataset="da_10"):
        super().__init__()
        self.path = os.path.join(directory, dataset, "data.npz")
        self.npz = np.load(self.path)
        self.data = {field: self.npz[field] for field in ("y", "x", "c")}

    def __len__(self):
        return len(self.data["y"])

    def __getitem__(self, idx):
        def as_f32(arr):
            return torch.from_numpy(arr.astype('float32'))
        # ``[idx, None]`` keeps c as a length-1 array rather than a scalar.
        return {"y": as_f32(self.data["y"][idx]),
                "x": as_f32(self.data["x"][idx]),
                "c": as_f32(self.data["c"][idx, None])}
| 41.098182
| 133
| 0.580871
| 1,507
| 11,302
| 4.096881
| 0.088918
| 0.088111
| 0.092971
| 0.099125
| 0.843213
| 0.810334
| 0.786524
| 0.77713
| 0.728377
| 0.717687
| 0
| 0.01467
| 0.246063
| 11,302
| 275
| 134
| 41.098182
| 0.709893
| 0.017077
| 0
| 0.662222
| 0
| 0
| 0.132126
| 0.058813
| 0
| 0
| 0
| 0
| 0.031111
| 1
| 0.128889
| false
| 0
| 0.031111
| 0.04
| 0.288889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6200e1b1ca6a347279d51d109eef180ba174fd5b
| 90
|
py
|
Python
|
src/__init__.py
|
HaritzPuerto/Entity_Extractor
|
30abbbbb45eb2cbdc836ee00dd4e38137ef653a0
|
[
"Apache-2.0"
] | 2
|
2022-03-14T12:46:06.000Z
|
2022-03-31T09:14:56.000Z
|
src/__init__.py
|
HaritzPuerto/Entity_Extractor
|
30abbbbb45eb2cbdc836ee00dd4e38137ef653a0
|
[
"Apache-2.0"
] | 1
|
2022-03-20T14:18:04.000Z
|
2022-03-20T14:18:04.000Z
|
src/__init__.py
|
HaritzPuerto/Entity_Extractor
|
30abbbbb45eb2cbdc836ee00dd4e38137ef653a0
|
[
"Apache-2.0"
] | null | null | null |
from .SRL import SRL_model
from .NER import Entity_model
from .Flair_NER import Flair_NER
| 22.5
| 32
| 0.833333
| 16
| 90
| 4.4375
| 0.4375
| 0.253521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 90
| 3
| 33
| 30
| 0.910256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6217e3d9ff366fbd5edc0aa942ec7886403f289c
| 49
|
py
|
Python
|
201801/t3.py
|
xyFryan/python3
|
9c9b2e6be4e27fa7f78bb2b1c4c3146d3c174741
|
[
"MIT"
] | null | null | null |
201801/t3.py
|
xyFryan/python3
|
9c9b2e6be4e27fa7f78bb2b1c4c3146d3c174741
|
[
"MIT"
] | null | null | null |
201801/t3.py
|
xyFryan/python3
|
9c9b2e6be4e27fa7f78bb2b1c4c3146d3c174741
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""Tiny driver script: invoke ``print_test`` from the local ``test`` module."""
# NOTE(review): ``test`` shadows the standard-library ``test`` package;
# presumably a local module next to this script — confirm.
import test

test.print_test()  # stray trailing semicolon removed (not needed in Python)
| 16.333333
| 18
| 0.755102
| 8
| 49
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0.061224
| 49
| 3
| 19
| 16.333333
| 0.76087
| 0.346939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
62270c545d771421ce9c6674abf357cbcd9be1a9
| 28
|
py
|
Python
|
server/__init__.py
|
Jugbot/AWS-Mechanical-Turk-Audio-Classification
|
a65c8f258b6a8fd8dc55c75c688b08eb51907c60
|
[
"MIT"
] | 1
|
2019-04-05T16:26:51.000Z
|
2019-04-05T16:26:51.000Z
|
server/__init__.py
|
Jugbot/AWS-Mechanical-Turk-Audio-Classification
|
a65c8f258b6a8fd8dc55c75c688b08eb51907c60
|
[
"MIT"
] | null | null | null |
server/__init__.py
|
Jugbot/AWS-Mechanical-Turk-Audio-Classification
|
a65c8f258b6a8fd8dc55c75c688b08eb51907c60
|
[
"MIT"
] | null | null | null |
from server.main import app
| 14
| 27
| 0.821429
| 5
| 28
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6230323b42853ba1481e85e248e497f79da3fe56
| 258
|
py
|
Python
|
src/domain/system.py
|
gmdlba/simulation
|
d47b58417bf7380f2bbf552275f9b3e51253e1a5
|
[
"MIT"
] | null | null | null |
src/domain/system.py
|
gmdlba/simulation
|
d47b58417bf7380f2bbf552275f9b3e51253e1a5
|
[
"MIT"
] | null | null | null |
src/domain/system.py
|
gmdlba/simulation
|
d47b58417bf7380f2bbf552275f9b3e51253e1a5
|
[
"MIT"
] | null | null | null |
class System_Status():
    """Holds the state of the simulated plant as a list of values."""

    def __init__(self, plant_state=None):
        # Default to a fresh list per instance (avoids the shared
        # mutable-default pitfall).
        self.plant_state = [1, 1, 1] if plant_state is None else plant_state

    def __str__(self):
        return f"El estado de la planta es {self.plant_state}"
| 32.25
| 62
| 0.631783
| 38
| 258
| 3.894737
| 0.552632
| 0.405405
| 0.283784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016043
| 0.275194
| 258
| 8
| 62
| 32.25
| 0.775401
| 0
| 0
| 0
| 0
| 0
| 0.169884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
624eaff1c656c15da5c07040c0e7ff8f095597be
| 2,722
|
py
|
Python
|
dl/models/vgg/vgg.py
|
jjjkkkjjj/pytorch.dl
|
d82aa1191c14f328c62de85e391ac6fa1b4c7ee3
|
[
"MIT"
] | 2
|
2021-02-06T22:40:13.000Z
|
2021-03-26T09:15:34.000Z
|
dl/models/vgg/vgg.py
|
jjjkkkjjj/pytorch.dl
|
d82aa1191c14f328c62de85e391ac6fa1b4c7ee3
|
[
"MIT"
] | 8
|
2020-07-11T07:10:51.000Z
|
2022-03-12T00:39:03.000Z
|
dl/models/vgg/vgg.py
|
jjjkkkjjj/pytorch.dl
|
d82aa1191c14f328c62de85e391ac6fa1b4c7ee3
|
[
"MIT"
] | 2
|
2021-03-26T09:19:42.000Z
|
2021-07-27T02:38:09.000Z
|
from torch import nn
from ...models.vgg.base import VGGBase
from ..layers import Conv2d
from collections import OrderedDict
"""
https://stackoverflow.com/questions/55140554/convolutional-encoder-error-runtimeerror-input-and-target-shapes-do-not-matc/55143487#55143487
Here is the formula;
N --> Input Size, F --> Filter Size, stride-> Stride Size, pdg-> Padding size
ConvTranspose2d;
OutputSize = N*stride + F - stride - pdg*2
Conv2d;
OutputSize = (N - F)/stride + 1 + pdg*2/stride [e.g. 32/3=10 it ignores after the comma]
"""
class VGGBase11_bn(VGGBase):
    """VGG-11 backbone with batch normalization enabled."""

    def __init__(self, input_channels, **kwargs):
        # Global toggle read by Conv2d.block_relumpool.
        Conv2d.batch_norm = True
        # (block name, conv count, in channels, out channels)
        specs = [('1', 1, input_channels, 64),
                 ('2', 1, 64, 128),
                 ('3', 2, 128, 256),
                 ('4', 2, 256, 512),
                 ('5', 2, 512, 512)]
        conv_layers = []
        for name, depth, c_in, c_out in specs:
            conv_layers.extend(Conv2d.block_relumpool(name, depth, c_in, c_out))
        super().__init__(model_name='vgg11_bn',
                         conv_layers=nn.ModuleDict(OrderedDict(conv_layers)),
                         **kwargs)
class VGGBase11(VGGBase):
    """VGG-11 backbone without batch normalization."""

    def __init__(self, input_channels, **kwargs):
        # Global toggle read by Conv2d.block_relumpool.
        Conv2d.batch_norm = False
        # (block name, conv count, in channels, out channels)
        specs = [('1', 1, input_channels, 64),
                 ('2', 1, 64, 128),
                 ('3', 2, 128, 256),
                 ('4', 2, 256, 512),
                 ('5', 2, 512, 512)]
        conv_layers = []
        for name, depth, c_in, c_out in specs:
            conv_layers.extend(Conv2d.block_relumpool(name, depth, c_in, c_out))
        super().__init__(model_name='vgg11',
                         conv_layers=nn.ModuleDict(OrderedDict(conv_layers)),
                         **kwargs)
class VGGBase16_bn(VGGBase):
    """VGG-16 backbone with batch normalization enabled."""

    def __init__(self, input_channels, **kwargs):
        # Global toggle read by Conv2d.block_relumpool.
        Conv2d.batch_norm = True
        # (block name, conv count, in channels, out channels)
        specs = [('1', 2, input_channels, 64),
                 ('2', 2, 64, 128),
                 ('3', 3, 128, 256),
                 ('4', 3, 256, 512),
                 ('5', 3, 512, 512)]
        conv_layers = []
        for name, depth, c_in, c_out in specs:
            conv_layers.extend(Conv2d.block_relumpool(name, depth, c_in, c_out))
        super().__init__(model_name='vgg16_bn',
                         conv_layers=nn.ModuleDict(OrderedDict(conv_layers)),
                         **kwargs)
class VGGBase16(VGGBase):
    """VGG-16 backbone without batch normalization."""

    def __init__(self, input_channels, **kwargs):
        # Global toggle read by Conv2d.block_relumpool.
        Conv2d.batch_norm = False
        # (block name, conv count, in channels, out channels)
        specs = [('1', 2, input_channels, 64),
                 ('2', 2, 64, 128),
                 ('3', 3, 128, 256),
                 ('4', 3, 256, 512),
                 ('5', 3, 512, 512)]
        conv_layers = []
        for name, depth, c_in, c_out in specs:
            conv_layers.extend(Conv2d.block_relumpool(name, depth, c_in, c_out))
        super().__init__(model_name='vgg16',
                         conv_layers=nn.ModuleDict(OrderedDict(conv_layers)),
                         **kwargs)
| 29.912088
| 139
| 0.6205
| 339
| 2,722
| 4.734513
| 0.238938
| 0.137072
| 0.249221
| 0.04486
| 0.739564
| 0.739564
| 0.739564
| 0.739564
| 0.709034
| 0.709034
| 0
| 0.10292
| 0.23255
| 2,722
| 90
| 140
| 30.244444
| 0.66539
| 0
| 0
| 0.625
| 0
| 0
| 0.019896
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
658f841c46696234266c0bbf55da63fdc81b7dff
| 122
|
py
|
Python
|
subdomains/admin.py
|
sjy5386/subshorts
|
d8170ee4a66989c3e852f86aa83bab6341e3aa10
|
[
"MIT"
] | 3
|
2022-03-08T19:02:41.000Z
|
2022-03-16T23:04:37.000Z
|
subdomains/admin.py
|
sjy5386/subshorts
|
d8170ee4a66989c3e852f86aa83bab6341e3aa10
|
[
"MIT"
] | 5
|
2022-03-17T02:16:52.000Z
|
2022-03-18T02:55:25.000Z
|
subdomains/admin.py
|
sjy5386/subshorts
|
d8170ee4a66989c3e852f86aa83bab6341e3aa10
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

# Explicit imports instead of ``from .models import *`` so readers and
# linters can see exactly which models this admin module registers.
from .models import Subdomain, ReservedName

admin.site.register(Subdomain)
admin.site.register(ReservedName)
| 17.428571
| 33
| 0.811475
| 16
| 122
| 6.1875
| 0.625
| 0.222222
| 0.343434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098361
| 122
| 6
| 34
| 20.333333
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
65edf682b39992b397ab3a02ee656721d844a71d
| 191
|
py
|
Python
|
pytorch_mask_rcnn/datasets/__init__.py
|
JinchengHeRyan/STATS402_Final_MaskRcnn
|
c8103751008afb2f969c7e321a7e843a50c0f681
|
[
"MIT"
] | null | null | null |
pytorch_mask_rcnn/datasets/__init__.py
|
JinchengHeRyan/STATS402_Final_MaskRcnn
|
c8103751008afb2f969c7e321a7e843a50c0f681
|
[
"MIT"
] | null | null | null |
pytorch_mask_rcnn/datasets/__init__.py
|
JinchengHeRyan/STATS402_Final_MaskRcnn
|
c8103751008afb2f969c7e321a7e843a50c0f681
|
[
"MIT"
] | null | null | null |
from .utils import *
# Optional extra: COCO evaluation helpers. NOTE(review): the ImportError
# guard suggests ``coco_eval`` pulls in an optional third-party package
# (likely pycocotools) — confirm.
try:
    from .coco_eval import CocoEvaluator, prepare_for_coco
except ImportError:
    pass
# Optional extra: DALI-accelerated data loader; silently absent when the
# ``dali`` module (or its dependencies) is not installed.
try:
    from .dali import DALICOCODataLoader
except ImportError:
    pass
| 15.916667
| 58
| 0.748691
| 23
| 191
| 6.086957
| 0.608696
| 0.1
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204188
| 191
| 11
| 59
| 17.363636
| 0.921053
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.222222
| 0.555556
| 0
| 0.555556
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
02945d19eebbe4dd9cc640f7077e8d492e5632ed
| 48
|
py
|
Python
|
pyglobe3d/core/icosalogic/__init__.py
|
ka-tet19/pyglobelib
|
d62f7636f5f971b897eba8fcf787fabb5ed181f1
|
[
"BSD-3-Clause"
] | null | null | null |
pyglobe3d/core/icosalogic/__init__.py
|
ka-tet19/pyglobelib
|
d62f7636f5f971b897eba8fcf787fabb5ed181f1
|
[
"BSD-3-Clause"
] | null | null | null |
pyglobe3d/core/icosalogic/__init__.py
|
ka-tet19/pyglobelib
|
d62f7636f5f971b897eba8fcf787fabb5ed181f1
|
[
"BSD-3-Clause"
] | null | null | null |
from pyglobe3d.core.icosalogic.mesh import Mesh
| 24
| 47
| 0.854167
| 7
| 48
| 5.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.083333
| 48
| 1
| 48
| 48
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
02b0b575523bd9ae71f87863129e7a6af9f09c87
| 4,419
|
py
|
Python
|
tests/test_rewards.py
|
GeorgianBadita/Dronem-gym-envirnoment
|
f3b488f6a4b55722c4b129051555a68d7775278c
|
[
"MIT"
] | 5
|
2020-06-13T10:43:42.000Z
|
2022-01-25T10:37:32.000Z
|
tests/test_rewards.py
|
GeorgianBadita/Dronem-gym-envirnoment
|
f3b488f6a4b55722c4b129051555a68d7775278c
|
[
"MIT"
] | null | null | null |
tests/test_rewards.py
|
GeorgianBadita/Dronem-gym-envirnoment
|
f3b488f6a4b55722c4b129051555a68d7775278c
|
[
"MIT"
] | null | null | null |
"""
@author: Badita Marin-Georgian
@email: geo.badita@gmail.com
@date: 24.03.2020 23:44
"""
from env_interpretation import State, settings
from env_interpretation.reward_utils import OneMinusOneRewardGiverAllowIllegal
from env_interpretation.utils import n_from_prod
def test_one_minus_one_reward_bad_env_3(env3_robots):
    """Reward for an action on the 3-robot env expected to incur two
    invalid-meeting penalties minus one."""
    meta = env3_robots.get_env_metadata()
    encoded = n_from_prod(meta['sets'], [15, 20, 56])
    decoded = env3_robots.get_action_from_space(encoded)
    giver = OneMinusOneRewardGiverAllowIllegal()
    expected = 2 * settings.REWARD_FOR_INVALID_MEETING - 1
    actual = giver.give_reward(State([15, 20, 1], 5, None),
                               decoded, meta['meetings'],
                               meta['cycle_lengths'],
                               meta['max_memory'])
    assert actual == expected
def test_one_minus_one_reward_good_env_3(env3_robots):
    """Rewards on the 3-robot env: a valid action scores 1, while a second
    action triggers the invalid-transfer penalty."""
    meta = env3_robots.get_env_metadata()
    giver = OneMinusOneRewardGiverAllowIllegal()

    decoded = env3_robots.get_action_from_space(n_from_prod(meta['sets'], [57, 0, 0]))
    assert giver.give_reward(State([2, 2, 0], 10, None),
                             decoded, meta['meetings'],
                             meta['cycle_lengths'], meta['max_memory']) == 1

    decoded = env3_robots.get_action_from_space(n_from_prod(meta['sets'], [58, 0, 0]))
    assert giver.give_reward(State([0, 0, 0], 10, None),
                             decoded, meta['meetings'],
                             meta['cycle_lengths'],
                             meta['max_memory']) == settings.REWARD_FOR_INVALID_TRANSFER
def test_one_minus_one_reward_bad_env_4(env4_robots):
    """Rewards for invalid actions on the 4-robot env.

    The first action decodes to five invalid meetings plus one invalid
    transfer (per the expected-value expression below); the all-zero action
    is a no-op and scores 0.
    """
    env_data = env4_robots.get_env_metadata()
    action = [1, 2, 20, 15, 13, 28]
    # Encode the per-pair choices into a single action index.
    action = n_from_prod(env_data['sets'], action)
    # Decoded meaning of the action (robot -> robot: amount):
    # {(r0 -> r1: 1), (r0 -> r2: 2), (r3 -> r0: 5),
    # (r1 -> r2: 15), (r1 -> r3: 13), (r3 -> r2: 13)}
    interpreted_action = env4_robots.get_action_from_space(action)
    reward_giver = OneMinusOneRewardGiverAllowIllegal()
    assert reward_giver.give_reward(
        State([10, 10, 10, 10], 8, None),
        interpreted_action, env_data['meetings'],
        env_data['cycle_lengths'],
        env_data['max_memory']) == 5 * settings.REWARD_FOR_INVALID_MEETING + settings.REWARD_FOR_INVALID_TRANSFER
    # Same penalty regardless of the robots' stored data amounts.
    assert reward_giver.give_reward(
        State([0, 0, 0, 0], 8, None),
        interpreted_action, env_data['meetings'],
        env_data['cycle_lengths'],
        env_data['max_memory']) == 5 * settings.REWARD_FOR_INVALID_MEETING + settings.REWARD_FOR_INVALID_TRANSFER
    # All-zero action over every robot pair: nothing happens, reward is 0.
    action = [0] * (env_data['num_robots'] * (env_data['num_robots'] - 1) // 2)
    action = n_from_prod(env_data['sets'], action)
    interpreted_action = env4_robots.get_action_from_space(action)
    assert reward_giver.give_reward(
        State([0, 0, 0, 0], 8, None),
        interpreted_action, env_data['meetings'],
        env_data['cycle_lengths'],
        env_data['max_memory']) == 0
def test_one_minus_one_reward_good_env_4(env4_robots):
    """Rewards for (mostly) valid actions on the 4-robot env, depending on
    the robots' current state."""
    env_data = env4_robots.get_env_metadata()
    action = [17, 0, 0, 0, 0, 13]
    action = n_from_prod(env_data['sets'], action)
    # Decoded meaning of the action (robot -> robot: amount):
    # {(r1 -> r0: 2), ,
    # (r2 -> r3: 13)}
    interpreted_action = env4_robots.get_action_from_space(action)
    reward_giver = OneMinusOneRewardGiverAllowIllegal()
    assert reward_giver.give_reward(
        State([10, 10, 15, 0], 10, None),
        interpreted_action, env_data['meetings'],
        env_data['cycle_lengths'],
        env_data['max_memory']) == 0
    # Different state: one transfer becomes invalid, the other scores +1.
    assert reward_giver.give_reward(
        State([10, 10, 10, 10], 10, None),
        interpreted_action, env_data['meetings'],
        env_data['cycle_lengths'],
        env_data['max_memory']) == settings.REWARD_FOR_INVALID_TRANSFER + 1
    action = [25, 0, 0, 0, 0, 13]
    action = n_from_prod(env_data['sets'], action)
    # Decoded meaning of the action (robot -> robot: amount):
    # {(r1 -> r0: 10), ,
    # (r2 -> r3: 13)}
    interpreted_action = env4_robots.get_action_from_space(action)
    assert reward_giver.give_reward(
        State([10, 10, 15, 0], 10, None),
        interpreted_action, env_data['meetings'],
        env_data['cycle_lengths'],
        env_data['max_memory']) == 0
| 40.916667
| 126
| 0.649695
| 580
| 4,419
| 4.586207
| 0.131034
| 0.105263
| 0.057519
| 0.071053
| 0.872556
| 0.860902
| 0.860902
| 0.860902
| 0.807519
| 0.807519
| 0
| 0.053092
| 0.224259
| 4,419
| 107
| 127
| 41.299065
| 0.72287
| 0.056574
| 0
| 0.697368
| 0
| 0
| 0.078909
| 0
| 0
| 0
| 0
| 0
| 0.118421
| 1
| 0.052632
| false
| 0
| 0.039474
| 0
| 0.092105
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
02d0cb10c454325e6ea54791f5698f384b461991
| 186
|
py
|
Python
|
env/gym_nav/envs/__init__.py
|
liuandrew/training-rl-algo
|
ca56d65209de0bf88ac1e1db2269bb7daac4da47
|
[
"MIT"
] | null | null | null |
env/gym_nav/envs/__init__.py
|
liuandrew/training-rl-algo
|
ca56d65209de0bf88ac1e1db2269bb7daac4da47
|
[
"MIT"
] | null | null | null |
env/gym_nav/envs/__init__.py
|
liuandrew/training-rl-algo
|
ca56d65209de0bf88ac1e1db2269bb7daac4da47
|
[
"MIT"
] | null | null | null |
from gym_nav.envs.nav_env import NavEnv
from gym_nav.envs.nav_env_flat import NavEnvFlat
from gym_nav.envs.morris_env import MorrisNav
from gym_nav.envs.gridworld_env import GridworldNav
| 46.5
| 51
| 0.876344
| 33
| 186
| 4.666667
| 0.393939
| 0.181818
| 0.25974
| 0.363636
| 0.25974
| 0.25974
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080645
| 186
| 4
| 51
| 46.5
| 0.900585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
02e53fadc815c7b93be7c8aadcc291f00ebcd0a7
| 1,041
|
py
|
Python
|
Lab/Project2/15/cubes.py
|
doubleliao/PyCrashCourse
|
d6a0b62f3b8ea9fee5efde9edd9006ca3961be25
|
[
"MIT"
] | null | null | null |
Lab/Project2/15/cubes.py
|
doubleliao/PyCrashCourse
|
d6a0b62f3b8ea9fee5efde9edd9006ca3961be25
|
[
"MIT"
] | null | null | null |
Lab/Project2/15/cubes.py
|
doubleliao/PyCrashCourse
|
d6a0b62f3b8ea9fee5efde9edd9006ca3961be25
|
[
"MIT"
] | null | null | null |
"""
15-1. Cubes: A number raised to the third power is a cube. Plot the first five
cubic numbers, and then plot the first 5000 cubic numbers.
"""
import matplotlib.pyplot as plt
x_s = range(1, 6)
y_s = [x**3 for x in x_s]
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.scatter(x_s, y_s, edgecolor='red', s=40)
# Set chart title and label axes.
ax.set_title("Cubes Numbers", fontsize=24)
ax.set_xlabel("Value", fontsize=14)
ax.set_ylabel("Cube of Value", fontsize=14)
# Set size of tick labels.
ax.tick_params(axis='both', labelsize=14)
plt.show()
import matplotlib.pyplot as plt
x_s = range(1, 5001)
y_s = [x**3 for x in x_s]
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.scatter(x_s, y_s, edgecolor='None', s=40)
# Set chart title and label axes.
ax.set_title("Cubes Numbers", fontsize=24)
ax.set_xlabel("Value", fontsize=14)
ax.set_ylabel("Cube of Value", fontsize=14)
# Set size of tick labels.
ax.tick_params(axis='both', labelsize=14)
# Set the range for each axis.
ax.axis([0, 5500, 0, 133500000000])
plt.show()
| 24.209302
| 78
| 0.708934
| 195
| 1,041
| 3.692308
| 0.353846
| 0.016667
| 0.083333
| 0.066667
| 0.752778
| 0.752778
| 0.752778
| 0.752778
| 0.752778
| 0.655556
| 0
| 0.060403
| 0.14121
| 1,041
| 43
| 79
| 24.209302
| 0.744966
| 0.269933
| 0
| 0.782609
| 0
| 0
| 0.121495
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f3231ec6848564972b88abca0761e8fdfb2e1e7a
| 141
|
py
|
Python
|
en/062/python/main.py
|
franciscogomes2020/exercises
|
8b33c4b9349a9331e4002a8225adc2a482c70024
|
[
"MIT"
] | null | null | null |
en/062/python/main.py
|
franciscogomes2020/exercises
|
8b33c4b9349a9331e4002a8225adc2a482c70024
|
[
"MIT"
] | null | null | null |
en/062/python/main.py
|
franciscogomes2020/exercises
|
8b33c4b9349a9331e4002a8225adc2a482c70024
|
[
"MIT"
] | null | null | null |
# Improve CHALLENGE 061 by asking the user whether they want to show some more terms. The program will exit when the user chooses to show 0 terms.
| 70.5
| 140
| 0.77305
| 28
| 141
| 3.892857
| 0.785714
| 0.12844
| 0.201835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.205674
| 141
| 1
| 141
| 141
| 0.9375
| 0.978723
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b835c2f17babd04b2aadc3e55adf33f090e6e815
| 13,218
|
py
|
Python
|
tests/layers/test_recurrent_layers.py
|
FGDBTKD/neupy
|
1f5e1ae9364e8c7816df79678a4648c689d2a5d1
|
[
"MIT"
] | null | null | null |
tests/layers/test_recurrent_layers.py
|
FGDBTKD/neupy
|
1f5e1ae9364e8c7816df79678a4648c689d2a5d1
|
[
"MIT"
] | null | null | null |
tests/layers/test_recurrent_layers.py
|
FGDBTKD/neupy
|
1f5e1ae9364e8c7816df79678a4648c689d2a5d1
|
[
"MIT"
] | null | null | null |
import numpy as np
from sklearn.model_selection import train_test_split
from neupy.exceptions import LayerConnectionError
from neupy.datasets import reber
from neupy import layers, algorithms, init
from base import BaseTestCase
def add_padding(data):
    """Left-pad every sequence with zeros to the length of the longest one.

    Returns a 2D float array of shape ``(len(data), max_len)`` where each
    sample occupies the rightmost positions of its row.
    """
    longest = max(len(sample) for sample in data)
    padded = np.zeros((len(data), longest))
    # Copy each sample into the tail of its row; the zero prefix is padding.
    for row, sample in zip(padded, data):
        row[-len(sample):] = sample
    return padded
class LSTMTestCase(BaseTestCase):
    """Tests for ``layers.LSTM`` on a Reber-grammar sequence classification task."""

    def setUp(self):
        # Build the dataset shared by every test: padded Reber sequences
        # split into (x_train, x_test, y_train, y_test).
        super(LSTMTestCase, self).setUp()
        data, labels = reber.make_reber_classification(
            n_samples=100, return_indeces=True)
        data = add_padding(data + 1)  # +1 to shift indeces
        self.data = train_test_split(data, labels, test_size=0.2)
        # +1 leaves index 0 free for the padding value introduced above.
        self.n_categories = len(reber.avaliable_letters) + 1
        self.n_time_steps = self.data[0].shape[1]

    def train_lstm(self, data, **lstm_options):
        """Train an Input->Embedding->LSTM->Sigmoid network; return test accuracy."""
        x_train, x_test, y_train, y_test = data
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                layers.LSTM(20, **lstm_options),
                layers.Sigmoid(1),
            ],
            step=0.05,
            verbose=False,
            batch_size=16,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=20)
        # Round sigmoid outputs to hard 0/1 class labels.
        y_predicted = network.predict(x_test).round()
        accuracy = (y_predicted.T == y_test).mean()
        return accuracy

    def test_simple_lstm_sequence_classification(self):
        # Default LSTM options must reach at least 90% test accuracy.
        accuracy = self.train_lstm(self.data)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_simple_lstm_without_precomputed_input(self):
        accuracy = self.train_lstm(self.data, precompute_input=False)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_lstm_with_gradient_clipping(self):
        accuracy = self.train_lstm(self.data, gradient_clipping=1)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_lstm_with_enabled_peepholes_option(self):
        accuracy = self.train_lstm(self.data, peepholes=True)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_lstm_with_enabled_unroll_scan_option(self):
        accuracy = self.train_lstm(self.data, unroll_scan=True)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_lstm_with_enabled_backwards_option(self):
        # Reverse the time axis so a backwards LSTM effectively sees the
        # sequences in their original order.
        x_train, x_test, y_train, y_test = self.data
        x_train = x_train[:, ::-1]
        x_test = x_test[:, ::-1]
        data = x_train, x_test, y_train, y_test
        accuracy = self.train_lstm(data, backwards=True)
        self.assertGreaterEqual(accuracy, 0.9)
        accuracy = self.train_lstm(data, backwards=True, unroll_scan=True)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_lstm_output_shapes(self):
        # only_return_final=True drops the time axis from the output shape.
        network_1 = layers.join(
            layers.Input((10, 2)),
            layers.LSTM(20, only_return_final=True),
        )
        self.assertEqual(network_1.output_shape, (20,))
        network_2 = layers.join(
            layers.Input((10, 2)),
            layers.LSTM(20, only_return_final=False),
        )
        self.assertEqual(network_2.output_shape, (10, 20))

    def test_stacked_lstm(self):
        x_train, x_test, y_train, y_test = self.data
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                # First LSTM must emit the full sequence so the second
                # LSTM receives per-step input.
                layers.LSTM(10,
                            only_return_final=False,
                            weights=init.Normal(0.1)),
                layers.LSTM(2,
                            weights=init.Normal(0.1)),
                layers.Sigmoid(1),
            ],
            step=0.05,
            verbose=False,
            batch_size=1,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=10)
        y_predicted = network.predict(x_test).round()
        accuracy = (y_predicted.T == y_test).mean()
        self.assertGreaterEqual(accuracy, 0.9)

    def test_stacked_lstm_with_enabled_backwards_option(self):
        x_train, x_test, y_train, y_test = self.data
        x_train = x_train[:, ::-1]
        x_test = x_test[:, ::-1]
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                layers.LSTM(10, only_return_final=False, backwards=True),
                layers.LSTM(2, backwards=True),
                layers.Sigmoid(1),
            ],
            step=0.1,
            verbose=False,
            batch_size=1,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=20)
        y_predicted = network.predict(x_test).round()
        accuracy = (y_predicted.T == y_test).mean()
        self.assertGreaterEqual(accuracy, 0.9)

    def test_lstm_with_4d_input(self):
        # LSTM should accept input with an extra trailing feature axis.
        x_train, x_test, y_train, y_test = self.data
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                # Make 4D input
                layers.Reshape((self.n_time_steps, 5, 2), name='reshape'),
                layers.LSTM(10),
                layers.Sigmoid(1),
            ],
            step=0.1,
            verbose=False,
            batch_size=1,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=2)
        reshape = network.connection.end('reshape')
        # +1 for batch size
        output_dimension = len(reshape.output_shape) + 1
        self.assertEqual(4, output_dimension)

    def test_lstm_connection_exceptions(self):
        # A 1D (feature-only) input has no time axis and must be rejected.
        with self.assertRaises(LayerConnectionError):
            layers.Input(1) > layers.LSTM(10)

    def test_lstm_modify_only_one_weight_parameter(self):
        # Overriding a single named weight keeps the default initializer
        # (XavierUniform) for all the others.
        lstm_layer = layers.LSTM(2, weights=dict(
            weight_in_to_ingate=init.Constant(0)
        ))
        layers.join(
            layers.Input((5, 3)),
            lstm_layer,
        )
        for key, value in lstm_layer.weights.items():
            if key == 'weight_in_to_ingate':
                self.assertIsInstance(value, init.Constant)
            else:
                self.assertIsInstance(value, init.XavierUniform)

    def test_lstm_initialization_exceptions(self):
        # Unknown weight names, non-callable activations, and a bare
        # callable (instead of a per-gate dict) must all be rejected.
        with self.assertRaisesRegexp(ValueError, 'invalid key'):
            layers.LSTM(1, weights=dict(unknown_parameter=10))
        with self.assertRaisesRegexp(ValueError, 'callable'):
            layers.LSTM(1, activation_functions=dict(ingate=10))
        with self.assertRaises(TypeError):
            layers.LSTM(1, activation_functions=lambda x: x)
class GRUTestCase(BaseTestCase):
    """Tests for ``layers.GRU``; mirrors LSTMTestCase on the same Reber task."""

    def setUp(self):
        # Build the dataset shared by every test: padded Reber sequences
        # split into (x_train, x_test, y_train, y_test).
        super(GRUTestCase, self).setUp()
        data, labels = reber.make_reber_classification(
            n_samples=100, return_indeces=True)
        data = add_padding(data + 1)  # +1 to shift indeces
        self.data = train_test_split(data, labels, test_size=0.2)
        # +1 leaves index 0 free for the padding value introduced above.
        self.n_categories = len(reber.avaliable_letters) + 1
        self.n_time_steps = self.data[0].shape[1]

    def train_gru(self, data, **gru_options):
        """Train an Input->Embedding->GRU->Sigmoid network; return test accuracy."""
        x_train, x_test, y_train, y_test = data
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                layers.GRU(20, **gru_options),
                layers.Sigmoid(1),
            ],
            step=0.05,
            verbose=False,
            batch_size=16,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=20)
        # Round sigmoid outputs to hard 0/1 class labels.
        y_predicted = network.predict(x_test).round()
        accuracy = (y_predicted.T == y_test).mean()
        return accuracy

    def test_simple_gru_sequence_classification(self):
        # Default GRU options must reach at least 90% test accuracy.
        accuracy = self.train_gru(self.data)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_simple_gru_without_precomputed_input(self):
        accuracy = self.train_gru(self.data, precompute_input=False)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_gru_with_gradient_clipping(self):
        accuracy = self.train_gru(self.data, gradient_clipping=1)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_gru_with_enabled_unroll_scan_option(self):
        accuracy = self.train_gru(self.data, unroll_scan=True)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_gru_with_enabled_backwards_option(self):
        # Reverse the time axis so a backwards GRU effectively sees the
        # sequences in their original order.
        x_train, x_test, y_train, y_test = self.data
        x_train = x_train[:, ::-1]
        x_test = x_test[:, ::-1]
        data = x_train, x_test, y_train, y_test
        accuracy = self.train_gru(data, backwards=True)
        self.assertGreaterEqual(accuracy, 0.9)
        accuracy = self.train_gru(data, backwards=True, unroll_scan=True)
        self.assertGreaterEqual(accuracy, 0.9)

    def test_gru_output_shapes(self):
        # only_return_final=True drops the time axis from the output shape.
        network_1 = layers.join(
            layers.Input((10, 2)),
            layers.GRU(20, only_return_final=True),
        )
        self.assertEqual(network_1.output_shape, (20,))
        network_2 = layers.join(
            layers.Input((10, 2)),
            layers.GRU(20, only_return_final=False),
        )
        self.assertEqual(network_2.output_shape, (10, 20))

    def test_stacked_gru(self):
        x_train, x_test, y_train, y_test = self.data
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                # First GRU must emit the full sequence so the second
                # GRU receives per-step input.
                layers.GRU(10,
                           only_return_final=False,
                           weights=init.Normal(0.1)),
                layers.GRU(1,
                           weights=init.Normal(0.1)),
                layers.Sigmoid(1),
            ],
            step=0.05,
            verbose=False,
            batch_size=1,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=10)
        y_predicted = network.predict(x_test).round()
        accuracy = (y_predicted.T == y_test).mean()
        self.assertGreaterEqual(accuracy, 0.9)

    def test_stacked_gru_with_enabled_backwards_option(self):
        x_train, x_test, y_train, y_test = self.data
        x_train = x_train[:, ::-1]
        x_test = x_test[:, ::-1]
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                layers.GRU(10, only_return_final=False, backwards=True),
                layers.GRU(2, backwards=True),
                layers.Sigmoid(1),
            ],
            step=0.02,
            verbose=False,
            batch_size=10,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=20)
        y_predicted = network.predict(x_test).round()
        accuracy = (y_predicted.T == y_test).mean()
        self.assertGreaterEqual(accuracy, 0.9)

    def test_gru_with_4d_input(self):
        # GRU should accept input with an extra trailing feature axis.
        x_train, x_test, y_train, y_test = self.data
        network = algorithms.RMSProp(
            [
                layers.Input(self.n_time_steps),
                layers.Embedding(self.n_categories, 10),
                # Make 4D input
                layers.Reshape((self.n_time_steps, 5, 2), name='reshape'),
                layers.GRU(10),
                layers.Sigmoid(1),
            ],
            step=0.1,
            verbose=False,
            batch_size=1,
            error='binary_crossentropy',
        )
        network.train(x_train, y_train, x_test, y_test, epochs=2)
        reshape = network.connection.end('reshape')
        # +1 for batch size
        output_dimension = len(reshape.output_shape) + 1
        self.assertEqual(4, output_dimension)

    def test_gru_connection_exceptions(self):
        # A 1D (feature-only) input has no time axis and must be rejected.
        with self.assertRaises(LayerConnectionError):
            layers.Input(1) > layers.GRU(10)

    def test_gru_modify_only_one_weight_parameter(self):
        # Overriding a single named weight keeps the default initializer
        # (XavierUniform) for all the others.
        gru_layer = layers.GRU(2, weights=dict(
            weight_in_to_updategate=init.Constant(0)
        ))
        layers.join(
            layers.Input((5, 3)),
            gru_layer,
        )
        for key, value in gru_layer.weights.items():
            if key == 'weight_in_to_updategate':
                self.assertIsInstance(value, init.Constant)
            else:
                self.assertIsInstance(value, init.XavierUniform)

    def test_gru_initialization_exceptions(self):
        # Unknown weight names, non-callable activations, and a bare
        # callable (instead of a per-gate dict) must all be rejected.
        with self.assertRaisesRegexp(ValueError, 'invalid key'):
            layers.GRU(1, weights=dict(unknown_parameter=10))
        with self.assertRaisesRegexp(ValueError, 'callable'):
            layers.GRU(1, activation_functions=dict(ingate=10))
        with self.assertRaises(TypeError):
            layers.GRU(1, activation_functions=lambda x: x)
| 33.979434
| 74
| 0.599183
| 1,594
| 13,218
| 4.719573
| 0.09724
| 0.023262
| 0.027914
| 0.030706
| 0.910408
| 0.896052
| 0.868404
| 0.836236
| 0.810714
| 0.788382
| 0
| 0.0262
| 0.29543
| 13,218
| 388
| 75
| 34.06701
| 0.781596
| 0.011197
| 0
| 0.621711
| 0
| 0
| 0.019905
| 0.001761
| 0
| 0
| 0
| 0
| 0.115132
| 1
| 0.098684
| false
| 0
| 0.019737
| 0
| 0.134868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
b885c62c90c7043f7b93c2fed73cd96a9a5b3e70
| 104
|
py
|
Python
|
labext/prelude.py
|
binh-vu/ipywidgets_extra
|
3ddf46445306b2aa158bf3f696ec33f8ddd499e7
|
[
"MIT"
] | 3
|
2020-06-21T22:57:55.000Z
|
2021-06-03T23:36:39.000Z
|
labext/prelude.py
|
binh-vu/ipywidgets_extra
|
3ddf46445306b2aa158bf3f696ec33f8ddd499e7
|
[
"MIT"
] | null | null | null |
labext/prelude.py
|
binh-vu/ipywidgets_extra
|
3ddf46445306b2aa158bf3f696ec33f8ddd499e7
|
[
"MIT"
] | 1
|
2020-06-20T19:50:37.000Z
|
2020-06-20T19:50:37.000Z
|
import labext.modules as M
import labext.widgets as W
import labext.apps as A
from labext.tag import Tag
| 26
| 26
| 0.817308
| 20
| 104
| 4.25
| 0.55
| 0.423529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144231
| 104
| 4
| 27
| 26
| 0.955056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
b21f92eeaeb951819c01d10ed2515ea53ac09d75
| 34
|
py
|
Python
|
tudo/ex001.py
|
Ramon-Erik/Exercicios-Python
|
158a7f1846dd3d486aa0517fa337d46d73aab649
|
[
"MIT"
] | 1
|
2021-07-08T00:35:57.000Z
|
2021-07-08T00:35:57.000Z
|
tudo/ex001.py
|
Ramon-Erik/Exercicios-Python
|
158a7f1846dd3d486aa0517fa337d46d73aab649
|
[
"MIT"
] | null | null | null |
tudo/ex001.py
|
Ramon-Erik/Exercicios-Python
|
158a7f1846dd3d486aa0517fa337d46d73aab649
|
[
"MIT"
] | null | null | null |
# Greet the world and the user (Portuguese: "Hello, world! Hello user!").
message = 'Olá, mundo! Olá usuário!'
print(message)
| 17
| 33
| 0.676471
| 5
| 34
| 4.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.766667
| 0
| 0
| 0
| 0
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b221778ec011db0189ab1af3f784b1474b34b6cb
| 60
|
py
|
Python
|
PBO/PBO_18142/Latihan_6.3parameter2.py
|
rosnialabania/PBO
|
597c71d778483b97443a3944a03cea759771fecc
|
[
"MIT"
] | null | null | null |
PBO/PBO_18142/Latihan_6.3parameter2.py
|
rosnialabania/PBO
|
597c71d778483b97443a3944a03cea759771fecc
|
[
"MIT"
] | null | null | null |
PBO/PBO_18142/Latihan_6.3parameter2.py
|
rosnialabania/PBO
|
597c71d778483b97443a3944a03cea759771fecc
|
[
"MIT"
] | null | null | null |
def hello(rosnia_labania):
    """Print a greeting that includes the given name.

    Bug fix: the original used a plain string literal, so the text
    "{rosnia_labania}" was printed verbatim; the f-string prefix makes
    the argument interpolate as clearly intended.
    """
    print(f"hello {rosnia_labania}")
| 15
| 31
| 0.766667
| 8
| 60
| 5.5
| 0.625
| 0.5
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 60
| 3
| 32
| 20
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
b243ed1a20a70fa0d6069e81221bb36a885f7d21
| 42
|
py
|
Python
|
lib/dao/rdbms/__init__.py
|
the-constant/fammelody
|
970fb3a4a5d5d0dffd19f22a8a75cbf226fd57c2
|
[
"Apache-2.0"
] | null | null | null |
lib/dao/rdbms/__init__.py
|
the-constant/fammelody
|
970fb3a4a5d5d0dffd19f22a8a75cbf226fd57c2
|
[
"Apache-2.0"
] | null | null | null |
lib/dao/rdbms/__init__.py
|
the-constant/fammelody
|
970fb3a4a5d5d0dffd19f22a8a75cbf226fd57c2
|
[
"Apache-2.0"
] | null | null | null |
from .legal import *
from .market import *
| 21
| 21
| 0.738095
| 6
| 42
| 5.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 2
| 21
| 21
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a2707bc4d9ab391f8afe81038cbbb0dd668cd0bc
| 221
|
py
|
Python
|
dataduct/etl/__init__.py
|
hillsdale18/ProjectX
|
4518d724eeb8ac73a6eae1d076d4846244e0944a
|
[
"Apache-2.0"
] | 3
|
2017-12-29T11:26:15.000Z
|
2022-02-11T16:44:28.000Z
|
dataduct/etl/__init__.py
|
hillsdale18/ProjectX
|
4518d724eeb8ac73a6eae1d076d4846244e0944a
|
[
"Apache-2.0"
] | 7
|
2017-09-21T23:25:24.000Z
|
2021-03-29T21:46:45.000Z
|
dataduct/etl/__init__.py
|
recurly/dataduct
|
29aec3526e170e5ad3b59a135780e72b69209f0b
|
[
"Apache-2.0"
] | 1
|
2020-05-12T08:54:38.000Z
|
2020-05-12T08:54:38.000Z
|
from .etl_actions import activate_pipeline
from .etl_actions import create_pipeline
from .etl_actions import read_pipeline_definition
from .etl_actions import validate_pipeline
from .etl_actions import visualize_pipeline
| 36.833333
| 49
| 0.886878
| 31
| 221
| 5.967742
| 0.354839
| 0.189189
| 0.378378
| 0.540541
| 0.454054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090498
| 221
| 5
| 50
| 44.2
| 0.920398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a283da01f94b6cd141c889c9bc8fdabba19ea5e8
| 39
|
py
|
Python
|
quiz_mill/__init__.py
|
eoas-ubc/quiz_mill
|
922989244964e2147ecac9186bdafae8d8f91813
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
quiz_mill/__init__.py
|
eoas-ubc/quiz_mill
|
922989244964e2147ecac9186bdafae8d8f91813
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
quiz_mill/__init__.py
|
eoas-ubc/quiz_mill
|
922989244964e2147ecac9186bdafae8d8f91813
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2021-06-01T23:03:17.000Z
|
2021-06-01T23:03:17.000Z
|
from .solve_layers import do_two_matrix
| 39
| 39
| 0.897436
| 7
| 39
| 4.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a2b7207bc1f4292b9acf62355ca2faac44b9dd87
| 16
|
py
|
Python
|
test/login.py
|
hongren798911/xuexigit
|
a0925a222db9468333e3e91aa6b76423f01326b7
|
[
"MIT"
] | null | null | null |
test/login.py
|
hongren798911/xuexigit
|
a0925a222db9468333e3e91aa6b76423f01326b7
|
[
"MIT"
] | null | null | null |
test/login.py
|
hongren798911/xuexigit
|
a0925a222db9468333e3e91aa6b76423f01326b7
|
[
"MIT"
] | null | null | null |
# Placeholder assignments; presumably used to practice git workflows
# (file lives in a repo named "xuexigit") — no behavior depends on the
# specific values as far as this file shows.
a = 10
dev = 1
| 4
| 7
| 0.4375
| 4
| 16
| 1.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.4375
| 16
| 3
| 8
| 5.333333
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a2c0e4d57badbdb14df6f2cb71bc2e477f8a177e
| 112
|
py
|
Python
|
PokerRL/rl/base_cls/workers/__init__.py
|
MAWUT0R/PokerRL
|
95708a5f7a16cb151bc4253132bdfd22ea7a9b25
|
[
"MIT"
] | 247
|
2019-06-20T16:41:36.000Z
|
2022-03-28T11:40:12.000Z
|
PokerRL/rl/base_cls/workers/__init__.py
|
MAWUT0R/PokerRL
|
95708a5f7a16cb151bc4253132bdfd22ea7a9b25
|
[
"MIT"
] | 11
|
2019-08-23T09:20:31.000Z
|
2021-12-05T23:44:27.000Z
|
PokerRL/rl/base_cls/workers/__init__.py
|
MAWUT0R/PokerRL
|
95708a5f7a16cb151bc4253132bdfd22ea7a9b25
|
[
"MIT"
] | 61
|
2019-06-17T06:06:11.000Z
|
2022-03-01T17:55:44.000Z
|
from .ChiefBase import *
from .DriverBase import *
from .ParameterServerBase import *
from .WorkerBase import *
| 22.4
| 34
| 0.785714
| 12
| 112
| 7.333333
| 0.5
| 0.340909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 112
| 4
| 35
| 28
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a2c373f29ed31ee9058b99d7c574372addb8c851
| 33
|
py
|
Python
|
goto_cloud/commander/public.py
|
jdepoix/goto_cloud
|
59bb9923026e1b1dc6e8e08fb6b21300c8e8854a
|
[
"MIT"
] | 2
|
2018-02-04T23:22:17.000Z
|
2019-04-15T12:06:04.000Z
|
goto_cloud/commander/public.py
|
jdepoix/goto_cloud
|
59bb9923026e1b1dc6e8e08fb6b21300c8e8854a
|
[
"MIT"
] | null | null | null |
goto_cloud/commander/public.py
|
jdepoix/goto_cloud
|
59bb9923026e1b1dc6e8e08fb6b21300c8e8854a
|
[
"MIT"
] | null | null | null |
from .commander import Commander
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a2d32c6029cfe13c7fc37f21be0adfcdc61e5002
| 38
|
py
|
Python
|
Ep. 1/gameengine/__init__.py
|
mandaw2014/gameengine_tutorial
|
9a2edb85e7885b36e04fdb9132508a0c13b5a42b
|
[
"MIT"
] | null | null | null |
Ep. 1/gameengine/__init__.py
|
mandaw2014/gameengine_tutorial
|
9a2edb85e7885b36e04fdb9132508a0c13b5a42b
|
[
"MIT"
] | null | null | null |
Ep. 1/gameengine/__init__.py
|
mandaw2014/gameengine_tutorial
|
9a2edb85e7885b36e04fdb9132508a0c13b5a42b
|
[
"MIT"
] | null | null | null |
from gameengine.main import GameEngine
| 38
| 38
| 0.894737
| 5
| 38
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.971429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a2f066ba35848bffc041b1d139b4f42d9a1b1d93
| 54
|
py
|
Python
|
scripts/templates/fastApiCrud/partials/controller_init_property.py
|
sulthonzh/zaruba
|
ec9262f43da17d86330da2c593b7da451aabd60f
|
[
"Apache-2.0"
] | null | null | null |
scripts/templates/fastApiCrud/partials/controller_init_property.py
|
sulthonzh/zaruba
|
ec9262f43da17d86330da2c593b7da451aabd60f
|
[
"Apache-2.0"
] | null | null | null |
scripts/templates/fastApiCrud/partials/controller_init_property.py
|
sulthonzh/zaruba
|
ec9262f43da17d86330da2c593b7da451aabd60f
|
[
"Apache-2.0"
] | null | null | null |
self.zaruba_entity_name_repo = zaruba_entity_name_repo
| 54
| 54
| 0.925926
| 9
| 54
| 4.888889
| 0.555556
| 0.545455
| 0.727273
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 54
| 1
| 54
| 54
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0c2c76f272fe46dc5497b0937a3efe7982ce8d65
| 192
|
py
|
Python
|
remind/admin.py
|
jscpeterson/reminders
|
f1ad78daff6314a697a32a0a52d5ac16aa54eeca
|
[
"FSFAP"
] | null | null | null |
remind/admin.py
|
jscpeterson/reminders
|
f1ad78daff6314a697a32a0a52d5ac16aa54eeca
|
[
"FSFAP"
] | null | null | null |
remind/admin.py
|
jscpeterson/reminders
|
f1ad78daff6314a697a32a0a52d5ac16aa54eeca
|
[
"FSFAP"
] | null | null | null |
from django.contrib import admin
from cases.models import Case, Motion
from remind.models import Deadline
# Expose the Case, Deadline and Motion models in the Django admin site,
# registered in the same order as the original explicit calls.
for model in (Case, Deadline, Motion):
    admin.site.register(model)
| 21.333333
| 37
| 0.822917
| 28
| 192
| 5.642857
| 0.464286
| 0.170886
| 0.322785
| 0.316456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 192
| 8
| 38
| 24
| 0.908046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a75c2a14a99561b27bca85847bf349af8fad1f76
| 62
|
py
|
Python
|
test_impall.py
|
rec/import_all
|
2d864d016ce9b320521fcaa9f5206d5cbef19107
|
[
"MIT"
] | 1
|
2019-05-26T15:09:32.000Z
|
2019-05-26T15:09:32.000Z
|
test_impall.py
|
rec/import_all
|
2d864d016ce9b320521fcaa9f5206d5cbef19107
|
[
"MIT"
] | 12
|
2019-05-13T12:56:13.000Z
|
2019-10-01T13:30:12.000Z
|
test_impall.py
|
rec/impall
|
2d864d016ce9b320521fcaa9f5206d5cbef19107
|
[
"MIT"
] | null | null | null |
import impall
class ImpAllTest(impall.ImpAllTest):
    # Concrete subclass that inherits all behavior from impall.ImpAllTest;
    # presumably the base class imports every module in the project when the
    # test suite runs — TODO confirm against the impall documentation.
    pass
| 10.333333
| 36
| 0.758065
| 7
| 62
| 6.714286
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177419
| 62
| 5
| 37
| 12.4
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
a75d277fba51dec40450eb94f734ae3f79b9b574
| 10,509
|
py
|
Python
|
lib/services/server/ncloud_server/__init__.py
|
KidongSohn/ncloud-sdk-py
|
1c62471a9bd320d77164ed3193a0ebb9f64229ff
|
[
"MIT"
] | null | null | null |
lib/services/server/ncloud_server/__init__.py
|
KidongSohn/ncloud-sdk-py
|
1c62471a9bd320d77164ed3193a0ebb9f64229ff
|
[
"MIT"
] | null | null | null |
lib/services/server/ncloud_server/__init__.py
|
KidongSohn/ncloud-sdk-py
|
1c62471a9bd320d77164ed3193a0ebb9f64229ff
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
server
OpenAPI spec version: 2018-06-22T02:34:44Z
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from ncloud_server.api.v2_api import V2Api
# import ApiClient
from ncloud_server.api_client import ApiClient
from ncloud_server.configuration import Configuration
# import models into sdk package
from ncloud_server.model.access_control_group import AccessControlGroup
from ncloud_server.model.access_control_rule import AccessControlRule
from ncloud_server.model.add_nas_volume_access_control_request import AddNasVolumeAccessControlRequest
from ncloud_server.model.add_nas_volume_access_control_response import AddNasVolumeAccessControlResponse
from ncloud_server.model.add_port_forwarding_rules_request import AddPortForwardingRulesRequest
from ncloud_server.model.add_port_forwarding_rules_response import AddPortForwardingRulesResponse
from ncloud_server.model.associate_public_ip_with_server_instance_request import AssociatePublicIpWithServerInstanceRequest
from ncloud_server.model.associate_public_ip_with_server_instance_response import AssociatePublicIpWithServerInstanceResponse
from ncloud_server.model.block_storage_instance import BlockStorageInstance
from ncloud_server.model.block_storage_snapshot_instance import BlockStorageSnapshotInstance
from ncloud_server.model.change_nas_volume_size_request import ChangeNasVolumeSizeRequest
from ncloud_server.model.change_nas_volume_size_response import ChangeNasVolumeSizeResponse
from ncloud_server.model.change_server_instance_spec_request import ChangeServerInstanceSpecRequest
from ncloud_server.model.change_server_instance_spec_response import ChangeServerInstanceSpecResponse
from ncloud_server.model.common_code import CommonCode
from ncloud_server.model.create_block_storage_instance_request import CreateBlockStorageInstanceRequest
from ncloud_server.model.create_block_storage_instance_response import CreateBlockStorageInstanceResponse
from ncloud_server.model.create_login_key_request import CreateLoginKeyRequest
from ncloud_server.model.create_login_key_response import CreateLoginKeyResponse
from ncloud_server.model.create_member_server_image_request import CreateMemberServerImageRequest
from ncloud_server.model.create_member_server_image_response import CreateMemberServerImageResponse
from ncloud_server.model.create_nas_volume_instance_request import CreateNasVolumeInstanceRequest
from ncloud_server.model.create_nas_volume_instance_response import CreateNasVolumeInstanceResponse
from ncloud_server.model.create_public_ip_instance_request import CreatePublicIpInstanceRequest
from ncloud_server.model.create_public_ip_instance_response import CreatePublicIpInstanceResponse
from ncloud_server.model.create_server_instances_request import CreateServerInstancesRequest
from ncloud_server.model.create_server_instances_response import CreateServerInstancesResponse
from ncloud_server.model.delete_block_storage_instances_request import DeleteBlockStorageInstancesRequest
from ncloud_server.model.delete_block_storage_instances_response import DeleteBlockStorageInstancesResponse
from ncloud_server.model.delete_login_key_request import DeleteLoginKeyRequest
from ncloud_server.model.delete_login_key_response import DeleteLoginKeyResponse
from ncloud_server.model.delete_member_server_images_request import DeleteMemberServerImagesRequest
from ncloud_server.model.delete_member_server_images_response import DeleteMemberServerImagesResponse
from ncloud_server.model.delete_nas_volume_instance_request import DeleteNasVolumeInstanceRequest
from ncloud_server.model.delete_nas_volume_instance_response import DeleteNasVolumeInstanceResponse
from ncloud_server.model.delete_port_forwarding_rules_request import DeletePortForwardingRulesRequest
from ncloud_server.model.delete_port_forwarding_rules_response import DeletePortForwardingRulesResponse
from ncloud_server.model.delete_public_ip_instances_request import DeletePublicIpInstancesRequest
from ncloud_server.model.delete_public_ip_instances_response import DeletePublicIpInstancesResponse
from ncloud_server.model.disassociate_public_ip_from_server_instance_request import DisassociatePublicIpFromServerInstanceRequest
from ncloud_server.model.disassociate_public_ip_from_server_instance_response import DisassociatePublicIpFromServerInstanceResponse
from ncloud_server.model.get_access_control_group_list_request import GetAccessControlGroupListRequest
from ncloud_server.model.get_access_control_group_list_response import GetAccessControlGroupListResponse
from ncloud_server.model.get_access_control_group_server_instance_list_request import GetAccessControlGroupServerInstanceListRequest
from ncloud_server.model.get_access_control_group_server_instance_list_response import GetAccessControlGroupServerInstanceListResponse
from ncloud_server.model.get_access_control_rule_list_request import GetAccessControlRuleListRequest
from ncloud_server.model.get_access_control_rule_list_response import GetAccessControlRuleListResponse
from ncloud_server.model.get_block_storage_instance_list_request import GetBlockStorageInstanceListRequest
from ncloud_server.model.get_block_storage_instance_list_response import GetBlockStorageInstanceListResponse
from ncloud_server.model.get_block_storage_snapshot_instance_list_request import GetBlockStorageSnapshotInstanceListRequest
from ncloud_server.model.get_block_storage_snapshot_instance_list_response import GetBlockStorageSnapshotInstanceListResponse
from ncloud_server.model.get_login_key_list_request import GetLoginKeyListRequest
from ncloud_server.model.get_login_key_list_response import GetLoginKeyListResponse
from ncloud_server.model.get_member_server_image_list_request import GetMemberServerImageListRequest
from ncloud_server.model.get_member_server_image_list_response import GetMemberServerImageListResponse
from ncloud_server.model.get_nas_volume_instance_list_request import GetNasVolumeInstanceListRequest
from ncloud_server.model.get_nas_volume_instance_list_response import GetNasVolumeInstanceListResponse
from ncloud_server.model.get_nas_volume_instance_rating_list_request import GetNasVolumeInstanceRatingListRequest
from ncloud_server.model.get_nas_volume_instance_rating_list_response import GetNasVolumeInstanceRatingListResponse
from ncloud_server.model.get_port_forwarding_rule_list_request import GetPortForwardingRuleListRequest
from ncloud_server.model.get_port_forwarding_rule_list_response import GetPortForwardingRuleListResponse
from ncloud_server.model.get_public_ip_instance_list_request import GetPublicIpInstanceListRequest
from ncloud_server.model.get_public_ip_instance_list_response import GetPublicIpInstanceListResponse
from ncloud_server.model.get_public_ip_target_server_instance_list_request import GetPublicIpTargetServerInstanceListRequest
from ncloud_server.model.get_public_ip_target_server_instance_list_response import GetPublicIpTargetServerInstanceListResponse
from ncloud_server.model.get_raid_list_request import GetRaidListRequest
from ncloud_server.model.get_raid_list_response import GetRaidListResponse
from ncloud_server.model.get_region_list_request import GetRegionListRequest
from ncloud_server.model.get_region_list_response import GetRegionListResponse
from ncloud_server.model.get_root_password_request import GetRootPasswordRequest
from ncloud_server.model.get_root_password_response import GetRootPasswordResponse
from ncloud_server.model.get_server_image_product_list_request import GetServerImageProductListRequest
from ncloud_server.model.get_server_image_product_list_response import GetServerImageProductListResponse
from ncloud_server.model.get_server_instance_list_request import GetServerInstanceListRequest
from ncloud_server.model.get_server_instance_list_response import GetServerInstanceListResponse
from ncloud_server.model.get_server_product_list_request import GetServerProductListRequest
from ncloud_server.model.get_server_product_list_response import GetServerProductListResponse
from ncloud_server.model.get_zone_list_request import GetZoneListRequest
from ncloud_server.model.get_zone_list_response import GetZoneListResponse
from ncloud_server.model.login_key import LoginKey
from ncloud_server.model.member_server_image import MemberServerImage
from ncloud_server.model.nas_volume_instance import NasVolumeInstance
from ncloud_server.model.nas_volume_instance_custom_ip import NasVolumeInstanceCustomIp
from ncloud_server.model.nas_volume_instance_rating import NasVolumeInstanceRating
from ncloud_server.model.port_forwarding_rule import PortForwardingRule
from ncloud_server.model.port_forwarding_rule_parameter import PortForwardingRuleParameter
from ncloud_server.model.product import Product
from ncloud_server.model.public_ip_instance import PublicIpInstance
from ncloud_server.model.raid import Raid
from ncloud_server.model.reboot_server_instances_request import RebootServerInstancesRequest
from ncloud_server.model.reboot_server_instances_response import RebootServerInstancesResponse
from ncloud_server.model.recreate_server_instance_request import RecreateServerInstanceRequest
from ncloud_server.model.recreate_server_instance_response import RecreateServerInstanceResponse
from ncloud_server.model.region import Region
from ncloud_server.model.remove_nas_volume_access_control_request import RemoveNasVolumeAccessControlRequest
from ncloud_server.model.remove_nas_volume_access_control_response import RemoveNasVolumeAccessControlResponse
from ncloud_server.model.root_password import RootPassword
from ncloud_server.model.server_instance import ServerInstance
from ncloud_server.model.set_nas_volume_access_control_request import SetNasVolumeAccessControlRequest
from ncloud_server.model.set_nas_volume_access_control_response import SetNasVolumeAccessControlResponse
from ncloud_server.model.start_server_instances_request import StartServerInstancesRequest
from ncloud_server.model.start_server_instances_response import StartServerInstancesResponse
from ncloud_server.model.stop_server_instances_request import StopServerInstancesRequest
from ncloud_server.model.stop_server_instances_response import StopServerInstancesResponse
from ncloud_server.model.terminate_server_instances_request import TerminateServerInstancesRequest
from ncloud_server.model.terminate_server_instances_response import TerminateServerInstancesResponse
from ncloud_server.model.zone import Zone
| 80.838462
| 134
| 0.929013
| 1,220
| 10,509
| 7.571311
| 0.159016
| 0.119086
| 0.190538
| 0.243261
| 0.498863
| 0.47169
| 0.443651
| 0.323915
| 0.182743
| 0.073184
| 0
| 0.001798
| 0.047483
| 10,509
| 129
| 135
| 81.465116
| 0.920979
| 0.02103
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.027027
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a7b77a219f5aefa517f37b7dd044873b918acf5c
| 2,804
|
py
|
Python
|
lib/status/commands/http_request.py
|
chrissimpkins/status
|
4e76d751c537d42dd5603779cbe893551be2b89e
|
[
"MIT",
"Unlicense"
] | 2
|
2017-04-10T20:17:44.000Z
|
2021-07-19T19:07:34.000Z
|
lib/status/commands/http_request.py
|
chrissimpkins/status
|
4e76d751c537d42dd5603779cbe893551be2b89e
|
[
"MIT",
"Unlicense"
] | 1
|
2021-07-19T19:09:30.000Z
|
2021-07-19T19:22:36.000Z
|
lib/status/commands/http_request.py
|
chrissimpkins/status
|
4e76d751c537d42dd5603779cbe893551be2b89e
|
[
"MIT",
"Unlicense"
] | 4
|
2016-12-20T19:57:07.000Z
|
2019-02-16T08:08:15.000Z
|
#!/usr/bin/env python
# encoding: utf-8
import sys
from Naked.toolshed.network import HTTP
from Naked.toolshed.system import exit_success
from Naked.toolshed.system import stderr
from requests.exceptions import ConnectionError
class Get:
    """Issue an HTTP GET request and report every redirect hop plus the
    final response as '<status code> : <url>' lines on stdout."""

    def __init__(self, url):
        self.url = url

    def get_response(self):
        """Perform the GET request for self.url and print the redirect
        chain followed by the final status/URL, then exit with success.

        Aborts with exit status 1 (via stderr()) when no response is
        returned or when the connection fails outright.
        """
        try:
            the_url = prepare_url(self.url)
            http = HTTP(the_url)
            http.get()
            resp = http.response()
            # confirm that a response was returned, abort if not
            # ('is None' is the correct identity comparison for None)
            if resp is None and the_url.startswith('https://'):
                stderr("Unable to connect to the requested URL. This can happen if the secure HTTP protocol is not supported at the requested URL.")
                sys.exit(1)
            elif resp is None:
                stderr("Unable to connect to the requested URL. Please confirm your URL and try again.")
                sys.exit(1)
            # print each redirect hop in the chain before the final response
            if resp.history:
                for hop in resp.history:
                    print(str(hop.status_code) + " : " + str(hop.url))
            print(str(http.res.status_code) + " : " + http.res.url)
            exit_success()
        except ConnectionError:
            error_string = "Unable to connect to the URL, " + self.url
            stderr(error_string, 1)
        # NOTE: the original 'except Exception as e: raise e' no-op handler
        # was removed; unexpected exceptions propagate unchanged either way.
class Post:
    """Issue an HTTP POST request and report every redirect hop plus the
    final response as '<status code> : <url>' lines on stdout."""

    def __init__(self, url):
        self.url = url

    def post_response(self):
        """Perform the POST request for self.url and print the redirect
        chain followed by the final status/URL, then exit with success.

        Aborts with exit status 1 (via stderr()) when no response is
        returned or when the connection fails outright.
        """
        try:
            the_url = prepare_url(self.url)
            http = HTTP(the_url)
            http.post()
            resp = http.response()
            # confirm that a response was returned, abort if not
            # ('is None' is the correct identity comparison for None)
            if resp is None and the_url.startswith('https://'):
                stderr("Unable to connect to the requested URL. This can happen if the secure HTTP protocol is not supported at the requested URL.")
                sys.exit(1)
            elif resp is None:
                stderr("Unable to connect to the requested URL. Please confirm your URL and try again.")
                sys.exit(1)
            # print each redirect hop in the chain before the final response
            if resp.history:
                for hop in resp.history:
                    print(str(hop.status_code) + " : " + str(hop.url))
            # NOTE(review): Get.get_response prints the post-redirect URL
            # (http.res.url) at this point, while this prints the originally
            # requested URL -- confirm which is intended before unifying.
            print(str(http.res.status_code) + " : " + the_url)
            exit_success()
        except ConnectionError:
            # unused 'as ce' binding dropped; the exception detail is unused
            error_string = "Unable to connect to the URL, " + self.url
            stderr(error_string, 1)
        # NOTE: the original 'except Exception as e: raise e' no-op handler
        # was removed; unexpected exceptions propagate unchanged either way.
def prepare_url(url):
    """Return *url* unchanged when it already carries an HTTP(S) scheme,
    otherwise prefix it with 'http://'."""
    if url.startswith(('http://', 'https://')):
        return url
    return 'http://' + url
| 35.948718
| 148
| 0.564551
| 356
| 2,804
| 4.359551
| 0.241573
| 0.034794
| 0.03866
| 0.065722
| 0.835052
| 0.752577
| 0.752577
| 0.752577
| 0.717784
| 0.717784
| 0
| 0.004852
| 0.338445
| 2,804
| 77
| 149
| 36.415584
| 0.831806
| 0.049215
| 0
| 0.677419
| 0
| 0.032258
| 0.191585
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080645
| false
| 0
| 0.080645
| 0
| 0.225806
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ac167b5b9c718ac4e9de726305e2afdc0ceccb9d
| 208
|
py
|
Python
|
flow_py_sdk/signer/__init__.py
|
nichandy/flow-py-sdk
|
716c1690f38eeb78f479d1cf860b974cc6a53b04
|
[
"MIT"
] | null | null | null |
flow_py_sdk/signer/__init__.py
|
nichandy/flow-py-sdk
|
716c1690f38eeb78f479d1cf860b974cc6a53b04
|
[
"MIT"
] | null | null | null |
flow_py_sdk/signer/__init__.py
|
nichandy/flow-py-sdk
|
716c1690f38eeb78f479d1cf860b974cc6a53b04
|
[
"MIT"
] | 1
|
2021-09-15T10:29:00.000Z
|
2021-09-15T10:29:00.000Z
|
from flow_py_sdk.signer.hash_algo import HashAlgo
from flow_py_sdk.signer.sign_algo import SignAlgo
from flow_py_sdk.signer.signer import Signer
from flow_py_sdk.signer.in_memory_signer import InMemorySigner
| 41.6
| 62
| 0.884615
| 36
| 208
| 4.777778
| 0.388889
| 0.186047
| 0.232558
| 0.302326
| 0.44186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 208
| 4
| 63
| 52
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac1b3e37b2c9396c347c1094b15d99fc28069ea5
| 164
|
py
|
Python
|
pages/context_procesor.py
|
mahanfarzaneh2000/Freelara
|
803cd0e75c5c03ee23ed6dea5202f3e6a7af4864
|
[
"Apache-2.0"
] | null | null | null |
pages/context_procesor.py
|
mahanfarzaneh2000/Freelara
|
803cd0e75c5c03ee23ed6dea5202f3e6a7af4864
|
[
"Apache-2.0"
] | null | null | null |
pages/context_procesor.py
|
mahanfarzaneh2000/Freelara
|
803cd0e75c5c03ee23ed6dea5202f3e6a7af4864
|
[
"Apache-2.0"
] | 1
|
2021-04-11T09:59:54.000Z
|
2021-04-11T09:59:54.000Z
|
import datetime
from gigs.models import Category
def context_procesor(request):
    """Template context processor exposing the current year and the full
    set of gig categories to every rendered template."""
    current_year = datetime.datetime.now().year
    return {
        'year': current_year,
        'categories': Category.objects.all(),
    }
| 32.8
| 83
| 0.786585
| 21
| 164
| 6.095238
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079268
| 164
| 5
| 83
| 32.8
| 0.847682
| 0
| 0
| 0
| 0
| 0
| 0.084848
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
ac3208f3bf096fc7be1d65ea7a50e5473c75efb0
| 588
|
py
|
Python
|
deedee_btc_data_process.py
|
justin-oxford/dee-dee-btc
|
0d6c97a819d6de48d1c846e2859bc0c4a7a50ebe
|
[
"MIT"
] | null | null | null |
deedee_btc_data_process.py
|
justin-oxford/dee-dee-btc
|
0d6c97a819d6de48d1c846e2859bc0c4a7a50ebe
|
[
"MIT"
] | null | null | null |
deedee_btc_data_process.py
|
justin-oxford/dee-dee-btc
|
0d6c97a819d6de48d1c846e2859bc0c4a7a50ebe
|
[
"MIT"
] | null | null | null |
#
#
#
#
#
#IMPORTS
# -------------------------------------------------------------------------------------------------
from imports import *
# -------------------------------------------------------------------------------------------------
#CONSTANTS
# -------------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------------
# FUNCTIONS
# -------------------------------------------------------------------------------------------------
| 29.4
| 100
| 0.071429
| 6
| 588
| 7
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079932
| 588
| 19
| 101
| 30.947368
| 0.077634
| 0.877551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac363f5486b65cdfe26061696dc8ef0b2d5c6f69
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/future/moves/urllib/response.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/future/moves/urllib/response.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/future/moves/urllib/response.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/64/46/4c/2f41696c1fbe18878114fbd2cdbb65549e84b11e2d088e2a07b0fcb054
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ac69f6987b01935cec09f2d1c2672f3dd8df321c
| 63
|
py
|
Python
|
graphein/molecule/features/__init__.py
|
avivko/graphein
|
0a2d5e39787cf002c06b03615d9dd3fe62e0171d
|
[
"MIT"
] | null | null | null |
graphein/molecule/features/__init__.py
|
avivko/graphein
|
0a2d5e39787cf002c06b03615d9dd3fe62e0171d
|
[
"MIT"
] | null | null | null |
graphein/molecule/features/__init__.py
|
avivko/graphein
|
0a2d5e39787cf002c06b03615d9dd3fe62e0171d
|
[
"MIT"
] | null | null | null |
from .edges import *
from .graph import *
from .nodes import *
| 15.75
| 20
| 0.714286
| 9
| 63
| 5
| 0.555556
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 63
| 3
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ac7b0d91823debfe996e83f10eb2f5a264799d73
| 45
|
py
|
Python
|
src/core/tests.py
|
resumme/resum.me
|
7fe8fdad2a3d946ab15cc91e6c6ea00fdd99d495
|
[
"MIT"
] | 10
|
2018-10-11T06:47:00.000Z
|
2020-05-05T06:26:15.000Z
|
src/core/tests.py
|
resumme/resum.me
|
7fe8fdad2a3d946ab15cc91e6c6ea00fdd99d495
|
[
"MIT"
] | 22
|
2018-10-15T13:56:30.000Z
|
2022-03-11T23:32:48.000Z
|
src/core/tests.py
|
resumme/resum.me
|
7fe8fdad2a3d946ab15cc91e6c6ea00fdd99d495
|
[
"MIT"
] | 5
|
2018-10-16T19:12:49.000Z
|
2018-10-20T07:46:47.000Z
|
def test_test_are_working():
    """Smoke test: confirms the test runner discovers and executes this module."""
    assert True
| 15
| 28
| 0.755556
| 7
| 45
| 4.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 45
| 2
| 29
| 22.5
| 0.837838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ac81038d88b558206704ae267a617ac255c443e4
| 24,206
|
py
|
Python
|
htcl_totals_test.py
|
NREL/scout
|
acf38df7ce877cbd8c1c10f4f61fdf1d088fd947
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
htcl_totals_test.py
|
NREL/scout
|
acf38df7ce877cbd8c1c10f4f61fdf1d088fd947
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
htcl_totals_test.py
|
NREL/scout
|
acf38df7ce877cbd8c1c10f4f61fdf1d088fd947
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
""" Tests for running the htcl_totals.py routine """
# Import code to be tested
import htcl_totals
# Import needed packages
import unittest
import itertools
class CommonMethods(object):
    """Define common methods for use in all tests below."""

    def dict_check(self, dict1, dict2):
        """Check the equality of two dicts.

        Recursively walks both dicts in sorted-key order; numeric leaves
        are compared to 2 decimal places.

        Args:
            dict1 (dict): First dictionary to be compared
            dict2 (dict): Second dictionary to be compared

        Raises:
            AssertionError: If dictionaries are not equal.
        """
        # zip() and zip_longest() produce tuples for the items
        # identified, where in the case of a dict, the first item
        # in the tuple is the key and the second item is the value;
        # in the case where the dicts are not of identical size,
        # zip_longest() will use the fill value created below as a
        # substitute in the dict that has missing content; this
        # value is given as a tuple to be of comparable structure
        # to the normal output from zip_longest()
        fill_val = ('substituted entry', 5.2)
        # In this structure, k and k2 are the keys that correspond to
        # the dicts or unitary values that are found in i and i2,
        # respectively, at the current level of the recursive
        # exploration of dict1 and dict2, respectively
        for (k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()),
                                                      sorted(dict2.items()),
                                                      fillvalue=fill_val):
            # Confirm that at the current location in the dict structure,
            # the keys are equal; this should fail if one of the dicts
            # is empty, is missing section(s), or has different key names
            self.assertEqual(k, k2)
            # If the recursion has not yet reached the terminal/leaf node
            if isinstance(i, dict):
                # Test that the dicts from the current keys are equal
                self.assertCountEqual(i, i2)
                # Continue to recursively traverse the dict
                self.dict_check(i, i2)
            else:
                # At the terminal/leaf node, formatted as a point value
                self.assertAlmostEqual(i, i2, places=2)
class SumHtClEnergyTest(unittest.TestCase, CommonMethods):
    """Test operation of 'sum_htcl_energy' function.

    Verify that function properly sums all heating and cooling energy for
    a given climate zone, building type, and structure type combination,
    converting from site to source energy in the process.

    Attributes:
        aeo_years (list): Modeling time horizon.
        ss_conv (dict): Site-source conversion factors.
        ok_msegs_in (dict): Sample stock/energy data to use in developing sums.
        ok_out (dict): Sum totals that should be yielded by function given
            valid sample inputs.
    """

    @classmethod
    def setUpClass(cls):
        """Define objects/variables for use across all class functions."""
        # Two-year horizon keeps the expected sums small and easy to verify
        cls.aeo_years = ["2009", "2010"]
        # Site-to-source factors by fuel; only electricity is non-unity here
        cls.ss_conv = {
            "electricity": {"2009": 3, "2010": 4},
            "natural gas": {"2009": 1, "2010": 1},
            "distillate": {"2009": 1, "2010": 1},
            "other fuel": {"2009": 1, "2010": 1}}
        # Sample microsegment data: two climate zones x two building types;
        # every leaf stock/energy value is 1 so expected totals can be derived
        # from the heating/cooling branch counts and the ss_conv factors alone
        cls.ok_msegs_in = {
            "AIA_CZ1": {
                "single family home": {
                    "new homes": {"2009": 1, "2010": 1},
                    "total homes": {"2009": 10, "2010": 10},
                    "total square footage": {"2009": 100, "2010": 100},
                    "electricity": {
                        # lighting is a non-heating/cooling end use; it should
                        # be excluded from the sums
                        "lighting": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}},
                        "heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "secondary heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "cooling": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "water heating": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}}},
                    "natural gas": {
                        "heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "secondary heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "water heating": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}}}},
                "assembly": {
                    "new square footage": {"2009": 1, "2010": 1},
                    "total square footage": {"2009": 5, "2010": 5},
                    "electricity": {
                        "lighting": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}},
                        "heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "cooling": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "refrigeration": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}}
                    },
                    "distillate": {
                        "heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "water heating": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}}}}},
            # Second climate zone mirrors the first; only the home counts and
            # square footage totals differ, shifting the new/existing split
            "AIA_CZ2": {
                "single family home": {
                    "new homes": {"2009": 1, "2010": 1},
                    "total homes": {"2009": 100, "2010": 100},
                    "total square footage": {"2009": 1000, "2010": 1000},
                    "electricity": {
                        "lighting": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}},
                        "heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "secondary heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "cooling": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "water heating": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}}},
                    "natural gas": {
                        "heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "secondary heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "water heating": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}}}},
                "assembly": {
                    "new square footage": {"2009": 1, "2010": 1},
                    "total square footage": {"2009": 10, "2010": 10},
                    "electricity": {
                        "lighting": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}},
                        "heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "cooling": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "refrigeration": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}}
                    },
                    "distillate": {
                        "heating": {
                            "supply": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}},
                            "demand": {
                                "tech 1": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}},
                                "tech 2": {
                                    "stock": {"2009": 1, "2010": 1},
                                    "energy": {"2009": 1, "2010": 1}}}},
                        "water heating": {
                            "tech 1": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}},
                            "tech 2": {
                                "stock": {"2009": 1, "2010": 1},
                                "energy": {"2009": 1, "2010": 1}}}}}}}
        # Expected heating/cooling source-energy sums, split by new vs.
        # existing structures within each climate zone / building type
        cls.ok_out = {
            "AIA_CZ1": {
                "single family home": {
                    "new": {"2009": 2.2, "2010": 5.6},
                    "existing": {"2009": 19.8, "2010": 22.4}
                },
                "assembly": {
                    "new": {"2009": 2.8, "2010": 7.2},
                    "existing": {"2009": 11.2, "2010": 10.8}
                }},
            "AIA_CZ2": {
                "single family home": {
                    "new": {"2009": 0.22, "2010": 0.56},
                    "existing": {"2009": 21.78, "2010": 27.44}
                },
                "assembly": {
                    "new": {"2009": 1.4, "2010": 3.6},
                    "existing": {"2009": 12.6, "2010": 14.4}
                }}}

    def test_ok(self):
        """Test for correct function output given valid inputs."""
        # dict_check (from CommonMethods) compares leaves to 2 decimal places
        self.dict_check(
            htcl_totals.sum_htcl_energy(
                self.ok_msegs_in, self.aeo_years, self.ss_conv),
            self.ok_out)
# Offer external code execution (include all lines below this point in all
# test files)
def main():
    """Run every test fixture defined in this module."""
    unittest.main()


if __name__ == "__main__":
    main()
| 51.392781
| 79
| 0.27456
| 1,793
| 24,206
| 3.682097
| 0.138873
| 0.139352
| 0.24947
| 0.277189
| 0.611936
| 0.607695
| 0.595274
| 0.595274
| 0.595274
| 0.595274
| 0
| 0.207644
| 0.57304
| 24,206
| 470
| 80
| 51.502128
| 0.431156
| 0.090267
| 0
| 0.858561
| 0
| 0
| 0.185314
| 0
| 0
| 0
| 0
| 0
| 0.007444
| 1
| 0.009926
| false
| 0
| 0.007444
| 0
| 0.022333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3bb507b77a6778e085bb52ca3348733be6a09dc2
| 7,405
|
py
|
Python
|
jose/tests/test_content_encryption.py
|
bwhmather/jose
|
8bf1f08afd5b4bd21f20bf86f3474afb671d8992
|
[
"BSD-3-Clause"
] | null | null | null |
jose/tests/test_content_encryption.py
|
bwhmather/jose
|
8bf1f08afd5b4bd21f20bf86f3474afb671d8992
|
[
"BSD-3-Clause"
] | null | null | null |
jose/tests/test_content_encryption.py
|
bwhmather/jose
|
8bf1f08afd5b4bd21f20bf86f3474afb671d8992
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from jose.algorithms.content_encryption import (
A128CBC_HS256, A192CBC_HS384, A256CBC_HS512,
)
class AES_CBC_HMAC_SHA2_Base(unittest.TestCase):
    """Shared round-trip checks for the AES-CBC/HMAC-SHA2 content
    encryption algorithms.

    Concrete subclasses supply ``algorithm`` plus the ``key``, ``iv``,
    ``adata``, ``plaintext``, ``ciphertext`` and ``auth_token`` test
    vectors as class attributes.
    """

    def test_encrypt(self):
        # Encrypting the known plaintext must reproduce the expected
        # ciphertext and authentication tag exactly.
        cipher = self.algorithm(self.key)
        produced_ct, produced_tag = cipher.encrypt(
            self.plaintext, adata=self.adata, iv=self.iv
        )
        self.assertEqual(self.ciphertext, produced_ct)
        self.assertEqual(self.auth_token, produced_tag)

    def test_decrypt(self):
        # Decrypting the known ciphertext with the matching tag must
        # recover the original plaintext.
        cipher = self.algorithm(self.key)
        recovered = cipher.decrypt(
            self.ciphertext, auth_token=self.auth_token,
            adata=self.adata, iv=self.iv
        )
        self.assertEqual(self.plaintext, recovered)
class Test_A128CBC_HS256(AES_CBC_HMAC_SHA2_Base):
    """Known-answer vectors for the A128CBC-HS256 algorithm.

    NOTE(review): these look like the standard AEAD_AES_128_CBC_HMAC_SHA_256
    example vectors (draft-mcgrew-aead-aes-cbc-hmac-sha2) -- confirm.
    """
    algorithm = A128CBC_HS256
    # 32-byte combined key (bytes 0x00..0x1f)
    key = (
        b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
        b'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'
    )
    # 128-byte plaintext; decodes to the Kerckhoffs quotation
    # "A cipher system must not be required to be secret, and it must be
    # able to fall into the hands of the enemy without inconvenience"
    plaintext = (
        b'\x41\x20\x63\x69\x70\x68\x65\x72\x20\x73\x79\x73\x74\x65\x6d\x20'
        b'\x6d\x75\x73\x74\x20\x6e\x6f\x74\x20\x62\x65\x20\x72\x65\x71\x75'
        b'\x69\x72\x65\x64\x20\x74\x6f\x20\x62\x65\x20\x73\x65\x63\x72\x65'
        b'\x74\x2c\x20\x61\x6e\x64\x20\x69\x74\x20\x6d\x75\x73\x74\x20\x62'
        b'\x65\x20\x61\x62\x6c\x65\x20\x74\x6f\x20\x66\x61\x6c\x6c\x20\x69'
        b'\x6e\x74\x6f\x20\x74\x68\x65\x20\x68\x61\x6e\x64\x73\x20\x6f\x66'
        b'\x20\x74\x68\x65\x20\x65\x6e\x65\x6d\x79\x20\x77\x69\x74\x68\x6f'
        b'\x75\x74\x20\x69\x6e\x63\x6f\x6e\x76\x65\x6e\x69\x65\x6e\x63\x65'
    )
    # 16-byte initialization vector
    iv = (
        b'\x1a\xf3\x8c\x2d\xc2\xb9\x6f\xfd\xd8\x66\x94\x09\x23\x41\xbc\x04'
    )
    # additional authenticated data; decodes to
    # "The second principle of Auguste Kerckhoffs"
    adata = (
        b'\x54\x68\x65\x20\x73\x65\x63\x6f\x6e\x64\x20\x70\x72\x69\x6e\x63'
        b'\x69\x70\x6c\x65\x20\x6f\x66\x20\x41\x75\x67\x75\x73\x74\x65\x20'
        b'\x4b\x65\x72\x63\x6b\x68\x6f\x66\x66\x73'
    )
    # expected 144-byte ciphertext (includes CBC padding block)
    ciphertext = (
        b'\xc8\x0e\xdf\xa3\x2d\xdf\x39\xd5\xef\x00\xc0\xb4\x68\x83\x42\x79'
        b'\xa2\xe4\x6a\x1b\x80\x49\xf7\x92\xf7\x6b\xfe\x54\xb9\x03\xa9\xc9'
        b'\xa9\x4a\xc9\xb4\x7a\xd2\x65\x5c\x5f\x10\xf9\xae\xf7\x14\x27\xe2'
        b'\xfc\x6f\x9b\x3f\x39\x9a\x22\x14\x89\xf1\x63\x62\xc7\x03\x23\x36'
        b'\x09\xd4\x5a\xc6\x98\x64\xe3\x32\x1c\xf8\x29\x35\xac\x40\x96\xc8'
        b'\x6e\x13\x33\x14\xc5\x40\x19\xe8\xca\x79\x80\xdf\xa4\xb9\xcf\x1b'
        b'\x38\x4c\x48\x6f\x3a\x54\xc5\x10\x78\x15\x8e\xe5\xd7\x9d\xe5\x9f'
        b'\xbd\x34\xd8\x48\xb3\xd6\x95\x50\xa6\x76\x46\x34\x44\x27\xad\xe5'
        b'\x4b\x88\x51\xff\xb5\x98\xf7\xf8\x00\x74\xb9\x47\x3c\x82\xe2\xdb'
    )
    # expected 16-byte authentication tag
    auth_token = (
        b'\x65\x2c\x3f\xa3\x6b\x0a\x7c\x5b\x32\x19\xfa\xb3\xa3\x0b\xc1\xc4'
    )
class Test_A192CBC_HS384(AES_CBC_HMAC_SHA2_Base):
algorithm = A192CBC_HS384
key = (
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
b'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'
b'\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f'
)
plaintext = (
b'\x41\x20\x63\x69\x70\x68\x65\x72\x20\x73\x79\x73\x74\x65\x6d\x20'
b'\x6d\x75\x73\x74\x20\x6e\x6f\x74\x20\x62\x65\x20\x72\x65\x71\x75'
b'\x69\x72\x65\x64\x20\x74\x6f\x20\x62\x65\x20\x73\x65\x63\x72\x65'
b'\x74\x2c\x20\x61\x6e\x64\x20\x69\x74\x20\x6d\x75\x73\x74\x20\x62'
b'\x65\x20\x61\x62\x6c\x65\x20\x74\x6f\x20\x66\x61\x6c\x6c\x20\x69'
b'\x6e\x74\x6f\x20\x74\x68\x65\x20\x68\x61\x6e\x64\x73\x20\x6f\x66'
b'\x20\x74\x68\x65\x20\x65\x6e\x65\x6d\x79\x20\x77\x69\x74\x68\x6f'
b'\x75\x74\x20\x69\x6e\x63\x6f\x6e\x76\x65\x6e\x69\x65\x6e\x63\x65'
)
iv = (
b'\x1a\xf3\x8c\x2d\xc2\xb9\x6f\xfd\xd8\x66\x94\x09\x23\x41\xbc\x04'
)
adata = (
b'\x54\x68\x65\x20\x73\x65\x63\x6f\x6e\x64\x20\x70\x72\x69\x6e\x63'
b'\x69\x70\x6c\x65\x20\x6f\x66\x20\x41\x75\x67\x75\x73\x74\x65\x20'
b'\x4b\x65\x72\x63\x6b\x68\x6f\x66\x66\x73'
)
ciphertext = (
b'\xea\x65\xda\x6b\x59\xe6\x1e\xdb\x41\x9b\xe6\x2d\x19\x71\x2a\xe5'
b'\xd3\x03\xee\xb5\x00\x52\xd0\xdf\xd6\x69\x7f\x77\x22\x4c\x8e\xdb'
b'\x00\x0d\x27\x9b\xdc\x14\xc1\x07\x26\x54\xbd\x30\x94\x42\x30\xc6'
b'\x57\xbe\xd4\xca\x0c\x9f\x4a\x84\x66\xf2\x2b\x22\x6d\x17\x46\x21'
b'\x4b\xf8\xcf\xc2\x40\x0a\xdd\x9f\x51\x26\xe4\x79\x66\x3f\xc9\x0b'
b'\x3b\xed\x78\x7a\x2f\x0f\xfc\xbf\x39\x04\xbe\x2a\x64\x1d\x5c\x21'
b'\x05\xbf\xe5\x91\xba\xe2\x3b\x1d\x74\x49\xe5\x32\xee\xf6\x0a\x9a'
b'\xc8\xbb\x6c\x6b\x01\xd3\x5d\x49\x78\x7b\xcd\x57\xef\x48\x49\x27'
b'\xf2\x80\xad\xc9\x1a\xc0\xc4\xe7\x9c\x7b\x11\xef\xc6\x00\x54\xe3'
)
auth_token = (
b'\x84\x90\xac\x0e\x58\x94\x9b\xfe\x51\x87\x5d\x73\x3f\x93\xac\x20'
b'\x75\x16\x80\x39\xcc\xc7\x33\xd7'
)
class Test_A256CBC_HS512(AES_CBC_HMAC_SHA2_Base):
algorithm = A256CBC_HS512
key = (
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f'
b'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'
b'\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f'
b'\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f'
)
plaintext = (
b'\x41\x20\x63\x69\x70\x68\x65\x72\x20\x73\x79\x73\x74\x65\x6d\x20'
b'\x6d\x75\x73\x74\x20\x6e\x6f\x74\x20\x62\x65\x20\x72\x65\x71\x75'
b'\x69\x72\x65\x64\x20\x74\x6f\x20\x62\x65\x20\x73\x65\x63\x72\x65'
b'\x74\x2c\x20\x61\x6e\x64\x20\x69\x74\x20\x6d\x75\x73\x74\x20\x62'
b'\x65\x20\x61\x62\x6c\x65\x20\x74\x6f\x20\x66\x61\x6c\x6c\x20\x69'
b'\x6e\x74\x6f\x20\x74\x68\x65\x20\x68\x61\x6e\x64\x73\x20\x6f\x66'
b'\x20\x74\x68\x65\x20\x65\x6e\x65\x6d\x79\x20\x77\x69\x74\x68\x6f'
b'\x75\x74\x20\x69\x6e\x63\x6f\x6e\x76\x65\x6e\x69\x65\x6e\x63\x65'
)
iv = (
b'\x1a\xf3\x8c\x2d\xc2\xb9\x6f\xfd\xd8\x66\x94\x09\x23\x41\xbc\x04'
)
adata = (
b'\x54\x68\x65\x20\x73\x65\x63\x6f\x6e\x64\x20\x70\x72\x69\x6e\x63'
b'\x69\x70\x6c\x65\x20\x6f\x66\x20\x41\x75\x67\x75\x73\x74\x65\x20'
b'\x4b\x65\x72\x63\x6b\x68\x6f\x66\x66\x73'
)
ciphertext = (
b'\x4a\xff\xaa\xad\xb7\x8c\x31\xc5\xda\x4b\x1b\x59\x0d\x10\xff\xbd'
b'\x3d\xd8\xd5\xd3\x02\x42\x35\x26\x91\x2d\xa0\x37\xec\xbc\xc7\xbd'
b'\x82\x2c\x30\x1d\xd6\x7c\x37\x3b\xcc\xb5\x84\xad\x3e\x92\x79\xc2'
b'\xe6\xd1\x2a\x13\x74\xb7\x7f\x07\x75\x53\xdf\x82\x94\x10\x44\x6b'
b'\x36\xeb\xd9\x70\x66\x29\x6a\xe6\x42\x7e\xa7\x5c\x2e\x08\x46\xa1'
b'\x1a\x09\xcc\xf5\x37\x0d\xc8\x0b\xfe\xcb\xad\x28\xc7\x3f\x09\xb3'
b'\xa3\xb7\x5e\x66\x2a\x25\x94\x41\x0a\xe4\x96\xb2\xe2\xe6\x60\x9e'
b'\x31\xe6\xe0\x2c\xc8\x37\xf0\x53\xd2\x1f\x37\xff\x4f\x51\x95\x0b'
b'\xbe\x26\x38\xd0\x9d\xd7\xa4\x93\x09\x30\x80\x6d\x07\x03\xb1\xf6'
)
auth_token = (
b'\x4d\xd3\xb4\xc0\x88\xa7\xf4\x5c\x21\x68\x39\x64\x5b\x20\x12\xbf'
b'\x2e\x62\x69\xa8\xc5\x6a\x81\x6d\xbc\x1b\x26\x77\x61\x95\x5b\xc5'
)
def load_tests(loader, standard_tests, pattern):
return unittest.TestSuite((
loader.loadTestsFromTestCase(Test_A128CBC_HS256),
loader.loadTestsFromTestCase(Test_A192CBC_HS384),
loader.loadTestsFromTestCase(Test_A256CBC_HS512),
))
| 42.073864
| 75
| 0.636867
| 1,451
| 7,405
| 3.217781
| 0.189524
| 0.034697
| 0.017348
| 0.015421
| 0.508246
| 0.504391
| 0.487042
| 0.487042
| 0.487042
| 0.46948
| 0
| 0.310494
| 0.158406
| 7,405
| 175
| 76
| 42.314286
| 0.438703
| 0
| 0
| 0.430556
| 0
| 0.506944
| 0.651452
| 0.651452
| 0
| 1
| 0
| 0
| 0.020833
| 1
| 0.020833
| false
| 0
| 0.013889
| 0.006944
| 0.215278
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3bfada2366d53f055901db95b2e965098fc08271
| 35
|
py
|
Python
|
src/pwbus_http/__init__.py
|
fszostak/pwbus-web
|
1412ea9e7869f04fbfeccd212ac4f9ed28a6a17f
|
[
"MIT"
] | null | null | null |
src/pwbus_http/__init__.py
|
fszostak/pwbus-web
|
1412ea9e7869f04fbfeccd212ac4f9ed28a6a17f
|
[
"MIT"
] | 1
|
2021-04-16T00:43:09.000Z
|
2021-04-16T00:43:09.000Z
|
src/pwbus_http/__init__.py
|
fszostak/pwbus-web
|
1412ea9e7869f04fbfeccd212ac4f9ed28a6a17f
|
[
"MIT"
] | null | null | null |
# __init__.py
from . import server
| 11.666667
| 20
| 0.742857
| 5
| 35
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 35
| 2
| 21
| 17.5
| 0.758621
| 0.314286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
02238e7529201e6b8e5f434a65291956fc9af2a1
| 165
|
py
|
Python
|
autogl/module/__init__.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | 824
|
2020-11-30T14:38:07.000Z
|
2022-03-19T10:14:04.000Z
|
autogl/module/__init__.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | 38
|
2020-12-21T12:32:57.000Z
|
2022-01-31T02:32:05.000Z
|
autogl/module/__init__.py
|
dedsec-9/AutoGL
|
487f2b2f798b9b1363ad5dc100fb410b12222e06
|
[
"MIT"
] | 85
|
2020-12-21T05:16:09.000Z
|
2022-03-28T08:44:22.000Z
|
from . import feature, model, train, hpo, nas, ensemble
from .ensemble import *
from .feature import *
from .hpo import *
from .model import *
from .train import *
| 20.625
| 55
| 0.721212
| 23
| 165
| 5.173913
| 0.347826
| 0.336134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 165
| 7
| 56
| 23.571429
| 0.881481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
65fd27df92475b5614da5ef867df169ec8ad55cd
| 187
|
py
|
Python
|
openapi_python_client/parser/__init__.py
|
Maistho/openapi-python-client
|
e123b966e8d31db173e09cabc8284a855d07e425
|
[
"MIT"
] | 2
|
2020-10-15T07:25:50.000Z
|
2021-09-14T21:29:08.000Z
|
openapi_python_client/parser/__init__.py
|
Maistho/openapi-python-client
|
e123b966e8d31db173e09cabc8284a855d07e425
|
[
"MIT"
] | 79
|
2020-09-10T00:47:21.000Z
|
2022-03-25T02:07:31.000Z
|
openapi_python_client/parser/__init__.py
|
Maistho/openapi-python-client
|
e123b966e8d31db173e09cabc8284a855d07e425
|
[
"MIT"
] | 1
|
2020-11-03T00:11:57.000Z
|
2020-11-03T00:11:57.000Z
|
""" Classes representing the data in the OpenAPI schema """
__all__ = ["GeneratorData", "import_string_from_reference"]
from .openapi import GeneratorData, import_string_from_reference
| 31.166667
| 64
| 0.802139
| 22
| 187
| 6.363636
| 0.590909
| 0.271429
| 0.357143
| 0.414286
| 0.542857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112299
| 187
| 5
| 65
| 37.4
| 0.843373
| 0.272727
| 0
| 0
| 0
| 0
| 0.320313
| 0.21875
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5a00d7b477fcf498888514aef1011741f72965b5
| 33
|
py
|
Python
|
virtual-check.py
|
ZachZemo/testing-pack
|
d769ff5242660da9311b23059e64da11d6e991d4
|
[
"MIT"
] | null | null | null |
virtual-check.py
|
ZachZemo/testing-pack
|
d769ff5242660da9311b23059e64da11d6e991d4
|
[
"MIT"
] | null | null | null |
virtual-check.py
|
ZachZemo/testing-pack
|
d769ff5242660da9311b23059e64da11d6e991d4
|
[
"MIT"
] | null | null | null |
import example_pkg
print(1 + 1)
| 8.25
| 18
| 0.727273
| 6
| 33
| 3.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 0.181818
| 33
| 3
| 19
| 11
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
5a180c482b4f52e795c9fd0bfc7efbe15a1b85f5
| 2,933
|
py
|
Python
|
stock_portfolio/stock_portfolio/tests/test_models_stock.py
|
zarkle/pyramid-stocks
|
493ad5a5b77e99dcff8e8234bf0616db1fbb4c98
|
[
"MIT"
] | null | null | null |
stock_portfolio/stock_portfolio/tests/test_models_stock.py
|
zarkle/pyramid-stocks
|
493ad5a5b77e99dcff8e8234bf0616db1fbb4c98
|
[
"MIT"
] | 4
|
2019-12-26T16:42:42.000Z
|
2020-01-06T18:53:34.000Z
|
stock_portfolio/stock_portfolio/tests/test_models_stock.py
|
zarkle/pyramid-stocks
|
493ad5a5b77e99dcff8e8234bf0616db1fbb4c98
|
[
"MIT"
] | null | null | null |
def test_stock_model(db_session):
"""test make a new stock"""
from ..models import Stock
assert len(db_session.query(Stock).all()) == 0
stock = Stock(
symbol="MU",
companyName="Micron Technology Inc.",
exchange="Nasdaq Global Select",
industry="Semiconductors",
website="http://www.micron.com",
description="Micron Technology Inc along with its subsidiaries provide memory and storage solutions. Its product portfolio consists of memory and storage technologies such as DRAM, NAND, NOR and 3D XPoint memory.",
CEO="Michael Stewart",
issueType="cs",
sector="Technology"
)
db_session.add(stock)
assert len(db_session.query(Stock).all()) == 1
# def test_make_user_no_password(db_session):
# """test can't make new user with no password"""
# from ..models import Stock
# import pytest
# from sqlalchemy.exc import DBAPIError
# assert len(db_session.query(Stock).all()) == 0
# user = Stock(
# username='me',
# password=None,
# email='me@me.com',
# )
# with pytest.raises(DBAPIError):
# db_session.add(user)
# assert len(db_session.query(Stock).all()) == 0
# assert db_session.query(Stock).one_or_none() is None
def test_make_stock_no_ceo(db_session):
"""test can make new stock with no ceo"""
from ..models import Stock
assert len(db_session.query(Stock).all()) == 0
stock = Stock(
symbol="MU",
companyName="Micron Technology Inc.",
exchange="Nasdaq Global Select",
industry="Semiconductors",
website="http://www.micron.com",
description="Micron Technology Inc along with its subsidiaries provide memory and storage solutions. Its product portfolio consists of memory and storage technologies such as DRAM, NAND, NOR and 3D XPoint memory.",
CEO="",
issueType="cs",
sector="Technology"
)
db_session.add(stock)
assert len(db_session.query(Stock).all()) == 1
def test_new_stock_in_database(db_session):
"""test new stock gets added to database"""
from ..models import Stock
assert len(db_session.query(Stock).all()) == 0
user = Stock(
symbol="MU",
companyName="Micron Technology Inc.",
exchange="Nasdaq Global Select",
industry="Semiconductors",
website="http://www.micron.com",
description="Micron Technology Inc along with its subsidiaries provide memory and storage solutions. Its product portfolio consists of memory and storage technologies such as DRAM, NAND, NOR and 3D XPoint memory.",
CEO="Michael Stewart",
issueType="cs",
sector="Technology"
)
db_session.add(user)
query = db_session.query(Stock)
stock = query.filter(Stock.symbol == 'MU').first()
assert isinstance(stock, Stock)
assert len(db_session.query(Stock).all()) == 1
| 36.209877
| 222
| 0.653256
| 371
| 2,933
| 5.072776
| 0.237197
| 0.086079
| 0.074389
| 0.100956
| 0.742827
| 0.742827
| 0.742827
| 0.742827
| 0.725824
| 0.706164
| 0
| 0.004874
| 0.230481
| 2,933
| 80
| 223
| 36.6625
| 0.828977
| 0.20866
| 0
| 0.764706
| 0
| 0.058824
| 0.393886
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5a51ee89b9eeac5958eb7116447a51a5dd4f50ea
| 99
|
py
|
Python
|
Lesson4/matr.py
|
shinkai-tester/python_beginner
|
a934328c9a50241cc3f02a423060e16aab53b425
|
[
"Apache-2.0"
] | 2
|
2021-06-01T13:24:04.000Z
|
2021-06-01T13:27:47.000Z
|
Lesson4/matr.py
|
shinkai-tester/python_beginner
|
a934328c9a50241cc3f02a423060e16aab53b425
|
[
"Apache-2.0"
] | null | null | null |
Lesson4/matr.py
|
shinkai-tester/python_beginner
|
a934328c9a50241cc3f02a423060e16aab53b425
|
[
"Apache-2.0"
] | null | null | null |
for i in range(1, 10):
for j in range(1, 10):
print(i * j, end=' ')
print(end='\n')
| 24.75
| 29
| 0.474747
| 19
| 99
| 2.473684
| 0.526316
| 0.297872
| 0.340426
| 0.425532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 0.313131
| 99
| 4
| 30
| 24.75
| 0.602941
| 0
| 0
| 0
| 0
| 0
| 0.03
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
ce66428f0c80237f3197f503efa4975a2a382a26
| 170
|
py
|
Python
|
esmonitor/views/index_view.py
|
cristianprice/pyesmonitor
|
df89968d7b2566a9e1c4afd89a1156a285580747
|
[
"Apache-2.0"
] | null | null | null |
esmonitor/views/index_view.py
|
cristianprice/pyesmonitor
|
df89968d7b2566a9e1c4afd89a1156a285580747
|
[
"Apache-2.0"
] | null | null | null |
esmonitor/views/index_view.py
|
cristianprice/pyesmonitor
|
df89968d7b2566a9e1c4afd89a1156a285580747
|
[
"Apache-2.0"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return render(request, 'esmonitor/index.html', {'none': 'none'})
| 24.285714
| 69
| 0.723529
| 21
| 170
| 5.857143
| 0.666667
| 0.162602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158824
| 170
| 6
| 70
| 28.333333
| 0.86014
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
ce9283e286495a9bef3f7f62965af7424b69d4aa
| 10,370
|
py
|
Python
|
gerenciador/anuncios/tests/test_views.py
|
diogo-alves/gerenciador_anuncios
|
fc62a818d803594ba0c31c755cbb83c165c1488f
|
[
"MIT"
] | null | null | null |
gerenciador/anuncios/tests/test_views.py
|
diogo-alves/gerenciador_anuncios
|
fc62a818d803594ba0c31c755cbb83c165c1488f
|
[
"MIT"
] | null | null | null |
gerenciador/anuncios/tests/test_views.py
|
diogo-alves/gerenciador_anuncios
|
fc62a818d803594ba0c31c755cbb83c165c1488f
|
[
"MIT"
] | null | null | null |
from http import HTTPStatus
from django.urls import reverse
from django.test import TestCase
from ..models import Anuncio, Cliente
class ClienteCreateViewTests(TestCase):
def setUp(self):
self.url = reverse('anuncios:cliente_create')
def test_template_utilizado(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'anuncios/cliente_create.html')
def test_resposta_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_redirecionamento_apos_criacao(self):
response = self.client.post(self.url, data={'nome': 'Cliente1'}, follow=True)
cliente = response.context.get('cliente')
self.assertEqual(Cliente.objects.count(), 1)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertRedirects(response, reverse('anuncios:cliente_detail', kwargs={'pk': cliente.pk}))
def test_redirecionamento_apos_clicar_em_salvar_e_adicionar_outro(self):
dados_cliente = {
'nome': 'Anúncio 1',
'btn_salvar_e_adicionar_outro': ''
}
response = self.client.post(self.url, data=dados_cliente)
self.assertEqual(Cliente.objects.count(), 1)
self.assertEqual(response.status_code, HTTPStatus.FOUND)
self.assertRedirects(response, reverse('anuncios:cliente_create'))
class ClienteDetailViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.cliente = Cliente.objects.create(nome='Cliente 1')
def setUp(self):
self.url = reverse('anuncios:cliente_detail', kwargs={'pk': self.cliente.pk})
def test_template_utilizado(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'anuncios/cliente_detail.html')
def test_resposta_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, HTTPStatus.OK)
class ClienteUpdateViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.cliente = Cliente.objects.create(nome='Cliente 1')
def setUp(self):
self.url = reverse('anuncios:cliente_update', kwargs={'pk': self.cliente.pk})
def test_template_utilizado(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'anuncios/cliente_update.html')
def test_resposta_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_redirecionamento_apos_edicao(self):
dados_cliente = {'nome': 'Novo nome do cliente'}
response = self.client.post(self.url, data=dados_cliente)
cliente_atualizado = Cliente.objects.get(pk=self.cliente.pk)
self.assertEqual(cliente_atualizado.nome, dados_cliente['nome'])
self.assertRedirects(response, reverse('anuncios:cliente_detail', kwargs={'pk': self.cliente.pk}))
self.assertEqual(Cliente.objects.count(), 1)
class ClienteDeleteViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.cliente = Cliente.objects.create(nome='Cliente 1')
def setUp(self):
self.url = reverse('anuncios:cliente_delete', kwargs={'pk': self.cliente.pk})
def test_exclusao_de_objeto(self):
response = self.client.post(self.url)
self.assertEqual(Cliente.objects.count(), 0)
self.assertRedirects(response, reverse('anuncios:cliente_list'))
class ClienteListViewTests(TestCase):
@classmethod
def setUpTestData(cls):
Cliente.objects.bulk_create([
Cliente(nome='Cliente 1'),
Cliente(nome='Cliente 2'),
])
def setUp(self):
self.url = reverse('anuncios:cliente_list')
def test_template_utilizado(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'anuncios/cliente_list.html')
def test_resposta_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_quantidade_de_objetos_retornados(self):
response = self.client.get(self.url)
object_list = response.context.get('object_list')
self.assertEqual(len(object_list), 2)
class AnuncioCreateViewTests(TestCase):
def setUp(self):
self.url = reverse('anuncios:anuncio_create')
def test_templates_utilizados(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'anuncios/anuncio_create.html')
self.assertTemplateUsed(response, 'anuncios/_anuncio_form_fields.html')
def test_resposta_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_redirecionamento_apos_criacao(self):
cliente = Cliente.objects.create(nome='Cliente 1')
dados_anuncio = {
'nome': 'Anúncio 1',
'cliente': cliente.pk,
'data_inicio': '2021-05-01',
'data_termino': '2021-05-31',
'investimento_diario': '5000'
}
response = self.client.post(self.url, data=dados_anuncio, follow=True)
anuncio = response.context.get('anuncio')
self.assertEqual(Anuncio.objects.count(), 1)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertRedirects(response, reverse('anuncios:anuncio_detail', kwargs={'pk': anuncio.pk}))
def test_redirecionamento_apos_clicar_em_salvar_e_adicionar_outro(self):
cliente = Cliente.objects.create(nome='Cliente 1')
dados_anuncio = {
'nome': 'Anúncio 1',
'cliente': cliente.pk,
'data_inicio': '2021-05-01',
'data_termino': '2021-05-31',
'investimento_diario': '5000',
'btn_salvar_e_adicionar_outro': ''
}
response = self.client.post(self.url, data=dados_anuncio)
self.assertEqual(Anuncio.objects.count(), 1)
self.assertEqual(response.status_code, HTTPStatus.FOUND)
self.assertRedirects(response, reverse('anuncios:anuncio_create'))
class AnuncioDetailViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cliente = Cliente.objects.create(nome='Cliente 1')
cls.anuncio = Anuncio.objects.create(
nome='Anúncio 1',
cliente=cliente,
data_inicio='2021-05-01',
data_termino='2021-05-31',
investimento_diario=5000.00
)
def setUp(self):
self.url = reverse('anuncios:anuncio_detail', kwargs={'pk': self.anuncio.pk})
def test_template_utilizado(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'anuncios/anuncio_detail.html')
def test_resposta_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, HTTPStatus.OK)
class AnuncioUpdateViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.cliente = Cliente.objects.create(nome='Cliente 1')
cls.anuncio = Anuncio.objects.create(
nome='Anúncio 1',
cliente=cls.cliente,
data_inicio='2021-05-01',
data_termino='2021-05-31',
investimento_diario=5000.00
)
def setUp(self):
self.url = reverse('anuncios:anuncio_update', kwargs={'pk': self.anuncio.pk})
def test_templates_utilizados(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'anuncios/anuncio_update.html')
self.assertTemplateUsed(response, 'anuncios/_anuncio_form_fields.html')
def test_resposta_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_redirecionamento_apos_edicao(self):
dados_anuncio = {
'nome': 'Anúncio editado',
'cliente': self.cliente.pk,
'data_inicio': '2021-05-01',
'data_termino': '2021-05-31',
'investimento_diario': '5000',
}
response = self.client.post(self.url, data=dados_anuncio)
anuncio_atualizado = Anuncio.objects.get(pk=self.anuncio.pk)
self.assertEqual(anuncio_atualizado.nome, dados_anuncio['nome'])
self.assertRedirects(response, reverse('anuncios:anuncio_detail', kwargs={'pk': self.anuncio.pk}))
self.assertEqual(Anuncio.objects.count(), 1)
class AnuncioDeleteViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cliente = Cliente.objects.create(nome='Cliente 1')
cls.anuncio = Anuncio.objects.create(
nome='Anúncio 1',
cliente=cliente,
data_inicio='2021-05-01',
data_termino='2021-05-31',
investimento_diario=5000.00
)
def setUp(self):
self.url = reverse('anuncios:anuncio_delete', kwargs={'pk': self.anuncio.pk})
def test_exclusao_de_objeto(self):
response = self.client.post(self.url)
self.assertEqual(Anuncio.objects.count(), 0)
self.assertRedirects(response, reverse('anuncios:anuncio_list'))
class AnuncioListViewTests(TestCase):
@classmethod
def setUpTestData(cls):
cliente = Cliente.objects.create(nome='Cliente')
Anuncio.objects.bulk_create([
Anuncio(
nome='Anúncio 1',
cliente=cliente,
data_inicio='2021-05-01',
data_termino='2021-05-31',
investimento_diario=5000.00
),
Anuncio(
nome='Anúncio 2',
cliente=cliente,
data_inicio='2021-04-01',
data_termino='2021-05-01',
investimento_diario=1000.00
)
])
def setUp(self):
self.url = reverse('anuncios:anuncio_list')
def test_template_utilizado(self):
response = self.client.get(self.url)
self.assertTemplateUsed(response, 'anuncios/anuncio_list.html')
def test_resposta_get(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, HTTPStatus.OK)
def test_quantidade_de_objetos_retornados(self):
response = self.client.get(self.url)
object_list = response.context.get('object_list')
self.assertEqual(len(object_list), 2)
| 35.272109
| 106
| 0.657184
| 1,160
| 10,370
| 5.725
| 0.091379
| 0.037946
| 0.070471
| 0.069568
| 0.868092
| 0.858907
| 0.841741
| 0.810119
| 0.772625
| 0.725493
| 0
| 0.024727
| 0.223915
| 10,370
| 293
| 107
| 35.392491
| 0.800447
| 0
| 0
| 0.674009
| 0
| 0
| 0.132015
| 0.072324
| 0
| 0
| 0
| 0
| 0.185022
| 1
| 0.193833
| false
| 0
| 0.017621
| 0
| 0.255507
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0c7675e739a8c7fe8730ae95cbbd459121a09f22
| 9,211
|
py
|
Python
|
SR/model/UNetSR.py
|
AntonyYX/Super-Resolution
|
9a5a55169b08849be39a42f0ee955feb60527fbf
|
[
"MIT"
] | null | null | null |
SR/model/UNetSR.py
|
AntonyYX/Super-Resolution
|
9a5a55169b08849be39a42f0ee955feb60527fbf
|
[
"MIT"
] | null | null | null |
SR/model/UNetSR.py
|
AntonyYX/Super-Resolution
|
9a5a55169b08849be39a42f0ee955feb60527fbf
|
[
"MIT"
] | 1
|
2021-10-02T11:03:49.000Z
|
2021-10-02T11:03:49.000Z
|
"""
@author: Huakun Shen, Abhishek Thakur
@reference: https://youtu.be/u1loyDCoGbE
"""
import torch
from torch import nn
from PIL import Image
from torch.nn.modules import padding
from torchvision import transforms
import torch.nn.functional as F
def double_convolution(in_c, out_c, ksize=3):
return nn.Sequential(
nn.Conv2d(in_c, out_c, kernel_size=ksize, padding=ksize//2),
nn.ReLU(inplace=True),
nn.Conv2d(out_c, out_c, kernel_size=ksize, padding=ksize//2),
nn.ReLU(inplace=True),
)
class UNetSR(nn.Module):
"""
unetsr = UNetSR(in_c=3, out_c=3, output_paddings=[1, 1]).to(device)
unet_config = {
'epochs': 150,
'save_period': 10,
'batch_size': 8,
'checkpoint_dir': RESULT_PATH / 'result/unetsr_100_300_perceptual_loss_w_seed',
'log_step': 10,
'start_epoch': 1,
'criterion': criterion,
'dataset_type': 'same_300',
'low_res': 100,
'high_res': 300,
'device': device,
'scheduler': {
'step_size': 5,
'gamma': 0.85
},
'optimizer': optim.Adam(unetsr.parameters(), lr=0.002),
'train_set_percentage': 0.9,
'num_worker': multiprocessing.cpu_count(),
'test_all_multiprocess_cpu': 1,
'test_only': False
}
"""
def __init__(self, in_c: int = 3, out_c: int = 3, ksize=3, output_paddings=[1, 1]):
"""output_paddings: second number is 0 when input size is 600, 1 if input size is 300"""
super(UNetSR, self).__init__()
self.MaxPool2d = nn.MaxPool2d(kernel_size=2, stride=2)
# encoder part
self.encoder_conv_1 = double_convolution(in_c, 64, ksize=ksize)
self.encoder_conv_2 = double_convolution(64, 128, ksize=ksize)
self.encoder_conv_3 = double_convolution(128, 256, ksize=ksize)
self.encoder_conv_4 = double_convolution(256, 512, ksize=ksize)
self.encoder_conv_5 = double_convolution(512, 1024, ksize=ksize)
# decoder part
self.ConvT2D_1 = nn.ConvTranspose2d(
in_channels=1024, out_channels=512, kernel_size=2, stride=2, output_padding=output_paddings[0])
self.decoder_conv_1 = double_convolution(1024, 512, ksize=ksize)
self.ConvT2D_2 = nn.ConvTranspose2d(
in_channels=512, out_channels=256, kernel_size=2, stride=2, output_padding=output_paddings[1])
self.decoder_conv_2 = double_convolution(512, 256, ksize=ksize)
self.ConvT2D_3 = nn.ConvTranspose2d(
in_channels=256, out_channels=128, kernel_size=2, stride=2)
self.decoder_conv_3 = double_convolution(256, 128, ksize=ksize)
self.ConvT2D_4 = nn.ConvTranspose2d(
in_channels=128, out_channels=64, kernel_size=2, stride=2)
self.decoder_conv_4 = double_convolution(128, 64, ksize=ksize)
# output layer to 3 channels
self.final = nn.Conv2d(64, out_c, kernel_size=1)
def forward(self, image):
x1 = self.encoder_conv_1(image) # to be concatenated to decoder
x2 = self.MaxPool2d(x1)
# print(1, x2.shape)
x3 = self.encoder_conv_2(x2) # to be concatenated to decoder
x4 = self.MaxPool2d(x3)
# print(2, x4.shape)
x5 = self.encoder_conv_3(x4) # to be concatenated to decoder
x6 = self.MaxPool2d(x5)
# print(3, x6.shape)
x7 = self.encoder_conv_4(x6) # to be concatenated to decoder
x8 = self.MaxPool2d(x7)
# print(4, x8.shape)
x9 = self.encoder_conv_5(x8)
# print(5, x9.shape)
x = self.ConvT2D_1(x9)
# print(6, x.shape)
x = self.decoder_conv_1(torch.cat([x, x7], 1))
# print(7, x.shape)
x = self.ConvT2D_2(x)
# print(8, x.shape)
x = self.decoder_conv_2(torch.cat([x, x5], 1))
# print(9, x.shape)
x = self.ConvT2D_3(x)
x = self.decoder_conv_3(torch.cat([x, x3], 1))
# print(10, x.shape)
x = self.ConvT2D_4(x)
# print(11, x.shape)
x = self.decoder_conv_4(torch.cat([x, x1], 1))
# print(12, x.shape)
x = self.final(x)
# print(13, x.shape)
return x
class UNetNoTop(nn.Module):
"""
remove top layer skip connection
"""
def __init__(self, in_c: int = 3, out_c: int = 3, ksize=3):
super(UNetNoTop, self).__init__()
self.MaxPool2d = nn.MaxPool2d(kernel_size=2, stride=2)
# encoder part
self.encoder_conv_1 = double_convolution(in_c, 64, ksize=ksize)
self.encoder_conv_2 = double_convolution(64, 128, ksize=ksize)
self.encoder_conv_3 = double_convolution(128, 256, ksize=ksize)
self.encoder_conv_4 = double_convolution(256, 512, ksize=ksize)
self.encoder_conv_5 = double_convolution(512, 1024, ksize=ksize)
# decoder part
self.ConvT2D_1 = nn.ConvTranspose2d(
in_channels=1024, out_channels=512, kernel_size=2, stride=2, output_padding=1)
self.decoder_conv_1 = double_convolution(1024, 512, ksize=ksize)
self.ConvT2D_2 = nn.ConvTranspose2d(
in_channels=512, out_channels=256, kernel_size=2, stride=2, output_padding=1)
self.decoder_conv_2 = double_convolution(512, 256, ksize=ksize)
self.ConvT2D_3 = nn.ConvTranspose2d(
in_channels=256, out_channels=128, kernel_size=2, stride=2)
self.decoder_conv_3 = double_convolution(256, 128, ksize=ksize)
self.ConvT2D_4 = nn.ConvTranspose2d(
in_channels=128, out_channels=64, kernel_size=2, stride=2)
self.decoder_conv_4 = double_convolution(64, 64, ksize=ksize)
# output layer to 3 channels
self.final = nn.Conv2d(64, out_c, kernel_size=1)
def forward(self, image):
x1 = self.encoder_conv_1(image) # to be concatenated to decoder
x2 = self.MaxPool2d(x1)
x3 = self.encoder_conv_2(x2) # to be concatenated to decoder
x4 = self.MaxPool2d(x3)
x5 = self.encoder_conv_3(x4) # to be concatenated to decoder
x6 = self.MaxPool2d(x5)
x7 = self.encoder_conv_4(x6) # to be concatenated to decoder
x8 = self.MaxPool2d(x7)
x9 = self.encoder_conv_5(x8)
x = self.ConvT2D_1(x9)
x = self.decoder_conv_1(torch.cat([x, x7], 1))
x = self.ConvT2D_2(x)
x = self.decoder_conv_2(torch.cat([x, x5], 1))
x = self.ConvT2D_3(x)
x = self.decoder_conv_3(torch.cat([x, x3], 1))
x = self.ConvT2D_4(x)
# x = self.decoder_conv_4(torch.cat([x, x1], 1))
x = self.decoder_conv_4(x)
x = self.final(x)
return x
class UNetD4(nn.Module):
    """U-Net with an encoder/decoder depth of 4 (the original design uses 5).

    Each encoder stage's output is retained and concatenated with the
    matching upsampled decoder features (skip connections), then projected
    to ``out_c`` channels by a 1x1 convolution.
    """

    def __init__(self, in_c: int = 3, out_c: int = 3, ksize=3):
        super(UNetD4, self).__init__()
        self.MaxPool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        # --- encoder ---
        self.encoder_conv_1 = double_convolution(in_c, 64, ksize=ksize)
        self.encoder_conv_2 = double_convolution(64, 128, ksize=ksize)
        self.encoder_conv_3 = double_convolution(128, 256, ksize=ksize)
        self.encoder_conv_4 = double_convolution(256, 512, ksize=ksize)
        self.encoder_conv_5 = double_convolution(512, 1024, ksize=ksize)
        # --- decoder ---
        # NOTE(review): encoder_conv_5, ConvT2D_1 and decoder_conv_1 are never
        # used by forward() at depth 4; they are kept here so existing
        # checkpoints keep their keys -- confirm before removing.
        self.ConvT2D_1 = nn.ConvTranspose2d(
            in_channels=1024, out_channels=512, kernel_size=2, stride=2, output_padding=1)
        self.decoder_conv_1 = double_convolution(1024, 512, ksize=ksize)
        self.ConvT2D_2 = nn.ConvTranspose2d(
            in_channels=512, out_channels=256, kernel_size=2, stride=2, output_padding=1)
        self.decoder_conv_2 = double_convolution(512, 256, ksize=ksize)
        self.ConvT2D_3 = nn.ConvTranspose2d(
            in_channels=256, out_channels=128, kernel_size=2, stride=2)
        self.decoder_conv_3 = double_convolution(256, 128, ksize=ksize)
        self.ConvT2D_4 = nn.ConvTranspose2d(
            in_channels=128, out_channels=64, kernel_size=2, stride=2)
        self.decoder_conv_4 = double_convolution(128, 64, ksize=ksize)
        # 1x1 projection to the requested number of output channels
        self.final = nn.Conv2d(64, out_c, kernel_size=1)

    def forward(self, image):
        """Run the 4-level encoder/decoder and return the output feature map."""
        # Encoder path: keep each stage's output for the skip connections.
        enc1 = self.encoder_conv_1(image)
        enc2 = self.encoder_conv_2(self.MaxPool2d(enc1))
        enc3 = self.encoder_conv_3(self.MaxPool2d(enc2))
        enc4 = self.encoder_conv_4(self.MaxPool2d(enc3))
        # Decoder path: upsample, concatenate the skip along channels, refine.
        up = self.decoder_conv_2(torch.cat([self.ConvT2D_2(enc4), enc3], 1))
        up = self.decoder_conv_3(torch.cat([self.ConvT2D_3(up), enc2], 1))
        up = self.decoder_conv_4(torch.cat([self.ConvT2D_4(up), enc1], 1))
        return self.final(up)
if __name__ == "__main__":
image = torch.rand((1, 3, 300, 300))
# print(image.size())
# model = UNetSR()
# model = UNetNoTop()
model = UNetD4()
# model = UNetSR(output_paddings=[1, 0])
out = model(image)
print(out.shape)
| 38.219917
| 107
| 0.625774
| 1,309
| 9,211
| 4.181054
| 0.126814
| 0.058286
| 0.079481
| 0.046592
| 0.789695
| 0.765028
| 0.748767
| 0.748767
| 0.748767
| 0.743651
| 0
| 0.083576
| 0.255673
| 9,211
| 240
| 108
| 38.379167
| 0.714702
| 0.196721
| 0
| 0.8
| 0
| 0
| 0.001109
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.042857
| 0.007143
| 0.142857
| 0.007143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0cb50033cd8564760f4f1d546b7f76337d99cb3e
| 115
|
py
|
Python
|
nn/activation_functions.py
|
nemoNoboru/GEN0
|
715385f5f243db04c86e6737a8ee93c9af786078
|
[
"MIT"
] | 1
|
2017-11-27T09:19:59.000Z
|
2017-11-27T09:19:59.000Z
|
nn/activation_functions.py
|
nemoNoboru/GEN0
|
715385f5f243db04c86e6737a8ee93c9af786078
|
[
"MIT"
] | null | null | null |
nn/activation_functions.py
|
nemoNoboru/GEN0
|
715385f5f243db04c86e6737a8ee93c9af786078
|
[
"MIT"
] | null | null | null |
import numpy as np
def relu(i):
    """Rectified linear unit: elementwise ``max(i, 0)``.

    Accepts scalars or numpy arrays and does not mutate its input.

    Bug fixed: the previous body was ``np.vectorize(np.maximum(i, 0, i))``,
    which (a) clobbered ``i`` in place via the third ``out`` argument of
    ``np.maximum`` and (b) returned an ``np.vectorize`` wrapper object
    instead of the activation values (``np.vectorize`` expects a callable,
    not an array).
    """
    return np.maximum(i, 0)
def tanh(i):
    """Hyperbolic tangent activation, applied elementwise by numpy."""
    result = np.tanh(i)
    return result
| 12.777778
| 44
| 0.634783
| 22
| 115
| 3.318182
| 0.545455
| 0.191781
| 0.246575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011111
| 0.217391
| 115
| 8
| 45
| 14.375
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0.2
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
0cb8a98d40512dbb0223d6197d119055c3144770
| 33
|
py
|
Python
|
xecd_rates/__init__.py
|
doguskidik/xecd-rates-python
|
6584c96891f5b91b9e7ceff6883ea4144b6326eb
|
[
"MIT"
] | 1
|
2020-07-07T20:58:36.000Z
|
2020-07-07T20:58:36.000Z
|
xecd_rates/__init__.py
|
doguskidik/xecd-rates-python
|
6584c96891f5b91b9e7ceff6883ea4144b6326eb
|
[
"MIT"
] | 1
|
2022-02-14T19:53:51.000Z
|
2022-02-14T19:53:51.000Z
|
xecd_rates/__init__.py
|
doguskidik/xecd-rates-python
|
6584c96891f5b91b9e7ceff6883ea4144b6326eb
|
[
"MIT"
] | null | null | null |
from xecd_rates.Xecd import Xecd
| 16.5
| 32
| 0.848485
| 6
| 33
| 4.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0cb9afae15e4b8401ecdc98f48752463215e843b
| 33,485
|
py
|
Python
|
gw-odw_Day2_with_Solns/Tuto_2.2_Matched_Filtering_In_action with solutions.py
|
basuparth/grav_wave_workshop3
|
eb9e2ff066bb1928e5a1dbc8cd8d24344515aae4
|
[
"MIT"
] | null | null | null |
gw-odw_Day2_with_Solns/Tuto_2.2_Matched_Filtering_In_action with solutions.py
|
basuparth/grav_wave_workshop3
|
eb9e2ff066bb1928e5a1dbc8cd8d24344515aae4
|
[
"MIT"
] | null | null | null |
gw-odw_Day2_with_Solns/Tuto_2.2_Matched_Filtering_In_action with solutions.py
|
basuparth/grav_wave_workshop3
|
eb9e2ff066bb1928e5a1dbc8cd8d24344515aae4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8

# Notebook-exported tutorial script: matched filtering of LIGO data with PyCBC.
# Cells are marked with the original `In[n]:` comments; `get_ipython()` calls
# only work when run under IPython/Jupyter.

# <img style="float: left;padding: 1.3em" src="https://indico.in2p3.fr/event/18313/logo-786578160.png">
#
# # Gravitational Wave Open Data Workshop #3
#
#
# ## Tutorial 2.2 PyCBC Tutorial, Matched Filtering in Action
#
# We will be using the [PyCBC](http://github.com/ligo-cbc/pycbc) library, which is used to study gravitational-wave data, find astrophysical sources due to compact binary mergers, and study their parameters. These are some of the same tools that the LIGO and Virgo collaborations use to find gravitational waves in LIGO/Virgo data
#
# In this tutorial we will walk through how to find a specific signal in LIGO data. We present matched filtering in PyCBC, which is optimal in the case of Gaussian noise and a known signal model. In reality our noise is not entirely Gaussian, and in practice we use a variety of techniques to separate signals from noise in addition to the use of the matched filter.
#
# Additional [examples](http://pycbc.org/pycbc/latest/html/#library-examples-and-interactive-tutorials) and module level documentation are [here](http://pycbc.org/pycbc/latest/html/py-modindex.html)

# ## Installation (execute only if running on a cloud platform!)

# In[1]:

# -- Use the following for Google Colab
get_ipython().system(" pip install -q 'lalsuite==6.66' 'PyCBC==1.15.3'")

# **Important:** With Google Colab, you may need to restart the runtime after running the cell above.

# ### Looking for a specific signal in the data
#
# If you know what signal you are looking for in the data, then matched filtering is known to be the optimal method in Gaussian noise to extract the signal. Even when the parameters of the signal are unknown, one can test for each set of parameters one is interested in finding.

# #### preconditioning the data
#
# The purpose of this is to reduce the dynamic range of the data and suppress low frequency behavior which can introduce numerical artefacts. We may also wish to reduce the sample rate of the data if high frequency content is not important. PyCBC contains an interface to the GWOSC catalog, so you can easily access the data and parameters of the published gravitational-wave signals

# In[2]:

get_ipython().run_line_magic('matplotlib', 'inline')

import pylab
from pycbc.catalog import Merger
from pycbc.filter import resample_to_delta_t, highpass

# As an example we use the GW150914 data
merger = Merger("GW150914")

# Get the data from the Hanford detector
strain = merger.strain('H1')

# Remove the low frequency content and downsample the data to 2048Hz
strain = highpass(strain, 15.0)
strain = resample_to_delta_t(strain, 1.0/2048)

pylab.plot(strain.sample_times, strain)
pylab.xlabel('Time (s)')
pylab.show()

# _Note_: To read data from a local file instead of from the GWOSC server, we can use the [pycbc.frame.read_frame(file, channel_name)](https://github.com/gwastro/pycbc/blob/master/docs/frame.rst) method.

# #### filter wraparound
#
# Note the spike in the data at the boundaries. This is caused by the highpass and resampling stages filtering the data. When the filter is applied to the boundaries, it wraps around to the beginning of the data. Since the data itself has a discontinuity (i.e. it is not cyclic) the filter itself will ring off for a time up to the length of the filter.
#
# Even if a visible transient is not seen, we want to avoid filters that act on times which are not causally connected. To avoid this we trim the ends of the data sufficiently to ensure that they do not wraparound the input. We will enforce this requirement in all steps of our filtering.

# In[3]:

# Remove 2 seconds of data from both the beginning and end
conditioned = strain.crop(2, 2)

pylab.plot(conditioned.sample_times, conditioned)
pylab.xlabel('Time (s)')
pylab.show()

# #### calculate the power spectral density
#
# Optimal matched filtering requires weighting the frequency components of the potential signal and data by the noise amplitude. We can view this as filtering the data with the time series equivalent of 1 / PSD. To ensure that we can control the effective length of the filter, we window the time domain equivalent of the PSD to a specific length. This has the effect of losing some information about line behavior in the detector, however, since our signals span a large frequency range, and lines are narrow, this is a negligible effect.
#
# Important note: Computing a PSD from data that might contain signals, non-Gaussianities and non-stationarities is not trivial. In this example we use Welch's method to obtain a PSD estimate. PyCBC's PSD module contains tools for measuring PSDs, or directly using pre-generated PSDs.

# In[4]:

from pycbc.psd import interpolate, inverse_spectrum_truncation

# Estimate the power spectral density
# We use 4 second samples of our time series in Welch method.
psd = conditioned.psd(4)

# Now that we have the psd we need to interpolate it to match our data
# and then limit the filter length of 1 / PSD. After this, we can
# directly use this PSD to filter the data in a controlled manner
psd = interpolate(psd, conditioned.delta_f)

# 1/PSD will now act as a filter with an effective length of 4 seconds
# Since the data has been highpassed above 15 Hz, and will have low values
# below this we need to inform the function to not include frequencies
# below this frequency.
psd = inverse_spectrum_truncation(psd, 4 * conditioned.sample_rate,
                                  low_frequency_cutoff=15)

# #### make your signal model
#
# Conceptually, matched filtering involves laying the potential signal over your data and integrating (after weighting frequencies correctly). If there is a signal in the data that aligns with your 'template', you will get a large value when integrated over.

# In[5]:

from pycbc.waveform import get_td_waveform

# In this case we "know" what the signal parameters are. In a search
# we would grid over the parameters and calculate the SNR time series
# for each one

# We'll assume equal masses, and non-rotating black holes which is within the posterior probability
# of GW150914.
m = 36 # Solar masses
hp, hc = get_td_waveform(approximant="SEOBNRv4_opt",
                         mass1=m,
                         mass2=m,
                         delta_t=conditioned.delta_t,
                         f_lower=20)

# We will resize the vector to match our data
hp.resize(len(conditioned))

# The waveform begins at the start of the vector, so if we want the
# SNR time series to correspond to the approximate merger location
# we need to shift the data so that the merger is approximately at the
# first bin of the data.

# The cyclic_time_shift method shifts the timeseries by a given amount of time.
# It treats the data as if it were on a ring so points shifted off the end
# of the series reappear at the start. Note that time stamps are *not* in
# general affected (as the start time of the full array is shifted),
# but the index of each point in the vector is.
#
# By convention waveforms returned from `get_td_waveform` have their
# merger stamped with time zero, so we can use the start time to
# shift the merger into position
pylab.figure()
pylab.title('Before shifting')
pylab.plot(hp.sample_times, hp)
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')

template = hp.cyclic_time_shift(hp.start_time)

pylab.figure()
pylab.title('After shifting')
pylab.plot(template.sample_times, template)
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')
# #### calculating the signal-to-noise time series
#
# In this section we will now calculate the signal-to-noise time series for our template. We'll take care to handle issues of filter corruption / wraparound by truncating the output time series. We need to account for both the length of the template and 1 / PSD.

# In[6]:

from pycbc.filter import matched_filter
import numpy

snr = matched_filter(template, conditioned,
                     psd=psd, low_frequency_cutoff=20)

# Remove time corrupted by the template filter and the psd filter
# We remove 4 seconds at the beginning and end for the PSD filtering
# And we remove 4 additional seconds at the beginning to account for
# the template length (this is somewhat generous for
# so short a template). A longer signal such as from a BNS, would
# require much more padding at the beginning of the vector.
snr = snr.crop(4 + 4, 4)

# Why are we taking an abs() here?
# The `matched_filter` function actually returns a 'complex' SNR.
# What that means is that the real portion corresponds to the SNR
# associated with directly filtering the template with the data.
# The imaginary portion corresponds to filtering with a template that
# is 90 degrees out of phase. Since the phase of a signal may be
# anything, we choose to maximize over the phase of the signal.
pylab.figure(figsize=[10, 4])
pylab.plot(snr.sample_times, abs(snr))
pylab.ylabel('Signal-to-noise')
pylab.xlabel('Time (s)')
pylab.show()

# Locate the SNR peak: its index gives the complex peak value and time.
peak = abs(snr).numpy().argmax()
snrp = snr[peak]
time = snr.sample_times[peak]

print("We found a signal at {}s with SNR {}".format(time,
                                                    abs(snrp)))

# ### Aligning and Subtracting the Proposed Signal
#
# In the previous section we found a peak in the signal-to-noise for a proposed binary black hole merger. We can use this SNR peak to align our proposal to the data, and to also subtract our proposal from the data.

# In[7]:

from pycbc.filter import sigma

# The time, amplitude, and phase of the SNR peak tell us how to align
# our proposed signal with the data.

# Shift the template to the peak time
dt = time - conditioned.start_time
aligned = template.cyclic_time_shift(dt)

# scale the template so that it would have SNR 1 in this data
aligned /= sigma(aligned, psd=psd, low_frequency_cutoff=20.0)

# Scale the template amplitude and phase to the peak value
aligned = (aligned.to_frequencyseries() * snrp).to_timeseries()
aligned.start_time = conditioned.start_time

# #### Visualize the overlap between the signal and data
#
# To compare the data and signal on equal footing, and to concentrate on the frequency range that is important. We will whiten both the template and the data, and then bandpass both the data and template between 30-300 Hz. In this way, any signal that is in the data is transformed in the same way that the template is.

# In[8]:

# We do it this way so that we can whiten both the template and the data
white_data = (conditioned.to_frequencyseries() / psd**0.5).to_timeseries()
white_template = (aligned.to_frequencyseries() / psd**0.5).to_timeseries()

white_data = white_data.highpass_fir(30., 512).lowpass_fir(300, 512)
white_template = white_template.highpass_fir(30, 512).lowpass_fir(300, 512)

# Select the time around the merger
white_data = white_data.time_slice(merger.time-.2, merger.time+.1)
white_template = white_template.time_slice(merger.time-.2, merger.time+.1)

pylab.figure(figsize=[15, 3])
pylab.plot(white_data.sample_times, white_data, label="Data")
pylab.plot(white_template.sample_times, white_template, label="Template")
pylab.legend()
pylab.show()

# #### Subtracting the signal from the data
#
# Now that we've aligned the template we can simply subtract it. Let's see below how that looks in the time-frequency plots!

# In[9]:

subtracted = conditioned - aligned

# Plot the original data and the subtracted signal data
for data, title in [(conditioned, 'Original H1 Data'),
                    (subtracted, 'Signal Subtracted from H1 Data')]:
    t, f, p = data.whiten(4, 4).qtransform(.001,
                                           logfsteps=100,
                                           qrange=(8, 8),
                                           frange=(20, 512))
    pylab.figure(figsize=[15, 3])
    pylab.title(title)
    pylab.pcolormesh(t, f, p**0.5, vmin=1, vmax=6)
    pylab.yscale('log')
    pylab.xlabel('Time (s)')
    pylab.ylabel('Frequency (Hz)')
    pylab.xlim(merger.time - 2, merger.time + 1)
    pylab.show()
# ## Challenge!
#
# Use the methods demonstrated above to see if you can calculate the SNR
# time series in the following data sets. What is the SNR of each signal?
# Which template matched best to which data?
#
# Information that may be useful:
#
# * Signals are all placed between 100 and 120 seconds into the frame file.
# * You may assume mass1 = mass2 (equal mass) and that each component mass is one of 15, 30, or 45.
# * Each file starts at gps time 0, and ends at gps time 128
# * The channel name in each file is "H1:TEST-STRAIN"

# In[10]:

# Download the challenge set files
from pycbc.frame import read_frame
# NOTE(review): get_file below uses urllib.request, but 'import urllib' alone
# does not guarantee the request submodule is loaded -- confirm it is imported
# (directly or by a dependency) before relying on it.
import urllib
def get_file(fname):
    """Download challenge frame file *fname* from the gw-odw data repository.

    The file is saved under the same name in the current working directory.

    Fixes: (1) the module top only does ``import urllib``, which does not
    guarantee ``urllib.request`` is loaded, so import it here explicitly;
    (2) announce the URL *before* the (potentially slow) download instead of
    after it has already finished.
    """
    import urllib.request

    url = "https://github.com/gw-odw/odw-2020/raw/master/Data/{}".format(fname)
    print('Getting : {}'.format(url))
    urllib.request.urlretrieve(url, fname)
# Fetch all three challenge frame files, then show how to read one back.
files = ['PyCBC_T2_0.gwf', 'PyCBC_T2_1.gwf', 'PyCBC_T2_2.gwf']
for fname in files:
    get_file(fname)

# An example of how to read the data from these files:
file_name = "PyCBC_T2_0.gwf"

# LOSC bulk data typically uses the same convention for internal channel names
# Strain is typically IFO:LOSC-STRAIN, where IFO can be H1/L1/V1.
channel_name = "H1:TEST-STRAIN"

start = 0
end = start + 128

# Read the full 128 s of strain (gps 0 to 128) from the local frame file.
ts = read_frame(file_name, channel_name, start, end)
# ### Analysis of PyCBC_T2_0.gwf
# Same pipeline as the GW150914 walkthrough above, applied to the first
# challenge frame file with an m = 45 equal-mass template.

# In[11]:

get_ipython().run_line_magic('matplotlib', 'inline')

import pylab
from pycbc.catalog import Merger
from pycbc.filter import resample_to_delta_t, highpass

# Read the first challenge frame file (local .gwf, not the GWOSC server)
file_name1 = "PyCBC_T2_0.gwf"
channel_name1 = "H1:TEST-STRAIN"
start = 0
end = start + 128
ts1 = read_frame(file_name1, channel_name1, start, end)

# Remove the low frequency content and downsample the data to 2048Hz
strain1 = highpass(ts1, 15.0)
strain1 = resample_to_delta_t(strain1, 1.0/2048)

pylab.plot(strain1.sample_times, strain1)
pylab.xlabel('Time (s)')
pylab.show()

# In[12]:

# Remove 2 seconds of data from both the beginning and end
conditioned1 = strain1.crop(2, 2)

pylab.plot(conditioned1.sample_times, conditioned1)
pylab.xlabel('Time (s)')
pylab.show()

# In[13]:

from pycbc.psd import interpolate, inverse_spectrum_truncation

# Estimate the power spectral density
# We use 4 second samples of our time series in Welch method.
psd1 = conditioned1.psd(4)

# Now that we have the psd we need to interpolate it to match our data
# and then limit the filter length of 1 / PSD. After this, we can
# directly use this PSD to filter the data in a controlled manner
psd1 = interpolate(psd1, conditioned1.delta_f)

# 1/PSD will now act as a filter with an effective length of 4 seconds
# Since the data has been highpassed above 15 Hz, and will have low values
# below this we need to inform the function to not include frequencies
# below this frequency.
psd1 = inverse_spectrum_truncation(psd1, 4 * conditioned1.sample_rate,
                                   low_frequency_cutoff=15)

# In[14]:

from pycbc.waveform import get_td_waveform

# In this case we "know" what the signal parameters are. In a search
# we would grid over the parameters and calculate the SNR time series
# for each one

# We'll assume equal masses, and non-rotating black holes which is within the posterior probability
# of GW150914.
m = 45 # Solar masses
hp, hc = get_td_waveform(approximant="SEOBNRv4_opt",
                         mass1=m,
                         mass2=m,
                         delta_t=conditioned1.delta_t,
                         f_lower=20)

# We will resize the vector to match our data
hp.resize(len(conditioned1))

# The waveform begins at the start of the vector, so if we want the
# SNR time series to correspond to the approximate merger location
# we need to shift the data so that the merger is approximately at the
# first bin of the data.

# The cyclic_time_shift method shifts the timeseries by a given amount of time.
# It treats the data as if it were on a ring so points shifted off the end
# of the series reappear at the start. Note that time stamps are *not* in
# general affected (as the start time of the full array is shifted),
# but the index of each point in the vector is.
#
# By convention waveforms returned from `get_td_waveform` have their
# merger stamped with time zero, so we can use the start time to
# shift the merger into position
pylab.figure()
pylab.title('Before shifting')
pylab.plot(hp.sample_times, hp)
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')

template = hp.cyclic_time_shift(hp.start_time)

pylab.figure()
pylab.title('After shifting')
pylab.plot(template.sample_times, template)
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')

# In[15]:

from pycbc.filter import matched_filter
import numpy

snr = matched_filter(template, conditioned1,
                     psd=psd1, low_frequency_cutoff=20)

# Remove time corrupted by the template filter and the psd filter
# We remove 4 seconds at the beginning and end for the PSD filtering
# And we remove 4 additional seconds at the beginning to account for
# the template length (this is somewhat generous for
# so short a template). A longer signal such as from a BNS, would
# require much more padding at the beginning of the vector.
snr = snr.crop(4 + 4, 4)

# Why are we taking an abs() here?
# The `matched_filter` function actually returns a 'complex' SNR.
# What that means is that the real portion corresponds to the SNR
# associated with directly filtering the template with the data.
# The imaginary portion corresponds to filtering with a template that
# is 90 degrees out of phase. Since the phase of a signal may be
# anything, we choose to maximize over the phase of the signal.
pylab.figure(figsize=[10, 4])
pylab.plot(snr.sample_times, abs(snr))
pylab.ylabel('Signal-to-noise')
pylab.xlabel('Time (s)')
pylab.show()

peak = abs(snr).numpy().argmax()
snrp = snr[peak]
time = snr.sample_times[peak]

print("We found a signal at {}s with SNR {}".format(time,
                                                    abs(snrp)))

# In[16]:

from pycbc.filter import sigma

# The time, amplitude, and phase of the SNR peak tell us how to align
# our proposed signal with the data.

# Shift the template to the peak time
dt = time - conditioned1.start_time
aligned = template.cyclic_time_shift(dt)

# scale the template so that it would have SNR 1 in this data
aligned /= sigma(aligned, psd=psd1, low_frequency_cutoff=20.0)

# Scale the template amplitude and phase to the peak value
aligned = (aligned.to_frequencyseries() * snrp).to_timeseries()
aligned.start_time = conditioned1.start_time

# In[17]:

# We do it this way so that we can whiten both the template and the data
white_data = (conditioned1.to_frequencyseries() / psd1**0.5).to_timeseries()
white_template = (aligned.to_frequencyseries() / psd1**0.5).to_timeseries()

white_data = white_data.highpass_fir(30., 512).lowpass_fir(300, 512)
white_template = white_template.highpass_fir(30, 512).lowpass_fir(300, 512)

# Select the time around the merger
white_data = white_data.time_slice(time-.2, time+.1)
white_template = white_template.time_slice(time-.2, time+.1)

pylab.figure(figsize=[15, 3])
pylab.plot(white_data.sample_times, white_data, label="Data")
pylab.plot(white_template.sample_times, white_template, label="Template")
pylab.legend()
pylab.show()

# In[19]:

subtracted = conditioned1 - aligned

# Plot the original data and the subtracted signal data
for data, title in [(conditioned1, 'Original H1 Data'),
                    (subtracted, 'Signal Subtracted from H1 Data')]:
    t, f, p = data.whiten(4, 4).qtransform(.001,
                                           logfsteps=100,
                                           qrange=(8, 8),
                                           frange=(20, 512))
    pylab.figure(figsize=[15, 3])
    pylab.title(title)
    pylab.pcolormesh(t, f, p**0.5, vmin=1, vmax=6)
    pylab.yscale('log')
    pylab.xlabel('Time (s)')
    pylab.ylabel('Frequency (Hz)')
    pylab.xlim(time - 2, time + 1)
    pylab.show()
# ### Analysis of PyCBC_T2_1.gwf
# Same pipeline again for the second challenge frame file, this time with an
# m = 30 equal-mass template.

# In[20]:

get_ipython().run_line_magic('matplotlib', 'inline')

import pylab
from pycbc.catalog import Merger
from pycbc.filter import resample_to_delta_t, highpass

# Read the second challenge frame file (local .gwf, not the GWOSC server)
file_name1 = "PyCBC_T2_1.gwf"
channel_name1 = "H1:TEST-STRAIN"
start = 0
end = start + 128
ts1 = read_frame(file_name1, channel_name1, start, end)

# Remove the low frequency content and downsample the data to 2048Hz
strain1 = highpass(ts1, 15.0)
strain1 = resample_to_delta_t(strain1, 1.0/2048)

pylab.plot(strain1.sample_times, strain1)
pylab.xlabel('Time (s)')
pylab.show()

# In[21]:

# Remove 2 seconds of data from both the beginning and end
conditioned1 = strain1.crop(2, 2)

pylab.plot(conditioned1.sample_times, conditioned1)
pylab.xlabel('Time (s)')
pylab.show()

# In[22]:

from pycbc.psd import interpolate, inverse_spectrum_truncation

# Estimate the power spectral density
# We use 4 second samples of our time series in Welch method.
psd1 = conditioned1.psd(4)

# Now that we have the psd we need to interpolate it to match our data
# and then limit the filter length of 1 / PSD. After this, we can
# directly use this PSD to filter the data in a controlled manner
psd1 = interpolate(psd1, conditioned1.delta_f)

# 1/PSD will now act as a filter with an effective length of 4 seconds
# Since the data has been highpassed above 15 Hz, and will have low values
# below this we need to inform the function to not include frequencies
# below this frequency.
psd1 = inverse_spectrum_truncation(psd1, 4 * conditioned1.sample_rate,
                                   low_frequency_cutoff=15)

# In[23]:

from pycbc.waveform import get_td_waveform

# In this case we "know" what the signal parameters are. In a search
# we would grid over the parameters and calculate the SNR time series
# for each one

# We'll assume equal masses, and non-rotating black holes which is within the posterior probability
# of GW150914.
m = 30 # Solar masses
hp, hc = get_td_waveform(approximant="SEOBNRv4_opt",
                         mass1=m,
                         mass2=m,
                         delta_t=conditioned1.delta_t,
                         f_lower=20)

# We will resize the vector to match our data
hp.resize(len(conditioned1))

# The waveform begins at the start of the vector, so if we want the
# SNR time series to correspond to the approximate merger location
# we need to shift the data so that the merger is approximately at the
# first bin of the data.

# The cyclic_time_shift method shifts the timeseries by a given amount of time.
# It treats the data as if it were on a ring so points shifted off the end
# of the series reappear at the start. Note that time stamps are *not* in
# general affected (as the start time of the full array is shifted),
# but the index of each point in the vector is.
#
# By convention waveforms returned from `get_td_waveform` have their
# merger stamped with time zero, so we can use the start time to
# shift the merger into position
pylab.figure()
pylab.title('Before shifting')
pylab.plot(hp.sample_times, hp)
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')

template = hp.cyclic_time_shift(hp.start_time)

pylab.figure()
pylab.title('After shifting')
pylab.plot(template.sample_times, template)
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')

# In[24]:

from pycbc.filter import matched_filter
import numpy

snr = matched_filter(template, conditioned1,
                     psd=psd1, low_frequency_cutoff=20)

# Remove time corrupted by the template filter and the psd filter
# We remove 4 seconds at the beginning and end for the PSD filtering
# And we remove 4 additional seconds at the beginning to account for
# the template length (this is somewhat generous for
# so short a template). A longer signal such as from a BNS, would
# require much more padding at the beginning of the vector.
snr = snr.crop(4 + 4, 4)

# Why are we taking an abs() here?
# The `matched_filter` function actually returns a 'complex' SNR.
# What that means is that the real portion corresponds to the SNR
# associated with directly filtering the template with the data.
# The imaginary portion corresponds to filtering with a template that
# is 90 degrees out of phase. Since the phase of a signal may be
# anything, we choose to maximize over the phase of the signal.
pylab.figure(figsize=[10, 4])
pylab.plot(snr.sample_times, abs(snr))
pylab.ylabel('Signal-to-noise')
pylab.xlabel('Time (s)')
pylab.show()

peak = abs(snr).numpy().argmax()
snrp = snr[peak]
time = snr.sample_times[peak]

print("We found a signal at {}s with SNR {}".format(time,
                                                    abs(snrp)))

# In[25]:

from pycbc.filter import sigma

# The time, amplitude, and phase of the SNR peak tell us how to align
# our proposed signal with the data.

# Shift the template to the peak time
dt = time - conditioned1.start_time
aligned = template.cyclic_time_shift(dt)

# scale the template so that it would have SNR 1 in this data
aligned /= sigma(aligned, psd=psd1, low_frequency_cutoff=20.0)

# Scale the template amplitude and phase to the peak value
aligned = (aligned.to_frequencyseries() * snrp).to_timeseries()
aligned.start_time = conditioned1.start_time

# In[26]:

# We do it this way so that we can whiten both the template and the data
white_data = (conditioned1.to_frequencyseries() / psd1**0.5).to_timeseries()
white_template = (aligned.to_frequencyseries() / psd1**0.5).to_timeseries()

white_data = white_data.highpass_fir(30., 512).lowpass_fir(300, 512)
white_template = white_template.highpass_fir(30, 512).lowpass_fir(300, 512)

# Select the time around the merger
white_data = white_data.time_slice(time-.2, time+.1)
white_template = white_template.time_slice(time-.2, time+.1)

pylab.figure(figsize=[15, 3])
pylab.plot(white_data.sample_times, white_data, label="Data")
pylab.plot(white_template.sample_times, white_template, label="Template")
pylab.legend()
pylab.show()

# In[27]:

subtracted = conditioned1 - aligned

# Plot the original data and the subtracted signal data
for data, title in [(conditioned1, 'Original H1 Data'),
                    (subtracted, 'Signal Subtracted from H1 Data')]:
    t, f, p = data.whiten(4, 4).qtransform(.001,
                                           logfsteps=100,
                                           qrange=(8, 8),
                                           frange=(20, 512))
    pylab.figure(figsize=[15, 3])
    pylab.title(title)
    pylab.pcolormesh(t, f, p**0.5, vmin=1, vmax=6)
    pylab.yscale('log')
    pylab.xlabel('Time (s)')
    pylab.ylabel('Frequency (Hz)')
    pylab.xlim(time - 2, time + 1)
    pylab.show()
# ### Analysis of PyCBC_T2_2.gwf
# Same pipeline for the third challenge frame file with an m = 15 equal-mass
# template. (This section continues beyond this chunk of the file.)

# In[28]:

get_ipython().run_line_magic('matplotlib', 'inline')

import pylab
from pycbc.catalog import Merger
from pycbc.filter import resample_to_delta_t, highpass

# Read the third challenge frame file (local .gwf, not the GWOSC server)
file_name1 = "PyCBC_T2_2.gwf"
channel_name1 = "H1:TEST-STRAIN"
start = 0
end = start + 128
ts1 = read_frame(file_name1, channel_name1, start, end)

# Remove the low frequency content and downsample the data to 2048Hz
strain1 = highpass(ts1, 15.0)
strain1 = resample_to_delta_t(strain1, 1.0/2048)

pylab.plot(strain1.sample_times, strain1)
pylab.xlabel('Time (s)')
pylab.show()

# In[29]:

# Remove 2 seconds of data from both the beginning and end
conditioned1 = strain1.crop(2, 2)

pylab.plot(conditioned1.sample_times, conditioned1)
pylab.xlabel('Time (s)')
pylab.show()

# In[30]:

from pycbc.psd import interpolate, inverse_spectrum_truncation

# Estimate the power spectral density
# We use 4 second samples of our time series in Welch method.
psd1 = conditioned1.psd(4)

# Now that we have the psd we need to interpolate it to match our data
# and then limit the filter length of 1 / PSD. After this, we can
# directly use this PSD to filter the data in a controlled manner
psd1 = interpolate(psd1, conditioned1.delta_f)

# 1/PSD will now act as a filter with an effective length of 4 seconds
# Since the data has been highpassed above 15 Hz, and will have low values
# below this we need to inform the function to not include frequencies
# below this frequency.
psd1 = inverse_spectrum_truncation(psd1, 4 * conditioned1.sample_rate,
                                   low_frequency_cutoff=15)

# In[31]:

from pycbc.waveform import get_td_waveform

# In this case we "know" what the signal parameters are. In a search
# we would grid over the parameters and calculate the SNR time series
# for each one

# We'll assume equal masses, and non-rotating black holes which is within the posterior probability
# of GW150914.
m = 15 # Solar masses
hp, hc = get_td_waveform(approximant="SEOBNRv4_opt",
                         mass1=m,
                         mass2=m,
                         delta_t=conditioned1.delta_t,
                         f_lower=20)

# We will resize the vector to match our data
hp.resize(len(conditioned1))

# The waveform begins at the start of the vector, so if we want the
# SNR time series to correspond to the approximate merger location
# we need to shift the data so that the merger is approximately at the
# first bin of the data.

# The cyclic_time_shift method shifts the timeseries by a given amount of time.
# It treats the data as if it were on a ring so points shifted off the end
# of the series reappear at the start. Note that time stamps are *not* in
# general affected (as the start time of the full array is shifted),
# but the index of each point in the vector is.
#
# By convention waveforms returned from `get_td_waveform` have their
# merger stamped with time zero, so we can use the start time to
# shift the merger into position
pylab.figure()
pylab.title('Before shifting')
pylab.plot(hp.sample_times, hp)
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')

template = hp.cyclic_time_shift(hp.start_time)

pylab.figure()
pylab.title('After shifting')
pylab.plot(template.sample_times, template)
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')

# In[32]:

from pycbc.filter import matched_filter
import numpy

snr = matched_filter(template, conditioned1,
                     psd=psd1, low_frequency_cutoff=20)

# Remove time corrupted by the template filter and the psd filter
# We remove 4 seconds at the beginning and end for the PSD filtering
# And we remove 4 additional seconds at the beginning to account for
# the template length (this is somewhat generous for
# so short a template). A longer signal such as from a BNS, would
# require much more padding at the beginning of the vector.
snr = snr.crop(4 + 4, 4)

# Why are we taking an abs() here?
# The `matched_filter` function actually returns a 'complex' SNR.
# What that means is that the real portion corresponds to the SNR
# associated with directly filtering the template with the data.
# The imaginary portion corresponds to filtering with a template that
# is 90 degrees out of phase. Since the phase of a signal may be
# anything, we choose to maximize over the phase of the signal.
pylab.figure(figsize=[10, 4])
pylab.plot(snr.sample_times, abs(snr))
pylab.ylabel('Signal-to-noise')
pylab.xlabel('Time (s)')
pylab.show()

peak = abs(snr).numpy().argmax()
snrp = snr[peak]
time = snr.sample_times[peak]

print("We found a signal at {}s with SNR {}".format(time,
                                                    abs(snrp)))

# In[33]:

from pycbc.filter import sigma

# The time, amplitude, and phase of the SNR peak tell us how to align
# our proposed signal with the data.

# Shift the template to the peak time
dt = time - conditioned1.start_time
aligned = template.cyclic_time_shift(dt)

# scale the template so that it would have SNR 1 in this data
aligned /= sigma(aligned, psd=psd1, low_frequency_cutoff=20.0)

# Scale the template amplitude and phase to the peak value
aligned = (aligned.to_frequencyseries() * snrp).to_timeseries()
aligned.start_time = conditioned1.start_time
# In[34]:
# We do it this way so that we can whiten both the template and the data
white_data = (conditioned1.to_frequencyseries() / psd1**0.5).to_timeseries()
white_template = (aligned.to_frequencyseries() / psd1**0.5).to_timeseries()
white_data = white_data.highpass_fir(30., 512).lowpass_fir(300, 512)
white_template = white_template.highpass_fir(30, 512).lowpass_fir(300, 512)
# Select the time around the merger
white_data = white_data.time_slice(time-.2, time+.1)
white_template = white_template.time_slice(time-.2, time+.1)
pylab.figure(figsize=[15, 3])
pylab.plot(white_data.sample_times, white_data, label="Data")
pylab.plot(white_template.sample_times, white_template, label="Template")
pylab.legend()
pylab.show()
# In[35]:
subtracted = conditioned1 - aligned
# Plot the original data and the subtracted signal data
for data, title in [(conditioned1, 'Original H1 Data'),
(subtracted, 'Signal Subtracted from H1 Data')]:
t, f, p = data.whiten(4, 4).qtransform(.001,
logfsteps=100,
qrange=(8, 8),
frange=(20, 512))
pylab.figure(figsize=[15, 3])
pylab.title(title)
pylab.pcolormesh(t, f, p**0.5, vmin=1, vmax=6)
pylab.yscale('log')
pylab.xlabel('Time (s)')
pylab.ylabel('Frequency (Hz)')
pylab.xlim(time - 2, time + 1)
pylab.show()
| 34.843913
| 539
| 0.719785
| 5,301
| 33,485
| 4.478778
| 0.115261
| 0.017101
| 0.015163
| 0.016174
| 0.772808
| 0.770491
| 0.762278
| 0.759835
| 0.754528
| 0.75238
| 0
| 0.027958
| 0.19567
| 33,485
| 960
| 540
| 34.880208
| 0.853563
| 0.53845
| 0
| 0.878049
| 0
| 0
| 0.082787
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00271
| false
| 0.04336
| 0.092141
| 0
| 0.094851
| 0.01355
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0b22ce77678cabd821f89ea234c8c27cfd57af15
| 3,960
|
py
|
Python
|
src/HartreeParticleDSL/test/backends/C_AOS/test_C_AOS_op_precedence.py
|
stfc/HartreeParticleDSL
|
17990f1a85c9cbec3c4dfa0923e2c44cad6f381c
|
[
"MIT"
] | null | null | null |
src/HartreeParticleDSL/test/backends/C_AOS/test_C_AOS_op_precedence.py
|
stfc/HartreeParticleDSL
|
17990f1a85c9cbec3c4dfa0923e2c44cad6f381c
|
[
"MIT"
] | 47
|
2021-09-16T10:28:05.000Z
|
2022-03-15T14:24:33.000Z
|
src/HartreeParticleDSL/test/backends/C_AOS/test_C_AOS_op_precedence.py
|
stfc/HartreeParticleDSL
|
17990f1a85c9cbec3c4dfa0923e2c44cad6f381c
|
[
"MIT"
] | 1
|
2021-09-27T15:20:01.000Z
|
2021-09-27T15:20:01.000Z
|
from HartreeParticleDSL.backends.C_AOS.visitors import *
from HartreeParticleDSL.backends.C_AOS.C_AOS import *
import ast
import inspect
import textwrap
import pytest
from HartreeParticleDSL.HartreeParticleDSLExceptions import IllegalLoopError, UnsupportedCodeError, \
IllegalArgumentCountError
import HartreeParticleDSL.HartreeParticleDSL as HartreeParticleDSL
def test_plus_mul():
'''Test the plus_mul order for C_AOS'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = 1 + 2 * 3
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( 1 + ( 2 * 3 ) )" in out
def test_plus_div():
'''Test the plus_div order for C_AOS'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = 1 + 2 / 3
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( 1 + ( 2 / 3 ) )" in out
def test_mul_div():
'''Test the mul_div order for C_AOS'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = 1 * 2 / 3
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( ( 1 * 2 ) / 3 )" in out
def a():
b = 1 / 2 * 3
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( ( 1 / 2 ) * 3 )" in out
def test_bracket_plus_mul():
'''Test how brackets affect the plus_mul order for C_AOS'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = (1 + 2) * 3
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( ( 1 + 2 ) * 3 )" in out
def test_bracket_plus_div():
'''Test how brackets affect the plus_div order for C_AOS'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = (1 + 2) / 3
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( ( 1 + 2 ) / 3 )" in out
def test_gte_plus():
'''Test order of gte and plus for C_AOS'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = 3 >= 1 + 2
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( 3 >= ( 1 + 2 ) )" in out
def test_gt_plus():
'''Test order of gt and plus for C_AOS'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = 3 > 1 + 2
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( 3 > ( 1 + 2 ) )" in out
def test_lt_plus():
'''Test order of lt and plus for C_AOS'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = 3 < 1 + 2
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( 3 < ( 1 + 2 ) )" in out
def test_lte_plus():
'''Test order of lte and plus for C_AOS'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = 3 <= 1 + 2
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "( 3 <= ( 1 + 2 ) )" in out
def test_not_land():
'''Test order of not and l_and works'''
aos = C_AOS()
aos.disable_variable_checks()
HartreeParticleDSL.set_backend(aos)
v = c_visitor(aos)
def a():
b = not z and y
c = ast.parse(textwrap.dedent(inspect.getsource(a)))
out = v.visit(c)
assert "!z && y" in out
| 29.117647
| 101
| 0.604798
| 581
| 3,960
| 3.967298
| 0.101549
| 0.038178
| 0.057701
| 0.081128
| 0.813449
| 0.783948
| 0.765727
| 0.765727
| 0.765727
| 0.765727
| 0
| 0.020485
| 0.260354
| 3,960
| 135
| 102
| 29.333333
| 0.766473
| 0.09798
| 0
| 0.640351
| 0
| 0
| 0.050852
| 0
| 0
| 0
| 0
| 0
| 0.096491
| 1
| 0.184211
| false
| 0
| 0.070175
| 0
| 0.254386
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0b3b45963ab4c3f2fbd5f8c7f998da42bc91f385
| 4,907
|
py
|
Python
|
pysal/spreg/tests/test_ml_lag.py
|
cubensys/pysal
|
8d50990f6e6603ba79ae1a887a20a1e3a0734e51
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
pysal/spreg/tests/test_ml_lag.py
|
cubensys/pysal
|
8d50990f6e6603ba79ae1a887a20a1e3a0734e51
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
pysal/spreg/tests/test_ml_lag.py
|
cubensys/pysal
|
8d50990f6e6603ba79ae1a887a20a1e3a0734e51
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2021-07-19T01:46:17.000Z
|
2021-07-19T01:46:17.000Z
|
import unittest
import pysal
import scipy
import numpy as np
from pysal.spreg.ml_lag import ML_Lag
from pysal.spreg import utils
from pysal.common import RTOL
from skip import SKIP
@unittest.skipIf(SKIP,
"Skipping MLLag Tests")
class TestMLError(unittest.TestCase):
def setUp(self):
db = pysal.open(pysal.examples.get_path("baltim.dbf"),'r')
self.ds_name = "baltim.dbf"
self.y_name = "PRICE"
self.y = np.array(db.by_col(self.y_name)).T
self.y.shape = (len(self.y),1)
self.x_names = ["NROOM","AGE","SQFT"]
self.x = np.array([db.by_col(var) for var in self.x_names]).T
ww = pysal.open(pysal.examples.get_path("baltim_q.gal"))
self.w = ww.read()
ww.close()
self.w_name = "baltim_q.gal"
self.w.transform = 'r'
def _estimate_and_compare(self, **kwargs):
reg = ML_Lag(self.y, self.x, w=self.w,
name_y=self.y_name, name_x=self.x_names,
name_w=self.w_name,name_ds=self.ds_name,
**kwargs)
betas = np.array([[-6.04040164],
[ 3.48995114],
[-0.20103955],
[ 0.65462382],
[ 0.62351143]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
u = np.array([ 47.51218398])
np.testing.assert_allclose(reg.u[0],u,RTOL)
predy = np.array([-0.51218398])
np.testing.assert_allclose(reg.predy[0],predy,RTOL)
n = 211
np.testing.assert_allclose(reg.n,n,RTOL)
k = 5
np.testing.assert_allclose(reg.k,k,RTOL)
y = np.array([ 47.])
np.testing.assert_allclose(reg.y[0],y,RTOL)
x = np.array([ 1. , 4. , 148. , 11.25])
np.testing.assert_allclose(reg.x[0],x,RTOL)
e = np.array([ 41.99251608])
np.testing.assert_allclose(reg.e_pred[0],e,RTOL)
my = 44.307180094786695
np.testing.assert_allclose(reg.mean_y,my)
sy = 23.606076835380495
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([ 28.57288755, 1.42341656, 0.00288068, 0.02956392, 0.00332139])
np.testing.assert_allclose(reg.vm.diagonal(),vm,RTOL)
sig2 = 216.27525647243797
np.testing.assert_allclose(reg.sig2,sig2,RTOL)
pr2 = 0.6133020721559487
np.testing.assert_allclose(reg.pr2,pr2)
std_err = np.array([ 5.34536131, 1.19307022, 0.05367198, 0.17194162, 0.05763147])
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
logll = -875.92771143484833
np.testing.assert_allclose(reg.logll,logll,RTOL)
aic = 1761.8554228696967
np.testing.assert_allclose(reg.aic,aic,RTOL)
schwarz = 1778.614713537077
np.testing.assert_allclose(reg.schwarz,schwarz,RTOL)
def test_dense(self):
self._estimate_and_compare(method='FULL')
def test_ord(self):
reg = ML_Lag(self.y, self.x, w=self.w,
name_y=self.y_name, name_x=self.x_names,
name_w=self.w_name,name_ds=self.ds_name,
method='ORD')
betas = np.array([[-6.04040164],
[ 3.48995114],
[-0.20103955],
[ 0.65462382],
[ 0.62351143]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
u = np.array([ 47.51218398])
np.testing.assert_allclose(reg.u[0],u,RTOL)
predy = np.array([-0.51218398])
np.testing.assert_allclose(reg.predy[0],predy,RTOL)
n = 211
np.testing.assert_allclose(reg.n,n,RTOL)
k = 5
np.testing.assert_allclose(reg.k,k,RTOL)
y = np.array([ 47.])
np.testing.assert_allclose(reg.y[0],y,RTOL)
x = np.array([ 1. , 4. , 148. , 11.25])
np.testing.assert_allclose(reg.x[0],x,RTOL)
e = np.array([ 41.99251608])
np.testing.assert_allclose(reg.e_pred[0],e,RTOL)
my = 44.307180094786695
np.testing.assert_allclose(reg.mean_y,my)
sy = 23.606076835380495
np.testing.assert_allclose(reg.std_y,sy)
vm = np.array([ 28.63404, 1.423698, 0.002884738, 0.02957845,
0.003379166])
np.testing.assert_allclose(reg.vm.diagonal(),vm,RTOL)
sig2 = 216.27525647243797
np.testing.assert_allclose(reg.sig2,sig2,RTOL)
pr2 = 0.6133020721559487
np.testing.assert_allclose(reg.pr2,pr2)
std_err = np.array([ 5.351078, 1.193188, 0.05371, 0.171984, 0.058131])
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
logll = -875.92771143484833
np.testing.assert_allclose(reg.logll,logll,RTOL)
aic = 1761.8554228696967
np.testing.assert_allclose(reg.aic,aic,RTOL)
schwarz = 1778.614713537077
np.testing.assert_allclose(reg.schwarz,schwarz,RTOL)
def test_LU(self):
self._estimate_and_compare(method='LU')
if __name__ == '__main__':
unittest.main()
| 39.256
| 93
| 0.606888
| 699
| 4,907
| 4.120172
| 0.20887
| 0.10625
| 0.177083
| 0.271528
| 0.777083
| 0.759028
| 0.736806
| 0.7125
| 0.7125
| 0.7125
| 0
| 0.165075
| 0.253108
| 4,907
| 124
| 94
| 39.572581
| 0.620737
| 0
| 0
| 0.666667
| 0
| 0
| 0.020379
| 0
| 0
| 0
| 0
| 0
| 0.290598
| 1
| 0.042735
| false
| 0
| 0.068376
| 0
| 0.119658
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0b5771c93105240321463e60f1b9fe3e52a3c477
| 72
|
py
|
Python
|
Snowflake/__init__.py
|
leonardozcm/Point-Completion-Fig-AutoGenerator
|
109f5a414f51469fac82d0d23cde69efb9cf97e0
|
[
"Apache-2.0"
] | 31
|
2021-08-22T15:01:58.000Z
|
2022-03-19T12:26:21.000Z
|
models/__init__.py
|
AllenXiangX/PMP-Net
|
c6a65da629f0faafd3b1e2dd060e84ab53b9379f
|
[
"MIT"
] | 10
|
2021-09-06T09:07:38.000Z
|
2022-02-12T08:12:54.000Z
|
models/__init__.py
|
leonardozcm/SnowflakeNet
|
93e7151610765e7e2b41ace2d03c8750f0b6c80c
|
[
"MIT"
] | 5
|
2021-08-30T00:53:17.000Z
|
2022-03-20T11:57:25.000Z
|
import sys
sys.path.append('../pointnet2_ops_lib')
sys.path.append('..')
| 24
| 39
| 0.722222
| 11
| 72
| 4.545455
| 0.636364
| 0.28
| 0.52
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.041667
| 72
| 3
| 40
| 24
| 0.710145
| 0
| 0
| 0
| 0
| 0
| 0.30137
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0b5ede3b5aca3c01f8462524a1fc6a7c8300c5ca
| 124
|
py
|
Python
|
testing/unit2_test.py
|
cerrno/swe-talk-materials
|
0f11e34a850959202d2e5fb2ecd71416195ddc7c
|
[
"MIT"
] | null | null | null |
testing/unit2_test.py
|
cerrno/swe-talk-materials
|
0f11e34a850959202d2e5fb2ecd71416195ddc7c
|
[
"MIT"
] | null | null | null |
testing/unit2_test.py
|
cerrno/swe-talk-materials
|
0f11e34a850959202d2e5fb2ecd71416195ddc7c
|
[
"MIT"
] | null | null | null |
from unit2 import palindrome
def test_palindrome():
assert palindrome('test') == 0
assert palindrome('kayak') == 1
| 20.666667
| 35
| 0.693548
| 15
| 124
| 5.666667
| 0.666667
| 0.376471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029703
| 0.185484
| 124
| 6
| 35
| 20.666667
| 0.811881
| 0
| 0
| 0
| 0
| 0
| 0.072
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0b8faf317e65360d408b20d346c5a502085dad2e
| 66
|
py
|
Python
|
acq4/analysis/modules/AtlasBuilder/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 47
|
2015-01-05T16:18:10.000Z
|
2022-03-16T13:09:30.000Z
|
acq4/analysis/modules/AtlasBuilder/__init__.py
|
aleonlein/acq4
|
4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555
|
[
"MIT"
] | 48
|
2015-04-19T16:51:41.000Z
|
2022-03-31T14:48:16.000Z
|
acq4/analysis/modules/AtlasBuilder/__init__.py
|
sensapex/acq4
|
9561ba73caff42c609bd02270527858433862ad8
|
[
"MIT"
] | 32
|
2015-01-15T14:11:49.000Z
|
2021-07-15T13:44:52.000Z
|
from __future__ import print_function
from .AtlasBuilder import *
| 22
| 37
| 0.848485
| 8
| 66
| 6.375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 66
| 2
| 38
| 33
| 0.87931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
0b994af2ff79fc252a33133ef6a2bb8ad6219384
| 20,337
|
py
|
Python
|
tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
|
MathMachado/tensorflow
|
56afda20b15f234c23e8393f7e337e7dd2659c2d
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
|
MathMachado/tensorflow
|
56afda20b15f234c23e8393f7e337e7dd2659c2d
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
|
MathMachado/tensorflow
|
56afda20b15f234c23e8393f7e337e7dd2659c2d
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged.to_tensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_conversion_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToTensorOpTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testDocStringExamples(self):
"""Example from ragged_to_tensor.__doc__."""
rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]])
dt = rt.to_tensor()
self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]])
@parameterized.parameters(
{
'rt_input': [],
'ragged_rank': 1,
'expected': [],
'expected_shape': [0, 0],
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]]
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'default': 9,
'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]]
},
{
'rt_input': [[[1], [2], [3]], [], [[4]], [[5], [6]]],
'ragged_rank':
1,
'default': [9],
'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]],
[[5], [6], [9]]]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'expected': [
[[1, 2], [0, 0], [3, 4]], #
[[0, 0], [0, 0], [0, 0]], #
[[5, 0], [0, 0], [0, 0]], #
[[6, 7], [8, 0], [0, 0]], #
]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'default':
9,
'expected': [
[[1, 2], [9, 9], [3, 4]], #
[[9, 9], [9, 9], [9, 9]], #
[[5, 9], [9, 9], [9, 9]], #
[[6, 7], [8, 9], [9, 9]], #
]
},
{
'rt_input': [[[1], [2], [3]]],
'ragged_rank': 1,
'default': 0,
'expected': [[[1], [2], [3]]],
},
{
'rt_input': [[[[1], [2]], [], [[3]]]],
'default': 9,
'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]],
},
)
def testRaggedTensorToTensor(self,
rt_input,
expected,
ragged_rank=None,
default=None,
expected_shape=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
dt = rt.to_tensor(default)
self.assertIsInstance(dt, ops.Tensor)
self.assertEqual(rt.dtype, dt.dtype)
self.assertTrue(dt.shape.is_compatible_with(rt.shape))
if expected_shape is not None:
expected = np.ndarray(expected_shape, buffer=np.array(expected))
self.assertAllEqual(dt, expected)
@parameterized.parameters(
{
'rt_input': [[1, 2, 3]],
'default': [0],
'error': (ValueError, r'Shape \(1,\) must have rank at most 0'),
},
{
'rt_input': [[[1, 2], [3, 4]], [[5, 6]]],
'ragged_rank': 1,
'default': [7, 8, 9],
'error': (ValueError, r'Shapes \(3,\) and \(2,\) are incompatible'),
},
{
'rt_input': [[1, 2, 3]],
'default': 'a',
'error': (TypeError, '.*'),
},
)
def testError(self, rt_input, default, error, ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
with self.assertRaisesRegexp(error[0], error[1]):
rt.to_tensor(default)
# This covers the tests above, but with the new implementation.
@test_util.run_all_in_graph_and_eager_modes
class RaggedTensorToTensorOpNewTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testDocStringExamples(self):
"""Example from ragged_to_tensor.__doc__."""
rt = ragged_factory_ops.constant([[9, 8, 7], [], [6, 5], [4]])
dt = ragged_conversion_ops.ragged_to_dense(rt)
self.assertAllEqual(dt, [[9, 8, 7], [0, 0, 0], [6, 5, 0], [4, 0, 0]])
@parameterized.parameters(
{
'rt_input': [],
'ragged_rank': 1,
'expected': [],
'expected_shape': [0, 0],
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'expected': [[1, 2, 3], [0, 0, 0], [4, 0, 0], [5, 6, 0]]
},
{
'rt_input': [[1, 2, 3], [], [4], [5, 6]],
'default': 9,
'expected': [[1, 2, 3], [9, 9, 9], [4, 9, 9], [5, 6, 9]]
},
{
'rt_input': [[[1], [2], [3]], [], [[4]], [[5], [6]]],
'ragged_rank':
1,
'default': [9],
'expected': [[[1], [2], [3]], [[9], [9], [9]], [[4], [9], [9]],
[[5], [6], [9]]]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'expected': [
[[1, 2], [0, 0], [3, 4]], #
[[0, 0], [0, 0], [0, 0]], #
[[5, 0], [0, 0], [0, 0]], #
[[6, 7], [8, 0], [0, 0]], #
]
},
{
'rt_input': [[[1, 2], [], [3, 4]], [], [[5]], [[6, 7], [8]]],
'default':
9,
'expected': [
[[1, 2], [9, 9], [3, 4]], #
[[9, 9], [9, 9], [9, 9]], #
[[5, 9], [9, 9], [9, 9]], #
[[6, 7], [8, 9], [9, 9]], #
]
},
{
'rt_input': [[[1], [2], [3]]],
'ragged_rank': 1,
'default': 0,
'expected': [[[1], [2], [3]]],
},
{
'rt_input': [[[[1], [2]], [], [[3]]]],
'default': 9,
'expected': [[[[1], [2]], [[9], [9]], [[3], [9]]]],
},
)
def testRaggedTensorToTensor(self,
rt_input,
expected,
ragged_rank=None,
default=None,
expected_shape=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
dt = ragged_conversion_ops.ragged_to_dense(rt, default_value=default)
self.assertIsInstance(dt, ops.Tensor)
self.assertEqual(rt.dtype, dt.dtype)
self.assertTrue(dt.shape.is_compatible_with(rt.shape))
if expected_shape is not None:
expected = np.ndarray(expected_shape, buffer=np.array(expected))
self.assertAllEqual(dt, expected)
@parameterized.parameters(
{
'rt_input': [[1, 2, 3]],
'default': 'a',
'error': (TypeError, '.*'),
}, {
'rt_input': [[1, 2, 3]],
'default': 'b',
'error': (TypeError, '.*'),
})
def testError(self, rt_input, default, error, ragged_rank=None):
rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
with self.assertRaisesRegexp(error[0], error[1]):
ragged_conversion_ops.ragged_to_dense(rt, default_value=default)
@test_util.run_all_in_graph_and_eager_modes
class RaggedToTensorOpAdditionalTests(test_util.TensorFlowTestCase):
def _compare_to_reference(self,
ragged_tensor,
expected=None,
default_value=None):
treatment = ragged_conversion_ops.ragged_to_dense(
ragged_tensor, default_value=default_value)
control = ragged_tensor.to_tensor(default_value=default_value)
self.assertAllEqual(control, treatment)
if expected is not None:
self.assertAllEqual(expected, treatment)
def test_already_dense_simple(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant([6, 7, 8, 9, 10, 11], dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data, [[6, 7, 8], [9, 10, 11]])
def test_already_dense_with_dense_values_and_default(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]],
dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data,
[[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [16, 17]]],
default_value=constant_op.constant([31, 32], dtype=dtypes.int64))
def test_already_dense_with_dense_values(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15], [16, 17]],
dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data,
[[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [16, 17]]])
def test_ragged_with_dense_values_and_default(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15]], dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [2, 3]]],
default_value=[2, 3])
def test_ragged_with_dense_values_and_small_default(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[6, 7], [8, 9], [10, 11], [12, 13], [14, 15]], dtype=dtypes.int64),
value_rowids=constant_op.constant([0, 0, 0, 1, 1], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data, [[[6, 7], [8, 9], [10, 11]], [[12, 13], [14, 15], [2, 2]]],
default_value=2)
def test_already_dense_with_dense_values_string(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
[[b'a', b'b'], [b'c', b'd'], [b'e', b'f'], [b'g', b'jalapeno'],
[b'kangaroo', b'llama'], [b'manzana', b'nectar']],
dtype=dtypes.string),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data,
[[[b'a', b'b'], [b'c', b'd'], [b'e', b'f']],
[[b'g', b'jalapeno'], [b'kangaroo', b'llama'],
[b'manzana', b'nectar']]])
def test_already_dense_with_string(self):
"""This studies a tensor initialized with value_rowids and nrows."""
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant(
['a', 'b', 'c', 'd', 'e', 'antidisestablishmentarianism'],
dtype=dtypes.string),
value_rowids=constant_op.constant([0, 0, 0, 1, 1, 1],
dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(
input_data,
[[b'a', b'b', b'c'], [b'd', b'e', b'antidisestablishmentarianism']])
def test_already_dense(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [3, 4, 5]])
self._compare_to_reference(input_data, [[0, 1, 2], [3, 4, 5]])
def test_true_ragged(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]])
self._compare_to_reference(input_data, [[0, 1, 2], [0, 0, 0], [3, 0, 0]])
def test_true_ragged_default_3(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]])
self._compare_to_reference(
input_data, [[0, 1, 2], [3, 3, 3], [3, 3, 3]], default_value=3)
def test_three_dimensional_ragged(self):
input_data = ragged_factory_ops.constant([[[0, 1, 2], []], [], [[3]]])
self._compare_to_reference(
input_data, [[[0, 1, 2], [3, 3, 3]], [[3, 3, 3], [3, 3, 3]],
[[3, 3, 3], [3, 3, 3]]],
default_value=3)
def test_empty_tensor(self):
input_data = RaggedTensor.from_value_rowids(
values=constant_op.constant([], dtype=dtypes.int64),
value_rowids=constant_op.constant([], dtype=dtypes.int64),
nrows=constant_op.constant(2, dtype=dtypes.int64),
validate=True)
self._compare_to_reference(input_data, [[], []], default_value=3)
def test_empty_last(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3], []])
self._compare_to_reference(input_data,
[[0, 1, 2], [0, 0, 0], [3, 0, 0], [0, 0, 0]])
def test_shape_limit(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[2, 3])
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_limit_tuple(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=(2, 3))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_limit_tensor_shape(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=tensor_shape.TensorShape([2, 3]))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_half_limit_tensor_shape(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=tensor_shape.TensorShape([2, None]))
self.assertAllEqual(actual, [[0, 1, 2, 3], [0, 0, 0, 0]])
def test_skip_eager_shape_half_limit_tensor_shape(self):
# Eager would produce a shape of [2, 4]
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=tensor_shape.TensorShape([2, None]))
result = actual.shape.as_list()
# This is equal to [2, 4] in eager, or [2, None] in non-eager.
self.assertEqual(result[0], 2)
def test_shape_limit_shape_is_tensor_int64(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=constant_op.constant([2, 3], dtype=dtypes.int64))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_limit_shape_is_tensor_int32(self):
input_data = ragged_factory_ops.constant([[0, 1, 2, 3], [], [4], []])
actual = ragged_conversion_ops.ragged_to_dense(
input_data, shape=constant_op.constant([2, 3], dtype=dtypes.int32))
self.assertAllEqual(actual, [[0, 1, 2], [0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [2, 3])
def test_shape_expand_first_dim(self):
input_data = ragged_factory_ops.constant([[0, 1, 2], [], [3]])
actual = ragged_conversion_ops.ragged_to_dense(input_data, shape=[4, 4])
self.assertAllEqual(
actual, [[0, 1, 2, 0], [0, 0, 0, 0], [3, 0, 0, 0], [0, 0, 0, 0]])
self.assertEqual(actual.shape.as_list(), [4, 4])
def test_value_transposed(self):
  """Conversion works when values come from a transposed (columnar) tensor.

  Exercises a tensor in columnar memory layout, since the underlying op
  copies data in the raw format and could conceivably mishandle it.
  """
  columnar_values = array_ops.transpose(
      constant_op.constant([[0, 1, 2, 3], [4, 5, 6, 7]]))
  ragged_input = RaggedTensor.from_value_rowids(
      values=columnar_values,
      value_rowids=constant_op.constant([0, 1, 2, 3], dtype=dtypes.int64),
      nrows=constant_op.constant(4, dtype=dtypes.int64),
      validate=True)
  self._compare_to_reference(
      ragged_input, [[[0, 4]], [[1, 5]], [[2, 6]], [[3, 7]]])
# This fails on the older version of to_tensor.
def test_broadcast_default(self):
  """A lower-rank default value broadcasts against the dense dimensions.

  NOTE(review): the original comments say this functionality is not
  supported, yet the test asserts a broadcast result — presumably it
  documents behavior of the newer op only; confirm against the op's docs.
  """
  # The dense (inner) dimension of each ragged element is 2 x 2.
  ragged_input = ragged_factory_ops.constant([[[[1, 2], [3, 4]]], []],
                                             ragged_rank=1)
  # The placeholder default has a 2 x 1 shape, requiring broadcast to 2 x 2.
  broadcast_default = array_ops.placeholder_with_default([[5], [6]],
                                                         shape=None)
  dense = ragged_conversion_ops.ragged_to_dense(
      ragged_input, default_value=broadcast_default)
  self.assertAllEqual(dense, [[[[1, 2], [3, 4]]], [[[5, 5], [6, 6]]]])
# This fails on the older version of to_tensor.
def test_broadcast_default_no_placeholder(self):
  """Broadcasting a constant (non-placeholder) default value.

  Per the original author, the older op rejects this case while creating
  the op, i.e. it fails more gracefully than the placeholder variant.
  """
  ragged_input = ragged_factory_ops.constant([[[[1, 2], [3, 4]]], []],
                                             ragged_rank=1)
  # default_value has a 2 x 1 shape and must broadcast to the 2 x 2 cells.
  broadcast_default = constant_op.constant([[5], [6]], shape=None)
  dense = ragged_conversion_ops.ragged_to_dense(
      ragged_input, default_value=broadcast_default)
  self.assertAllEqual(dense, [[[[1, 2], [3, 4]]], [[[5, 5], [6, 6]]]])
def test_shape_expand_second_dim(self):
  """Requesting a wider second dim pads every row with zeros."""
  ragged_input = ragged_factory_ops.constant([[0, 1, 2], [], [3], []])
  dense = ragged_conversion_ops.ragged_to_dense(ragged_input, shape=[3, 4])
  # Fourth input row is dropped (shape[0]=3); rows are padded to width 4.
  self.assertAllEqual(dense, [[0, 1, 2, 0], [0, 0, 0, 0], [3, 0, 0, 0]])
def test_empty_tensor_with_shape(self):
  """An input with zero values fills the requested shape with the default."""
  empty_ragged = RaggedTensor.from_value_rowids(
      values=constant_op.constant([], dtype=dtypes.int64),
      value_rowids=constant_op.constant([], dtype=dtypes.int64),
      nrows=constant_op.constant(2, dtype=dtypes.int64),
      validate=True)
  dense = ragged_conversion_ops.ragged_to_dense(
      empty_ragged, default_value=3, shape=[2, 3])
  # Every cell comes from default_value since there are no real values.
  self.assertAllEqual(dense, [[3, 3, 3], [3, 3, 3]])
# Standard test entry point: run every test case defined in this file.
if __name__ == '__main__':
  googletest.main()
| 41.168016
| 80
| 0.55844
| 2,664
| 20,337
| 4.053679
| 0.095721
| 0.016668
| 0.014168
| 0.00926
| 0.802389
| 0.772849
| 0.750625
| 0.722382
| 0.705899
| 0.699139
| 0
| 0.062437
| 0.263657
| 20,337
| 493
| 81
| 41.251521
| 0.658698
| 0.092787
| 0
| 0.594132
| 0
| 0
| 0.042166
| 0.003051
| 0
| 0
| 0
| 0
| 0.07824
| 1
| 0.080685
| false
| 0
| 0.036675
| 0
| 0.124694
| 0.002445
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f0321baac4fe78cb1d43c80ac7fb66d7b5d77d98
| 159
|
py
|
Python
|
lichee/utils/tfrecord/torch/__init__.py
|
Tencent/Lichee
|
7653becd6fbf8b0715f788af3c0507c012be08b4
|
[
"Apache-2.0"
] | 91
|
2021-10-30T02:25:05.000Z
|
2022-03-28T06:51:52.000Z
|
lichee/utils/tfrecord/torch/__init__.py
|
zhaijunyu/Lichee
|
7653becd6fbf8b0715f788af3c0507c012be08b4
|
[
"Apache-2.0"
] | 1
|
2021-12-17T09:30:25.000Z
|
2022-03-05T12:30:13.000Z
|
lichee/utils/tfrecord/torch/__init__.py
|
zhaijunyu/Lichee
|
7653becd6fbf8b0715f788af3c0507c012be08b4
|
[
"Apache-2.0"
] | 17
|
2021-11-04T07:50:23.000Z
|
2022-03-24T14:24:11.000Z
|
# -*- coding: utf-8 -*-
"""TFRecord torch dataset implementation.

Re-exports the dataset submodule's public dataset classes.
"""
from . import dataset
from .dataset import TFRecordDataset
from .dataset import MultiTFRecordDataset
| 17.666667
| 41
| 0.742138
| 17
| 159
| 6.941176
| 0.647059
| 0.186441
| 0.288136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007353
| 0.144654
| 159
| 8
| 42
| 19.875
| 0.860294
| 0.295597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.